From 0d4b8ae13364f5129b726894a18f5586714cd0e7 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Tue, 16 Jan 2018 14:42:47 +0800 Subject: [PATCH] remove v1_api_demo --- v1_api_demo/README.md | 5 - v1_api_demo/gan/.gitignore | 11 - v1_api_demo/gan/README.md | 13 - v1_api_demo/gan/data/download_cifar.sh | 18 - v1_api_demo/gan/data/get_mnist_data.sh | 17 - v1_api_demo/gan/gan_conf.py | 151 -------- v1_api_demo/gan/gan_conf_image.py | 298 --------------- v1_api_demo/gan/gan_trainer.py | 349 ------------------ v1_api_demo/mnist/.gitignore | 10 - v1_api_demo/mnist/api_train.py | 209 ----------- v1_api_demo/mnist/data/generate_list.py | 21 -- v1_api_demo/mnist/data/get_mnist_data.sh | 21 -- v1_api_demo/mnist/light_mnist.py | 79 ---- v1_api_demo/mnist/mnist_provider.py | 25 -- v1_api_demo/mnist/mnist_util.py | 30 -- v1_api_demo/mnist/train.sh | 32 -- v1_api_demo/mnist/vgg_16_mnist.py | 50 --- v1_api_demo/model_zoo/embedding/.gitignore | 2 - .../model_zoo/embedding/extract_para.py | 113 ------ .../model_zoo/embedding/paraconvert.py | 159 -------- .../model_zoo/embedding/pre_DictAndModel.sh | 32 -- v1_api_demo/model_zoo/resnet/.gitignore | 5 - v1_api_demo/model_zoo/resnet/classify.py | 312 ---------------- .../model_zoo/resnet/example/.gitignore | 1 - .../model_zoo/resnet/example/__init__.py | 13 - v1_api_demo/model_zoo/resnet/example/cat.jpg | Bin 12881 -> 0 bytes v1_api_demo/model_zoo/resnet/example/dog.jpg | Bin 71483 -> 0 bytes .../resnet/example/image_list_provider.py | 102 ----- .../model_zoo/resnet/example/test.list | 2 - .../model_zoo/resnet/extract_fea_c++.sh | 40 -- .../model_zoo/resnet/extract_fea_py.sh | 29 -- v1_api_demo/model_zoo/resnet/get_model.sh | 32 -- v1_api_demo/model_zoo/resnet/load_feature.py | 63 ---- v1_api_demo/model_zoo/resnet/net_diagram.sh | 39 -- v1_api_demo/model_zoo/resnet/predict.sh | 23 -- v1_api_demo/model_zoo/resnet/resnet.py | 271 -------------- v1_api_demo/quick_start/.gitignore | 15 - v1_api_demo/quick_start/api_predict.py | 147 -------- v1_api_demo/quick_start/api_predict.sh | 30 -- v1_api_demo/quick_start/api_train.py | 122 ------ v1_api_demo/quick_start/api_train.sh | 29 -- .../quick_start/cluster/cluster_train.sh | 45 --- v1_api_demo/quick_start/cluster/env.sh | 28 -- v1_api_demo/quick_start/cluster/pserver.sh | 26 -- v1_api_demo/quick_start/data/README.md | 9 - v1_api_demo/quick_start/data/get_data.sh | 27 -- .../data/proc_from_raw_data/get_data.sh | 79 ---- .../data/proc_from_raw_data/preprocess.py | 236 ------------ v1_api_demo/quick_start/dataprovider_bow.py | 86 ----- v1_api_demo/quick_start/dataprovider_emb.py | 52 --- v1_api_demo/quick_start/predict.sh | 32 -- v1_api_demo/quick_start/train.sh | 34 -- .../quick_start/trainer_config.bidi-lstm.py | 74 ---- v1_api_demo/quick_start/trainer_config.cnn.py | 68 ---- .../quick_start/trainer_config.db-lstm.py | 74 ---- v1_api_demo/quick_start/trainer_config.emb.py | 64 ---- v1_api_demo/quick_start/trainer_config.lr.py | 85 ----- .../quick_start/trainer_config.lstm.py | 70 ---- .../quick_start/trainer_config.resnet-lstm.py | 104 ------ v1_api_demo/sequence_tagging/data/get_data.sh | 21 -- v1_api_demo/sequence_tagging/data/test.list | 1 - v1_api_demo/sequence_tagging/data/train.list | 1 - v1_api_demo/sequence_tagging/dataprovider.py | 260 ------------- v1_api_demo/sequence_tagging/linear_crf.py | 83 ----- v1_api_demo/sequence_tagging/readme.md | 45 --- v1_api_demo/sequence_tagging/rnn_crf.py | 121 ------ v1_api_demo/sequence_tagging/train.sh | 12 - v1_api_demo/sequence_tagging/train_linear.sh | 11 - 
v1_api_demo/traffic_prediction/README | 7 - .../traffic_prediction/data/get_data.sh | 34 -- .../traffic_prediction/dataprovider.py | 82 ---- v1_api_demo/traffic_prediction/gen_result.py | 61 --- v1_api_demo/traffic_prediction/predict.sh | 30 -- v1_api_demo/traffic_prediction/train.sh | 27 -- .../traffic_prediction/trainer_config.py | 52 --- v1_api_demo/vae/README.md | 13 - v1_api_demo/vae/data/get_mnist_data.sh | 17 - v1_api_demo/vae/dataloader.py | 60 --- v1_api_demo/vae/vae_conf.py | 116 ------ v1_api_demo/vae/vae_train.py | 175 --------- 80 files changed, 5342 deletions(-) delete mode 100644 v1_api_demo/README.md delete mode 100644 v1_api_demo/gan/.gitignore delete mode 100644 v1_api_demo/gan/README.md delete mode 100755 v1_api_demo/gan/data/download_cifar.sh delete mode 100755 v1_api_demo/gan/data/get_mnist_data.sh delete mode 100644 v1_api_demo/gan/gan_conf.py delete mode 100644 v1_api_demo/gan/gan_conf_image.py delete mode 100644 v1_api_demo/gan/gan_trainer.py delete mode 100644 v1_api_demo/mnist/.gitignore delete mode 100644 v1_api_demo/mnist/api_train.py delete mode 100644 v1_api_demo/mnist/data/generate_list.py delete mode 100755 v1_api_demo/mnist/data/get_mnist_data.sh delete mode 100644 v1_api_demo/mnist/light_mnist.py delete mode 100644 v1_api_demo/mnist/mnist_provider.py delete mode 100644 v1_api_demo/mnist/mnist_util.py delete mode 100755 v1_api_demo/mnist/train.sh delete mode 100644 v1_api_demo/mnist/vgg_16_mnist.py delete mode 100644 v1_api_demo/model_zoo/embedding/.gitignore delete mode 100755 v1_api_demo/model_zoo/embedding/extract_para.py delete mode 100755 v1_api_demo/model_zoo/embedding/paraconvert.py delete mode 100755 v1_api_demo/model_zoo/embedding/pre_DictAndModel.sh delete mode 100644 v1_api_demo/model_zoo/resnet/.gitignore delete mode 100755 v1_api_demo/model_zoo/resnet/classify.py delete mode 100644 v1_api_demo/model_zoo/resnet/example/.gitignore delete mode 100644 v1_api_demo/model_zoo/resnet/example/__init__.py delete mode 100644 v1_api_demo/model_zoo/resnet/example/cat.jpg delete mode 100644 v1_api_demo/model_zoo/resnet/example/dog.jpg delete mode 100644 v1_api_demo/model_zoo/resnet/example/image_list_provider.py delete mode 100644 v1_api_demo/model_zoo/resnet/example/test.list delete mode 100755 v1_api_demo/model_zoo/resnet/extract_fea_c++.sh delete mode 100755 v1_api_demo/model_zoo/resnet/extract_fea_py.sh delete mode 100755 v1_api_demo/model_zoo/resnet/get_model.sh delete mode 100644 v1_api_demo/model_zoo/resnet/load_feature.py delete mode 100755 v1_api_demo/model_zoo/resnet/net_diagram.sh delete mode 100755 v1_api_demo/model_zoo/resnet/predict.sh delete mode 100644 v1_api_demo/model_zoo/resnet/resnet.py delete mode 100644 v1_api_demo/quick_start/.gitignore delete mode 100755 v1_api_demo/quick_start/api_predict.py delete mode 100755 v1_api_demo/quick_start/api_predict.sh delete mode 100644 v1_api_demo/quick_start/api_train.py delete mode 100755 v1_api_demo/quick_start/api_train.sh delete mode 100755 v1_api_demo/quick_start/cluster/cluster_train.sh delete mode 100644 v1_api_demo/quick_start/cluster/env.sh delete mode 100755 v1_api_demo/quick_start/cluster/pserver.sh delete mode 100644 v1_api_demo/quick_start/data/README.md delete mode 100755 v1_api_demo/quick_start/data/get_data.sh delete mode 100755 v1_api_demo/quick_start/data/proc_from_raw_data/get_data.sh delete mode 100755 v1_api_demo/quick_start/data/proc_from_raw_data/preprocess.py delete mode 100644 v1_api_demo/quick_start/dataprovider_bow.py delete mode 100755 
v1_api_demo/quick_start/dataprovider_emb.py delete mode 100755 v1_api_demo/quick_start/predict.sh delete mode 100755 v1_api_demo/quick_start/train.sh delete mode 100644 v1_api_demo/quick_start/trainer_config.bidi-lstm.py delete mode 100644 v1_api_demo/quick_start/trainer_config.cnn.py delete mode 100644 v1_api_demo/quick_start/trainer_config.db-lstm.py delete mode 100644 v1_api_demo/quick_start/trainer_config.emb.py delete mode 100644 v1_api_demo/quick_start/trainer_config.lr.py delete mode 100644 v1_api_demo/quick_start/trainer_config.lstm.py delete mode 100644 v1_api_demo/quick_start/trainer_config.resnet-lstm.py delete mode 100755 v1_api_demo/sequence_tagging/data/get_data.sh delete mode 100644 v1_api_demo/sequence_tagging/data/test.list delete mode 100644 v1_api_demo/sequence_tagging/data/train.list delete mode 100644 v1_api_demo/sequence_tagging/dataprovider.py delete mode 100644 v1_api_demo/sequence_tagging/linear_crf.py delete mode 100644 v1_api_demo/sequence_tagging/readme.md delete mode 100644 v1_api_demo/sequence_tagging/rnn_crf.py delete mode 100755 v1_api_demo/sequence_tagging/train.sh delete mode 100755 v1_api_demo/sequence_tagging/train_linear.sh delete mode 100644 v1_api_demo/traffic_prediction/README delete mode 100755 v1_api_demo/traffic_prediction/data/get_data.sh delete mode 100644 v1_api_demo/traffic_prediction/dataprovider.py delete mode 100644 v1_api_demo/traffic_prediction/gen_result.py delete mode 100755 v1_api_demo/traffic_prediction/predict.sh delete mode 100755 v1_api_demo/traffic_prediction/train.sh delete mode 100755 v1_api_demo/traffic_prediction/trainer_config.py delete mode 100644 v1_api_demo/vae/README.md delete mode 100755 v1_api_demo/vae/data/get_mnist_data.sh delete mode 100644 v1_api_demo/vae/dataloader.py delete mode 100644 v1_api_demo/vae/vae_conf.py delete mode 100644 v1_api_demo/vae/vae_train.py diff --git a/v1_api_demo/README.md b/v1_api_demo/README.md deleted file mode 100644 index 0460a85fae..0000000000 --- a/v1_api_demo/README.md +++ /dev/null @@ -1,5 +0,0 @@ -The examples in v1_api_demo are using v1_api currently, and will be upgraded to v2_api later. -Thus, v1_api_demo is a temporary directory. We decide not to maintain it and will delete it in future. - -Please go to [PaddlePaddle/book](https://github.com/PaddlePaddle/book) and -[PaddlePaddle/models](https://github.com/PaddlePaddle/models) to learn PaddlePaddle. diff --git a/v1_api_demo/gan/.gitignore b/v1_api_demo/gan/.gitignore deleted file mode 100644 index 93a6f5080a..0000000000 --- a/v1_api_demo/gan/.gitignore +++ /dev/null @@ -1,11 +0,0 @@ -output/ -uniform_params/ -cifar_params/ -mnist_params/ -*.png -.pydevproject -.project -*.log -*.pyc -data/mnist_data/ -data/cifar-10-batches-py/ diff --git a/v1_api_demo/gan/README.md b/v1_api_demo/gan/README.md deleted file mode 100644 index 1908b534b0..0000000000 --- a/v1_api_demo/gan/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Generative Adversarial Networks (GAN) - -This demo implements GAN training described in the original GAN paper (https://arxiv.org/abs/1406.2661) and DCGAN (https://arxiv.org/abs/1511.06434). - -The general training procedures are implemented in gan_trainer.py. The neural network configurations are specified in gan_conf.py (for synthetic data) and gan_conf_image.py (for image data). - -In order to run the model, first download the corresponding data by running the shell script in ./data. -Then you can run the command below. 
The flag -d specifies the training data (cifar, mnist or uniform) and flag --useGpu specifies whether to use gpu for training (0 is cpu, 1 is gpu). - -$python gan_trainer.py -d cifar --use_gpu 1 - -The generated images will be stored in ./cifar_samples/ -The corresponding models will be stored in ./cifar_params/ diff --git a/v1_api_demo/gan/data/download_cifar.sh b/v1_api_demo/gan/data/download_cifar.sh deleted file mode 100755 index bbadc7c10c..0000000000 --- a/v1_api_demo/gan/data/download_cifar.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e -wget https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz -tar zxf cifar-10-python.tar.gz -rm cifar-10-python.tar.gz diff --git a/v1_api_demo/gan/data/get_mnist_data.sh b/v1_api_demo/gan/data/get_mnist_data.sh deleted file mode 100755 index a77c81bf5a..0000000000 --- a/v1_api_demo/gan/data/get_mnist_data.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env sh -# This script downloads the mnist data and unzips it. -set -e -DIR="$( cd "$(dirname "$0")" ; pwd -P )" -rm -rf "$DIR/mnist_data" -mkdir "$DIR/mnist_data" -cd "$DIR/mnist_data" - -echo "Downloading..." - -for fname in train-images-idx3-ubyte train-labels-idx1-ubyte t10k-images-idx3-ubyte t10k-labels-idx1-ubyte -do - if [ ! -e $fname ]; then - wget --no-check-certificate http://yann.lecun.com/exdb/mnist/${fname}.gz - gunzip ${fname}.gz - fi -done diff --git a/v1_api_demo/gan/gan_conf.py b/v1_api_demo/gan/gan_conf.py deleted file mode 100644 index 86ac2dffe5..0000000000 --- a/v1_api_demo/gan/gan_conf.py +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from paddle.trainer_config_helpers import * - -mode = get_config_arg("mode", str, "generator") -assert mode in set([ - "generator", "discriminator", "generator_training", "discriminator_training" -]) - -is_generator_training = mode == "generator_training" -is_discriminator_training = mode == "discriminator_training" -is_generator = mode == "generator" -is_discriminator = mode == "discriminator" - -# The network structure below follows the ref https://arxiv.org/abs/1406.2661 -# Here we used two hidden layers and batch_norm - -print('mode=%s' % mode) -# the dim of the noise (z) as the input of the generator network -noise_dim = 10 -# the dim of the hidden layer -hidden_dim = 10 -# the dim of the generated sample -sample_dim = 2 - -settings( - batch_size=128, - learning_rate=1e-4, - learning_method=AdamOptimizer(beta1=0.5)) - - -def discriminator(sample): - """ - discriminator ouputs the probablity of a sample is from generator - or real data. - The output has two dimenstional: dimension 0 is the probablity - of the sample is from generator and dimension 1 is the probabblity - of the sample is from real data. - """ - param_attr = ParamAttr(is_static=is_generator_training) - bias_attr = ParamAttr( - is_static=is_generator_training, initial_mean=1.0, initial_std=0) - - hidden = fc_layer( - input=sample, - name="dis_hidden", - size=hidden_dim, - bias_attr=bias_attr, - param_attr=param_attr, - act=ReluActivation()) - - hidden2 = fc_layer( - input=hidden, - name="dis_hidden2", - size=hidden_dim, - bias_attr=bias_attr, - param_attr=param_attr, - act=LinearActivation()) - - hidden_bn = batch_norm_layer( - hidden2, - act=ReluActivation(), - name="dis_hidden_bn", - bias_attr=bias_attr, - param_attr=ParamAttr( - is_static=is_generator_training, initial_mean=1.0, - initial_std=0.02), - use_global_stats=False) - - return fc_layer( - input=hidden_bn, - name="dis_prob", - size=2, - bias_attr=bias_attr, - param_attr=param_attr, - act=SoftmaxActivation()) - - -def generator(noise): - """ - generator generates a sample given noise - """ - param_attr = ParamAttr(is_static=is_discriminator_training) - bias_attr = ParamAttr( - is_static=is_discriminator_training, initial_mean=1.0, initial_std=0) - - hidden = fc_layer( - input=noise, - name="gen_layer_hidden", - size=hidden_dim, - bias_attr=bias_attr, - param_attr=param_attr, - act=ReluActivation()) - - hidden2 = fc_layer( - input=hidden, - name="gen_hidden2", - size=hidden_dim, - bias_attr=bias_attr, - param_attr=param_attr, - act=LinearActivation()) - - hidden_bn = batch_norm_layer( - hidden2, - act=ReluActivation(), - name="gen_layer_hidden_bn", - bias_attr=bias_attr, - param_attr=ParamAttr( - is_static=is_discriminator_training, - initial_mean=1.0, - initial_std=0.02), - use_global_stats=False) - - return fc_layer( - input=hidden_bn, - name="gen_layer1", - size=sample_dim, - bias_attr=bias_attr, - param_attr=param_attr, - act=LinearActivation()) - - -if is_generator_training: - noise = data_layer(name="noise", size=noise_dim) - sample = generator(noise) - -if is_discriminator_training: - sample = data_layer(name="sample", size=sample_dim) - -if is_generator_training or is_discriminator_training: - label = data_layer(name="label", size=1) - prob = discriminator(sample) - cost = cross_entropy(input=prob, label=label) - classification_error_evaluator( - input=prob, label=label, name=mode + '_error') - outputs(cost) - -if is_generator: - noise = data_layer(name="noise", size=noise_dim) - outputs(generator(noise)) diff --git a/v1_api_demo/gan/gan_conf_image.py 
b/v1_api_demo/gan/gan_conf_image.py deleted file mode 100644 index c469227994..0000000000 --- a/v1_api_demo/gan/gan_conf_image.py +++ /dev/null @@ -1,298 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from paddle.trainer_config_helpers import * - -mode = get_config_arg("mode", str, "generator") -dataSource = get_config_arg("data", str, "mnist") -assert mode in set([ - "generator", "discriminator", "generator_training", "discriminator_training" -]) - -is_generator_training = mode == "generator_training" -is_discriminator_training = mode == "discriminator_training" -is_generator = mode == "generator" -is_discriminator = mode == "discriminator" - -# The network structure below follows the dcgan paper -# (https://arxiv.org/abs/1511.06434) - -print('mode=%s' % mode) -# the dim of the noise (z) as the input of the generator network -noise_dim = 100 -# the number of filters in the layer in generator/discriminator that is -# closet to the image -gf_dim = 64 -df_dim = 64 -if dataSource == "mnist": - sample_dim = 28 # image dim - c_dim = 1 # image color -else: - sample_dim = 32 - c_dim = 3 -s2, s4 = int(sample_dim / 2), int(sample_dim / 4), -s8, s16 = int(sample_dim / 8), int(sample_dim / 16) - -settings( - batch_size=128, - learning_rate=2e-4, - learning_method=AdamOptimizer(beta1=0.5)) - - -def conv_bn(input, - channels, - imgSize, - num_filters, - output_x, - stride, - name, - param_attr, - bias_attr, - param_attr_bn, - bn, - trans=False, - act=ReluActivation()): - """ - conv_bn is a utility function that constructs a convolution/deconv layer - with an optional batch_norm layer - - :param bn: whether to use batch_norm_layer - :type bn: bool - :param trans: whether to use conv (False) or deconv (True) - :type trans: bool - """ - - # calculate the filter_size and padding size based on the given - # imgSize and ouput size - tmp = imgSize - (output_x - 1) * stride - if tmp <= 1 or tmp > 5: - raise ValueError("conv input-output dimension does not fit") - elif tmp <= 3: - filter_size = tmp + 2 - padding = 1 - else: - filter_size = tmp - padding = 0 - - print(imgSize, output_x, stride, filter_size, padding) - - if trans: - nameApx = "_convt" - else: - nameApx = "_conv" - - if bn: - conv = img_conv_layer( - input, - filter_size=filter_size, - num_filters=num_filters, - name=name + nameApx, - num_channels=channels, - act=LinearActivation(), - groups=1, - stride=stride, - padding=padding, - bias_attr=bias_attr, - param_attr=param_attr, - shared_biases=True, - layer_attr=None, - filter_size_y=None, - stride_y=None, - padding_y=None, - trans=trans) - - conv_bn = batch_norm_layer( - conv, - act=act, - name=name + nameApx + "_bn", - bias_attr=bias_attr, - param_attr=param_attr_bn, - use_global_stats=False) - - return conv_bn - else: - conv = img_conv_layer( - input, - filter_size=filter_size, - num_filters=num_filters, - name=name + nameApx, - num_channels=channels, - act=act, - groups=1, - stride=stride, - padding=padding, - 
bias_attr=bias_attr, - param_attr=param_attr, - shared_biases=True, - layer_attr=None, - filter_size_y=None, - stride_y=None, - padding_y=None, - trans=trans) - return conv - - -def generator(noise): - """ - generator generates a sample given noise - """ - param_attr = ParamAttr( - is_static=is_discriminator_training, initial_mean=0.0, initial_std=0.02) - bias_attr = ParamAttr( - is_static=is_discriminator_training, initial_mean=0.0, initial_std=0.0) - - param_attr_bn = ParamAttr( - is_static=is_discriminator_training, initial_mean=1.0, initial_std=0.02) - - h1 = fc_layer( - input=noise, - name="gen_layer_h1", - size=s8 * s8 * gf_dim * 4, - bias_attr=bias_attr, - param_attr=param_attr, - act=LinearActivation()) - - h1_bn = batch_norm_layer( - h1, - act=ReluActivation(), - name="gen_layer_h1_bn", - bias_attr=bias_attr, - param_attr=param_attr_bn, - use_global_stats=False) - - h2_bn = conv_bn( - h1_bn, - channels=gf_dim * 4, - output_x=s8, - num_filters=gf_dim * 2, - imgSize=s4, - stride=2, - name="gen_layer_h2", - param_attr=param_attr, - bias_attr=bias_attr, - param_attr_bn=param_attr_bn, - bn=True, - trans=True) - - h3_bn = conv_bn( - h2_bn, - channels=gf_dim * 2, - output_x=s4, - num_filters=gf_dim, - imgSize=s2, - stride=2, - name="gen_layer_h3", - param_attr=param_attr, - bias_attr=bias_attr, - param_attr_bn=param_attr_bn, - bn=True, - trans=True) - - return conv_bn( - h3_bn, - channels=gf_dim, - output_x=s2, - num_filters=c_dim, - imgSize=sample_dim, - stride=2, - name="gen_layer_h4", - param_attr=param_attr, - bias_attr=bias_attr, - param_attr_bn=param_attr_bn, - bn=False, - trans=True, - act=TanhActivation()) - - -def discriminator(sample): - """ - discriminator ouputs the probablity of a sample is from generator - or real data. - The output has two dimenstional: dimension 0 is the probablity - of the sample is from generator and dimension 1 is the probabblity - of the sample is from real data. 
- """ - param_attr = ParamAttr( - is_static=is_generator_training, initial_mean=0.0, initial_std=0.02) - bias_attr = ParamAttr( - is_static=is_generator_training, initial_mean=0.0, initial_std=0.0) - - param_attr_bn = ParamAttr( - is_static=is_generator_training, initial_mean=1.0, initial_std=0.02) - - h0 = conv_bn( - sample, - channels=c_dim, - imgSize=sample_dim, - num_filters=df_dim, - output_x=s2, - stride=2, - name="dis_h0", - param_attr=param_attr, - bias_attr=bias_attr, - param_attr_bn=param_attr_bn, - bn=False) - - h1_bn = conv_bn( - h0, - channels=df_dim, - imgSize=s2, - num_filters=df_dim * 2, - output_x=s4, - stride=2, - name="dis_h1", - param_attr=param_attr, - bias_attr=bias_attr, - param_attr_bn=param_attr_bn, - bn=True) - - h2_bn = conv_bn( - h1_bn, - channels=df_dim * 2, - imgSize=s4, - num_filters=df_dim * 4, - output_x=s8, - stride=2, - name="dis_h2", - param_attr=param_attr, - bias_attr=bias_attr, - param_attr_bn=param_attr_bn, - bn=True) - - return fc_layer( - input=h2_bn, - name="dis_prob", - size=2, - bias_attr=bias_attr, - param_attr=param_attr, - act=SoftmaxActivation()) - - -if is_generator_training: - noise = data_layer(name="noise", size=noise_dim) - sample = generator(noise) - -if is_discriminator_training: - sample = data_layer(name="sample", size=sample_dim * sample_dim * c_dim) - -if is_generator_training or is_discriminator_training: - label = data_layer(name="label", size=1) - prob = discriminator(sample) - cost = cross_entropy(input=prob, label=label) - classification_error_evaluator( - input=prob, label=label, name=mode + '_error') - outputs(cost) - -if is_generator: - noise = data_layer(name="noise", size=noise_dim) - outputs(generator(noise)) diff --git a/v1_api_demo/gan/gan_trainer.py b/v1_api_demo/gan/gan_trainer.py deleted file mode 100644 index 4a26c230f7..0000000000 --- a/v1_api_demo/gan/gan_trainer.py +++ /dev/null @@ -1,349 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import argparse -import random -import numpy -import cPickle -import sys, os -from PIL import Image - -from paddle.trainer.config_parser import parse_config -from paddle.trainer.config_parser import logger -import py_paddle.swig_paddle as api -import matplotlib.pyplot as plt - - -def plot2DScatter(data, outputfile): - ''' - Plot the data as a 2D scatter plot and save to outputfile - data needs to be two dimensinoal - ''' - x = data[:, 0] - y = data[:, 1] - logger.info("The mean vector is %s" % numpy.mean(data, 0)) - logger.info("The std vector is %s" % numpy.std(data, 0)) - - heatmap, xedges, yedges = numpy.histogram2d(x, y, bins=50) - extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]] - - plt.clf() - plt.scatter(x, y) - plt.savefig(outputfile, bbox_inches='tight') - - -def CHECK_EQ(a, b): - assert a == b, "a=%s, b=%s" % (a, b) - - -def copy_shared_parameters(src, dst): - ''' - copy the parameters from src to dst - :param src: the source of the parameters - :type src: GradientMachine - :param dst: the destination of the parameters - :type dst: GradientMachine - ''' - src_params = [src.getParameter(i) for i in xrange(src.getParameterSize())] - src_params = dict([(p.getName(), p) for p in src_params]) - - for i in xrange(dst.getParameterSize()): - dst_param = dst.getParameter(i) - src_param = src_params.get(dst_param.getName(), None) - if src_param is None: - continue - src_value = src_param.getBuf(api.PARAMETER_VALUE) - dst_value = dst_param.getBuf(api.PARAMETER_VALUE) - CHECK_EQ(len(src_value), len(dst_value)) - dst_value.copyFrom(src_value) - dst_param.setValueUpdated() - - -def print_parameters(src): - src_params = [src.getParameter(i) for i in xrange(src.getParameterSize())] - - print "***************" - for p in src_params: - print "Name is %s" % p.getName() - print "value is %s \n" % p.getBuf(api.PARAMETER_VALUE).copyToNumpyArray( - ) - - -def load_mnist_data(imageFile): - f = open(imageFile, "rb") - f.read(16) - - # Define number of samples for train/test - if "train" in imageFile: - n = 60000 - else: - n = 10000 - - data = numpy.fromfile(f, 'ubyte', count=n * 28 * 28).reshape((n, 28 * 28)) - data = data / 255.0 * 2.0 - 1.0 - - f.close() - return data.astype('float32') - - -def load_cifar_data(cifar_path): - batch_size = 10000 - data = numpy.zeros((5 * batch_size, 32 * 32 * 3), dtype="float32") - for i in range(1, 6): - file = cifar_path + "/data_batch_" + str(i) - fo = open(file, 'rb') - dict = cPickle.load(fo) - fo.close() - data[(i - 1) * batch_size:(i * batch_size), :] = dict["data"] - - data = data / 255.0 * 2.0 - 1.0 - return data - - -# synthesize 2-D uniform data -def load_uniform_data(): - data = numpy.random.rand(1000000, 2).astype('float32') - return data - - -def merge(images, size): - if images.shape[1] == 28 * 28: - h, w, c = 28, 28, 1 - else: - h, w, c = 32, 32, 3 - img = numpy.zeros((h * size[0], w * size[1], c)) - for idx in xrange(size[0] * size[1]): - i = idx % size[1] - j = idx // size[1] - img[j*h:j*h+h, i*w:i*w+w, :] = \ - ((images[idx, :].reshape((h, w, c), order="F").transpose(1, 0, 2) + 1.0) / 2.0 * 255.0) - return img.astype('uint8') - - -def save_images(images, path): - merged_img = merge(images, [8, 8]) - if merged_img.shape[2] == 1: - im = Image.fromarray(numpy.squeeze(merged_img)).convert('RGB') - else: - im = Image.fromarray(merged_img, mode="RGB") - im.save(path) - - -def get_real_samples(batch_size, data_np): - return data_np[numpy.random.choice( - data_np.shape[0], batch_size, replace=False), :] - - -def get_noise(batch_size, noise_dim): - 
return numpy.random.normal(size=(batch_size, noise_dim)).astype('float32') - - -def get_fake_samples(generator_machine, batch_size, noise): - gen_inputs = api.Arguments.createArguments(1) - gen_inputs.setSlotValue(0, api.Matrix.createDenseFromNumpy(noise)) - gen_outputs = api.Arguments.createArguments(0) - generator_machine.forward(gen_inputs, gen_outputs, api.PASS_TEST) - fake_samples = gen_outputs.getSlotValue(0).copyToNumpyMat() - return fake_samples - - -def get_training_loss(training_machine, inputs): - outputs = api.Arguments.createArguments(0) - training_machine.forward(inputs, outputs, api.PASS_TEST) - loss = outputs.getSlotValue(0).copyToNumpyMat() - return numpy.mean(loss) - - -def prepare_discriminator_data_batch_pos(batch_size, data_np): - real_samples = get_real_samples(batch_size, data_np) - labels = numpy.ones(batch_size, dtype='int32') - inputs = api.Arguments.createArguments(2) - inputs.setSlotValue(0, api.Matrix.createDenseFromNumpy(real_samples)) - inputs.setSlotIds(1, api.IVector.createVectorFromNumpy(labels)) - return inputs - - -def prepare_discriminator_data_batch_neg(generator_machine, batch_size, noise): - fake_samples = get_fake_samples(generator_machine, batch_size, noise) - labels = numpy.zeros(batch_size, dtype='int32') - inputs = api.Arguments.createArguments(2) - inputs.setSlotValue(0, api.Matrix.createDenseFromNumpy(fake_samples)) - inputs.setSlotIds(1, api.IVector.createVectorFromNumpy(labels)) - return inputs - - -def prepare_generator_data_batch(batch_size, noise): - label = numpy.ones(batch_size, dtype='int32') - inputs = api.Arguments.createArguments(2) - inputs.setSlotValue(0, api.Matrix.createDenseFromNumpy(noise)) - inputs.setSlotIds(1, api.IVector.createVectorFromNumpy(label)) - return inputs - - -def find(iterable, cond): - for item in iterable: - if cond(item): - return item - return None - - -def get_layer_size(model_conf, layer_name): - layer_conf = find(model_conf.layers, lambda x: x.name == layer_name) - assert layer_conf is not None, "Cannot find '%s' layer" % layer_name - return layer_conf.size - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("-d", "--data_source", help="mnist or cifar or uniform") - parser.add_argument( - "--use_gpu", default="1", help="1 means use gpu for training") - parser.add_argument("--gpu_id", default="0", help="the gpu_id parameter") - args = parser.parse_args() - data_source = args.data_source - use_gpu = args.use_gpu - assert data_source in ["mnist", "cifar", "uniform"] - assert use_gpu in ["0", "1"] - - if not os.path.exists("./%s_samples/" % data_source): - os.makedirs("./%s_samples/" % data_source) - - if not os.path.exists("./%s_params/" % data_source): - os.makedirs("./%s_params/" % data_source) - - api.initPaddle('--use_gpu=' + use_gpu, '--dot_period=10', - '--log_period=100', '--gpu_id=' + args.gpu_id, - '--save_dir=' + "./%s_params/" % data_source) - - if data_source == "uniform": - conf = "gan_conf.py" - num_iter = 10000 - else: - conf = "gan_conf_image.py" - num_iter = 1000 - - gen_conf = parse_config(conf, "mode=generator_training,data=" + data_source) - dis_conf = parse_config(conf, - "mode=discriminator_training,data=" + data_source) - generator_conf = parse_config(conf, "mode=generator,data=" + data_source) - batch_size = dis_conf.opt_config.batch_size - noise_dim = get_layer_size(gen_conf.model_config, "noise") - - if data_source == "mnist": - data_np = load_mnist_data("./data/mnist_data/train-images-idx3-ubyte") - elif data_source == "cifar": - data_np = 
load_cifar_data("./data/cifar-10-batches-py/") - else: - data_np = load_uniform_data() - - # this creates a gradient machine for discriminator - dis_training_machine = api.GradientMachine.createFromConfigProto( - dis_conf.model_config) - # this create a gradient machine for generator - gen_training_machine = api.GradientMachine.createFromConfigProto( - gen_conf.model_config) - - # generator_machine is used to generate data only, which is used for - # training discriminator - logger.info(str(generator_conf.model_config)) - generator_machine = api.GradientMachine.createFromConfigProto( - generator_conf.model_config) - - dis_trainer = api.Trainer.create(dis_conf, dis_training_machine) - - gen_trainer = api.Trainer.create(gen_conf, gen_training_machine) - - dis_trainer.startTrain() - gen_trainer.startTrain() - - # Sync parameters between networks (GradientMachine) at the beginning - copy_shared_parameters(gen_training_machine, dis_training_machine) - copy_shared_parameters(gen_training_machine, generator_machine) - - # constrain that either discriminator or generator can not be trained - # consecutively more than MAX_strike times - curr_train = "dis" - curr_strike = 0 - MAX_strike = 5 - - for train_pass in xrange(100): - dis_trainer.startTrainPass() - gen_trainer.startTrainPass() - for i in xrange(num_iter): - # Do forward pass in discriminator to get the dis_loss - noise = get_noise(batch_size, noise_dim) - data_batch_dis_pos = prepare_discriminator_data_batch_pos( - batch_size, data_np) - dis_loss_pos = get_training_loss(dis_training_machine, - data_batch_dis_pos) - - data_batch_dis_neg = prepare_discriminator_data_batch_neg( - generator_machine, batch_size, noise) - dis_loss_neg = get_training_loss(dis_training_machine, - data_batch_dis_neg) - - dis_loss = (dis_loss_pos + dis_loss_neg) / 2.0 - - # Do forward pass in generator to get the gen_loss - data_batch_gen = prepare_generator_data_batch(batch_size, noise) - gen_loss = get_training_loss(gen_training_machine, data_batch_gen) - - if i % 100 == 0: - print "d_pos_loss is %s d_neg_loss is %s" % (dis_loss_pos, - dis_loss_neg) - print "d_loss is %s g_loss is %s" % (dis_loss, gen_loss) - - # Decide which network to train based on the training history - # And the relative size of the loss - if (not (curr_train == "dis" and curr_strike == MAX_strike)) and \ - ((curr_train == "gen" and curr_strike == MAX_strike) or dis_loss > gen_loss): - if curr_train == "dis": - curr_strike += 1 - else: - curr_train = "dis" - curr_strike = 1 - dis_trainer.trainOneDataBatch(batch_size, data_batch_dis_neg) - dis_trainer.trainOneDataBatch(batch_size, data_batch_dis_pos) - copy_shared_parameters(dis_training_machine, - gen_training_machine) - - else: - if curr_train == "gen": - curr_strike += 1 - else: - curr_train = "gen" - curr_strike = 1 - gen_trainer.trainOneDataBatch(batch_size, data_batch_gen) - # TODO: add API for paddle to allow true parameter sharing between different GradientMachines - # so that we do not need to copy shared parameters. 
- copy_shared_parameters(gen_training_machine, - dis_training_machine) - copy_shared_parameters(gen_training_machine, generator_machine) - - dis_trainer.finishTrainPass() - gen_trainer.finishTrainPass() - # At the end of each pass, save the generated samples/images - fake_samples = get_fake_samples(generator_machine, batch_size, noise) - if data_source == "uniform": - plot2DScatter(fake_samples, "./%s_samples/train_pass%s.png" % - (data_source, train_pass)) - else: - save_images(fake_samples, "./%s_samples/train_pass%s.png" % - (data_source, train_pass)) - dis_trainer.finishTrain() - gen_trainer.finishTrain() - - -if __name__ == '__main__': - main() diff --git a/v1_api_demo/mnist/.gitignore b/v1_api_demo/mnist/.gitignore deleted file mode 100644 index 7e61d5e3a0..0000000000 --- a/v1_api_demo/mnist/.gitignore +++ /dev/null @@ -1,10 +0,0 @@ -data/raw_data -data/*.list -mnist_vgg_model -plot.png -train.log -*pyc -.ipynb_checkpoints -params.pkl -params.tar -params.tar.gz diff --git a/v1_api_demo/mnist/api_train.py b/v1_api_demo/mnist/api_train.py deleted file mode 100644 index e42c6cbb7e..0000000000 --- a/v1_api_demo/mnist/api_train.py +++ /dev/null @@ -1,209 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. -# -#Licensed under the Apache License, Version 2.0 (the "License"); -#you may not use this file except in compliance with the License. -#You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#See the License for the specific language governing permissions and -#limitations under the License. -""" -A very basic example for how to use current Raw SWIG API to train mnist network. - -Current implementation uses Raw SWIG, which means the API call is directly \ -passed to C++ side of Paddle. - -The user api could be simpler and carefully designed. 
-""" -import random - -import numpy as np -import paddle.v2 as paddle_v2 -import py_paddle.swig_paddle as api -from paddle.trainer_config_helpers import * -from py_paddle import DataProviderConverter - -from mnist_util import read_from_mnist - - -def init_parameter(network): - assert isinstance(network, api.GradientMachine) - for each_param in network.getParameters(): - assert isinstance(each_param, api.Parameter) - array_size = len(each_param) - array = np.random.uniform(-1.0, 1.0, array_size).astype('float32') - each_param.getBuf(api.PARAMETER_VALUE).copyFromNumpyArray(array) - - -def generator_to_batch(generator, batch_size): - ret_val = list() - for each_item in generator: - ret_val.append(each_item) - if len(ret_val) == batch_size: - yield ret_val - ret_val = list() - if len(ret_val) != 0: - yield ret_val - - -class BatchPool(object): - def __init__(self, generator, batch_size): - self.data = list(generator) - self.batch_size = batch_size - - def __call__(self): - random.shuffle(self.data) - for offset in xrange(0, len(self.data), self.batch_size): - limit = min(offset + self.batch_size, len(self.data)) - yield self.data[offset:limit] - - -def input_order_converter(generator): - for each_item in generator: - yield each_item['pixel'], each_item['label'] - - -def main(): - api.initPaddle("-use_gpu=false", "-trainer_count=4") # use 4 cpu cores - - optimizer = paddle_v2.optimizer.Adam( - learning_rate=1e-4, - batch_size=1000, - model_average=ModelAverage(average_window=0.5), - regularization=L2Regularization(rate=0.5)) - - # Create Local Updater. Local means not run in cluster. - # For a cluster training, here we can change to createRemoteUpdater - # in future. - updater = optimizer.create_local_updater() - assert isinstance(updater, api.ParameterUpdater) - - # define network - images = paddle_v2.layer.data( - name='pixel', type=paddle_v2.data_type.dense_vector(784)) - label = paddle_v2.layer.data( - name='label', type=paddle_v2.data_type.integer_value(10)) - hidden1 = paddle_v2.layer.fc(input=images, size=200) - hidden2 = paddle_v2.layer.fc(input=hidden1, size=200) - inference = paddle_v2.layer.fc(input=hidden2, - size=10, - act=paddle_v2.activation.Softmax()) - cost = paddle_v2.layer.classification_cost(input=inference, label=label) - - # Create Simple Gradient Machine. - model_config = paddle_v2.layer.parse_network(cost) - m = api.GradientMachine.createFromConfigProto(model_config, - api.CREATE_MODE_NORMAL, - optimizer.enable_types()) - - # This type check is not useful. Only enable type hint in IDE. - # Such as PyCharm - assert isinstance(m, api.GradientMachine) - - # Initialize Parameter by numpy. - init_parameter(network=m) - - # Initialize ParameterUpdater. - updater.init(m) - - # DataProvider Converter is a utility convert Python Object to Paddle C++ - # Input. The input format is as same as Paddle's DataProvider. - converter = DataProviderConverter(input_types=[images.type, label.type]) - - train_file = './data/raw_data/train' - test_file = './data/raw_data/t10k' - - # start gradient machine. - # the gradient machine must be started before invoke forward/backward. - # not just for training, but also for inference. - m.start() - - # evaluator can print error rate, etc. It is a C++ class. - batch_evaluator = m.makeEvaluator() - test_evaluator = m.makeEvaluator() - - # Get Train Data. - # TrainData will stored in a data pool. Currently implementation is not care - # about memory, speed. Just a very naive implementation. 
- train_data_generator = input_order_converter(read_from_mnist(train_file)) - train_data = BatchPool(train_data_generator, 512) - - # outArgs is Neural Network forward result. Here is not useful, just passed - # to gradient_machine.forward - outArgs = api.Arguments.createArguments(0) - - for pass_id in xrange(2): # we train 2 passes. - updater.startPass() - - for batch_id, data_batch in enumerate(train_data()): - # data_batch is input images. - # here, for online learning, we could get data_batch from network. - - # Start update one batch. - pass_type = updater.startBatch(len(data_batch)) - - # Start BatchEvaluator. - # batch_evaluator can be used between start/finish. - batch_evaluator.start() - - # forwardBackward is a shortcut for forward and backward. - # It is sometimes faster than invoke forward/backward separately, - # because in GradientMachine, it may be async. - m.forwardBackward(converter(data_batch), outArgs, pass_type) - - for each_param in m.getParameters(): - updater.update(each_param) - - # Get cost. We use numpy to calculate total cost for this batch. - cost_vec = outArgs.getSlotValue(0) - cost_vec = cost_vec.copyToNumpyMat() - cost = cost_vec.sum() / len(data_batch) - - # Make evaluator works. - m.eval(batch_evaluator) - - # Print logs. - print 'Pass id', pass_id, 'Batch id', batch_id, 'with cost=', \ - cost, batch_evaluator - - batch_evaluator.finish() - # Finish batch. - # * will clear gradient. - # * ensure all values should be updated. - updater.finishBatch(cost) - - # testing stage. use test data set to test current network. - updater.apply() - test_evaluator.start() - test_data_generator = input_order_converter(read_from_mnist(test_file)) - for data_batch in generator_to_batch(test_data_generator, 512): - # in testing stage, only forward is needed. - m.forward(converter(data_batch), outArgs, api.PASS_TEST) - m.eval(test_evaluator) - - # print error rate for test data set - print 'Pass', pass_id, ' test evaluator: ', test_evaluator - test_evaluator.finish() - updater.restore() - - updater.catchUpWith() - params = m.getParameters() - for each_param in params: - assert isinstance(each_param, api.Parameter) - value = each_param.getBuf(api.PARAMETER_VALUE) - value = value.copyToNumpyArray() - - # Here, we could save parameter to every where you want - print each_param.getName(), value - - updater.finishPass() - - m.finish() - - -if __name__ == '__main__': - main() diff --git a/v1_api_demo/mnist/data/generate_list.py b/v1_api_demo/mnist/data/generate_list.py deleted file mode 100644 index 49981cc7a9..0000000000 --- a/v1_api_demo/mnist/data/generate_list.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -o = open("./" + "train.list", "w") -o.write("./data/raw_data/train" + "\n") -o.close() - -o = open("./" + "test.list", "w") -o.write("./data/raw_data/t10k" + "\n") -o.close() diff --git a/v1_api_demo/mnist/data/get_mnist_data.sh b/v1_api_demo/mnist/data/get_mnist_data.sh deleted file mode 100755 index 5a2e34026d..0000000000 --- a/v1_api_demo/mnist/data/get_mnist_data.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env sh -# This scripts downloads the mnist data and unzips it. -set -e -DIR="$( cd "$(dirname "$0")" ; pwd -P )" -rm -rf "$DIR/raw_data" -mkdir "$DIR/raw_data" -cd "$DIR/raw_data" - -echo "Downloading..." - -for fname in train-images-idx3-ubyte train-labels-idx1-ubyte t10k-images-idx3-ubyte t10k-labels-idx1-ubyte -do - if [ ! -e $fname ]; then - wget --no-check-certificate http://yann.lecun.com/exdb/mnist/${fname}.gz - gunzip ${fname}.gz - fi -done - -cd $DIR -rm -f *.list -python generate_list.py diff --git a/v1_api_demo/mnist/light_mnist.py b/v1_api_demo/mnist/light_mnist.py deleted file mode 100644 index 3340905435..0000000000 --- a/v1_api_demo/mnist/light_mnist.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle.trainer_config_helpers import * - -is_predict = get_config_arg("is_predict", bool, False) - -####################Data Configuration ################## - -if not is_predict: - data_dir = './data/' - define_py_data_sources2( - train_list=data_dir + 'train.list', - test_list=data_dir + 'test.list', - module='mnist_provider', - obj='process') - -######################Algorithm Configuration ############# -settings(batch_size=50, learning_rate=0.001, learning_method=AdamOptimizer()) - -#######################Network Configuration ############# - -data_size = 1 * 28 * 28 -label_size = 10 -img = data_layer(name='pixel', size=data_size) - - -# light cnn -# A shallower cnn model: [CNN, BN, ReLU, Max-Pooling] x4 + FC x1 -# Easier to train for mnist dataset and quite efficient -# Final performance is close to deeper ones on tasks such as digital and character classification -def light_cnn(input_image, num_channels, num_classes): - def __light__(ipt, - num_filter=128, - times=1, - conv_filter_size=3, - dropouts=0, - num_channels_=None): - return img_conv_group( - input=ipt, - num_channels=num_channels_, - pool_size=2, - pool_stride=2, - conv_padding=0, - conv_num_filter=[num_filter] * times, - conv_filter_size=conv_filter_size, - conv_act=ReluActivation(), - conv_with_batchnorm=True, - conv_batchnorm_drop_rate=dropouts, - pool_type=MaxPooling()) - - tmp = __light__(input_image, num_filter=128, num_channels_=num_channels) - tmp = __light__(tmp, num_filter=128) - tmp = __light__(tmp, num_filter=128) - tmp = __light__(tmp, num_filter=128, conv_filter_size=1) - - tmp = fc_layer(input=tmp, size=num_classes, act=SoftmaxActivation()) - return tmp - - -predict = light_cnn(input_image=img, num_channels=1, num_classes=label_size) - -if not is_predict: - lbl = data_layer(name="label", 
size=label_size) - inputs(img, lbl) - outputs(classification_cost(input=predict, label=lbl)) -else: - outputs(predict) diff --git a/v1_api_demo/mnist/mnist_provider.py b/v1_api_demo/mnist/mnist_provider.py deleted file mode 100644 index 4192339837..0000000000 --- a/v1_api_demo/mnist/mnist_provider.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. -# -#Licensed under the Apache License, Version 2.0 (the "License"); -#you may not use this file except in compliance with the License. -#You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#See the License for the specific language governing permissions and -#limitations under the License. -from paddle.trainer.PyDataProvider2 import * -from mnist_util import read_from_mnist - - -# Define a py data provider -@provider( - input_types={'pixel': dense_vector(28 * 28), - 'label': integer_value(10)}, - cache=CacheType.CACHE_PASS_IN_MEM) -def process(settings, filename): # settings is not used currently. - for each in read_from_mnist(filename): - yield each diff --git a/v1_api_demo/mnist/mnist_util.py b/v1_api_demo/mnist/mnist_util.py deleted file mode 100644 index 3fd88ae7ed..0000000000 --- a/v1_api_demo/mnist/mnist_util.py +++ /dev/null @@ -1,30 +0,0 @@ -import numpy - -__all__ = ['read_from_mnist'] - - -def read_from_mnist(filename): - imgf = filename + "-images-idx3-ubyte" - labelf = filename + "-labels-idx1-ubyte" - f = open(imgf, "rb") - l = open(labelf, "rb") - - f.read(16) - l.read(8) - - # Define number of samples for train/test - if "train" in filename: - n = 60000 - else: - n = 10000 - - images = numpy.fromfile( - f, 'ubyte', count=n * 28 * 28).reshape((n, 28 * 28)).astype('float32') - images = images / 255.0 * 2.0 - 1.0 - labels = numpy.fromfile(l, 'ubyte', count=n).astype("int") - - for i in xrange(n): - yield {"pixel": images[i, :], 'label': labels[i]} - - f.close() - l.close() diff --git a/v1_api_demo/mnist/train.sh b/v1_api_demo/mnist/train.sh deleted file mode 100755 index ca2b1ad9eb..0000000000 --- a/v1_api_demo/mnist/train.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e -config=vgg_16_mnist.py -output=./mnist_vgg_model -log=train.log - -paddle train \ ---config=$config \ ---dot_period=10 \ ---log_period=100 \ ---test_all_data_in_one_period=1 \ ---use_gpu=0 \ ---trainer_count=1 \ ---num_passes=100 \ ---save_dir=$output \ -2>&1 | tee $log -paddle usage -l $log -e $? 
-n "mnist_train" >/dev/null 2>&1 - -python -m paddle.utils.plotcurve -i $log > plot.png diff --git a/v1_api_demo/mnist/vgg_16_mnist.py b/v1_api_demo/mnist/vgg_16_mnist.py deleted file mode 100644 index a819b391c6..0000000000 --- a/v1_api_demo/mnist/vgg_16_mnist.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle.trainer_config_helpers import * - -is_predict = get_config_arg("is_predict", bool, False) - -####################Data Configuration ################## - -if not is_predict: - data_dir = './data/' - define_py_data_sources2( - train_list=data_dir + 'train.list', - test_list=data_dir + 'test.list', - module='mnist_provider', - obj='process') - -######################Algorithm Configuration ############# -settings( - batch_size=128, - learning_rate=0.1 / 128.0, - learning_method=MomentumOptimizer(0.9), - regularization=L2Regularization(0.0005 * 128)) - -#######################Network Configuration ############# - -data_size = 1 * 28 * 28 -label_size = 10 -img = data_layer(name='pixel', size=data_size) - -# small_vgg is predined in trainer_config_helpers.network -predict = small_vgg(input_image=img, num_channels=1, num_classes=label_size) - -if not is_predict: - lbl = data_layer(name="label", size=label_size) - inputs(img, lbl) - outputs(classification_cost(input=predict, label=lbl)) -else: - outputs(predict) diff --git a/v1_api_demo/model_zoo/embedding/.gitignore b/v1_api_demo/model_zoo/embedding/.gitignore deleted file mode 100644 index 908f5a3fb2..0000000000 --- a/v1_api_demo/model_zoo/embedding/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -baidu.dict -model_*.emb diff --git a/v1_api_demo/model_zoo/embedding/extract_para.py b/v1_api_demo/model_zoo/embedding/extract_para.py deleted file mode 100755 index 570b90c1f7..0000000000 --- a/v1_api_demo/model_zoo/embedding/extract_para.py +++ /dev/null @@ -1,113 +0,0 @@ -#!/bin/env python -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-""" -Example: - python extract_para.py --preModel PREMODEL --preDict PREDICT \ - --usrModel USRMODEL --usrDict USRDICT -d DIM - -Options: - -h, --help show this help message and exit - --preModel PREMODEL the name of pretrained embedding model - --preDict PREDICT the name of pretrained dictionary - --usrModel usrModel the name of output usr embedding model - --usrDict usrDict the name of user specified dictionary - -d DIM dimension of parameter -""" -from optparse import OptionParser -import struct - - -def get_row_index(preDict, usrDict): - """ - Get the row positions for all words in user dictionary from pre-trained dictionary. - return: a list of row positions - Example: preDict='a\nb\nc\n', usrDict='a\nc\n', then return [0,2] - """ - pos = [] - index = dict() - with open(preDict, "r") as f: - for line_index, line in enumerate(f): - word = line.strip().split()[0] - index[word] = line_index - with open(usrDict, "r") as f: - for line in f: - word = line.strip().split()[0] - pos.append(index[word]) - return pos - - -def extract_parameters_by_usrDict(preModel, preDict, usrModel, usrDict, - paraDim): - """ - Extract desired parameters from a pretrained embedding model based on user dictionary - """ - if paraDim not in [32, 64, 128, 256]: - raise RuntimeError("We only support 32, 64, 128, 256 dimensions now") - - fi = open(preModel, "rb") - fo = open(usrModel, "wb") - - # write filehead - rowIndex = get_row_index(preDict, usrDict) - newHead = struct.pack("iil", 0, 4, len(rowIndex) * paraDim) - fo.write(newHead) - bytes = 4 * paraDim - for i in range(0, len(rowIndex)): - # find the absolute position of input file - fi.seek(rowIndex[i] * bytes + 16, 0) - fo.write(fi.read(bytes)) - - print "extract parameters finish, total", len(rowIndex), "lines" - fi.close() - - -def main(): - """ - Main entry for running paraconvert.py - """ - usage = "usage: \n" \ - "python %prog --preModel PREMODEL --preDict PREDICT" \ - " --usrModel USRMODEL --usrDict USRDICT -d DIM" - parser = OptionParser(usage) - parser.add_option( - "--preModel", - action="store", - dest="preModel", - help="the name of pretrained embedding model") - parser.add_option( - "--preDict", - action="store", - dest="preDict", - help="the name of pretrained dictionary") - parser.add_option( - "--usrModel", - action="store", - dest="usrModel", - help="the name of output usr embedding model") - parser.add_option( - "--usrDict", - action="store", - dest="usrDict", - help="the name of user specified dictionary") - parser.add_option( - "-d", action="store", dest="dim", help="dimension of parameter") - (options, args) = parser.parse_args() - extract_parameters_by_usrDict(options.preModel, options.preDict, - options.usrModel, options.usrDict, - int(options.dim)) - - -if __name__ == '__main__': - main() diff --git a/v1_api_demo/model_zoo/embedding/paraconvert.py b/v1_api_demo/model_zoo/embedding/paraconvert.py deleted file mode 100755 index ce7a70efc4..0000000000 --- a/v1_api_demo/model_zoo/embedding/paraconvert.py +++ /dev/null @@ -1,159 +0,0 @@ -#!/bin/env python -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Example: - python paraconvert.py --b2t -i INPUT -o OUTPUT -d DIM - python paraconvert.py --t2b -i INPUT -o OUTPUT - -Options: - -h, --help show this help message and exit - --b2t convert parameter file of embedding model from binary to text - --t2b convert parameter file of embedding model from text to binary - -i INPUT input parameter file name - -o OUTPUT output parameter file name - -d DIM dimension of parameter -""" -from optparse import OptionParser -import struct - - -def binary2text(input, output, paraDim): - """ - Convert a binary parameter file of embedding model to be a text file. - input: the name of input binary parameter file, the format is: - 1) the first 16 bytes is filehead: - version(4 bytes): version of paddle, default = 0 - floatSize(4 bytes): sizeof(float) = 4 - paraCount(8 bytes): total number of parameter - 2) the next (paraCount * 4) bytes is parameters, each has 4 bytes - output: the name of output text parameter file, for example: - 0,4,32156096 - -0.7845433,1.1937413,-0.1704215,... - 0.0000909,0.0009465,-0.0008813,... - ... - the format is: - 1) the first line is filehead: - version=0, floatSize=4, paraCount=32156096 - 2) other lines print the paramters - a) each line prints paraDim paramters splitted by ',' - b) there is paraCount/paraDim lines (embedding words) - paraDim: dimension of parameters - """ - fi = open(input, "rb") - fo = open(output, "w") - """ - """ - version, floatSize, paraCount = struct.unpack("iil", fi.read(16)) - newHead = ','.join([str(version), str(floatSize), str(paraCount)]) - print >> fo, newHead - - bytes = 4 * int(paraDim) - format = "%df" % int(paraDim) - context = fi.read(bytes) - line = 0 - - while context: - numbers = struct.unpack(format, context) - lst = [] - for i in numbers: - lst.append('%8.7f' % i) - print >> fo, ','.join(lst) - context = fi.read(bytes) - line += 1 - fi.close() - fo.close() - print "binary2text finish, total", line, "lines" - - -def get_para_count(input): - """ - Compute the total number of embedding parameters in input text file. - input: the name of input text file - """ - numRows = 1 - paraDim = 0 - with open(input) as f: - line = f.readline() - paraDim = len(line.split(",")) - for line in f: - numRows += 1 - return numRows * paraDim - - -def text2binary(input, output, paddle_head=True): - """ - Convert a text parameter file of embedding model to be a binary file. - input: the name of input text parameter file, for example: - -0.7845433,1.1937413,-0.1704215,... - 0.0000909,0.0009465,-0.0008813,... - ... 
- the format is: - 1) it doesn't have filehead - 2) each line stores the same dimension of parameters, - the separator is commas ',' - output: the name of output binary parameter file, the format is: - 1) the first 16 bytes is filehead: - version(4 bytes), floatSize(4 bytes), paraCount(8 bytes) - 2) the next (paraCount * 4) bytes is parameters, each has 4 bytes - """ - fi = open(input, "r") - fo = open(output, "wb") - - newHead = struct.pack("iil", 0, 4, get_para_count(input)) - fo.write(newHead) - - count = 0 - for line in fi: - line = line.strip().split(",") - for i in range(0, len(line)): - binary_data = struct.pack("f", float(line[i])) - fo.write(binary_data) - count += 1 - fi.close() - fo.close() - print "text2binary finish, total", count, "lines" - - -def main(): - """ - Main entry for running paraconvert.py - """ - usage = "usage: \n" \ - "python %prog --b2t -i INPUT -o OUTPUT -d DIM \n" \ - "python %prog --t2b -i INPUT -o OUTPUT" - parser = OptionParser(usage) - parser.add_option( - "--b2t", - action="store_true", - help="convert parameter file of embedding model from binary to text") - parser.add_option( - "--t2b", - action="store_true", - help="convert parameter file of embedding model from text to binary") - parser.add_option( - "-i", action="store", dest="input", help="input parameter file name") - parser.add_option( - "-o", action="store", dest="output", help="output parameter file name") - parser.add_option( - "-d", action="store", dest="dim", help="dimension of parameter") - (options, args) = parser.parse_args() - if options.b2t: - binary2text(options.input, options.output, options.dim) - if options.t2b: - text2binary(options.input, options.output) - - -if __name__ == '__main__': - main() diff --git a/v1_api_demo/model_zoo/embedding/pre_DictAndModel.sh b/v1_api_demo/model_zoo/embedding/pre_DictAndModel.sh deleted file mode 100755 index f61c65a935..0000000000 --- a/v1_api_demo/model_zoo/embedding/pre_DictAndModel.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-set -e -set -x -BASE_URL='http://paddlepaddle.cdn.bcebos.com/model_zoo/embedding' - -DOWNLOAD_ITEMS=(baidu.dict model_32.emb model_64.emb model_128.emb model_256.emb) -ITEM_MD5=(fa03a12321eaab6c30a8fcc9442eaea3 - f88c8325ee6da6187f1080e8fe66c1cd - 927cf70f27f860aff1a5703ebf7f1584 - a52e43655cd25d279777ed509a1ae27b - b92c67fe9ff70fea53596080e351ac80) - -for ((i=0; i<${#ITEM_MD5[@]}; i++)) -do - FILENAME=${DOWNLOAD_ITEMS[${i}]} - REAL_MD5=`wget ${BASE_URL}/${FILENAME} -O - | tee ${FILENAME} | md5sum | cut -d ' ' -f 1` - EXPECTED_MD5=${ITEM_MD5[${i}]} - [ "${EXPECTED_MD5}" = "${REAL_MD5}" ] -done diff --git a/v1_api_demo/model_zoo/resnet/.gitignore b/v1_api_demo/model_zoo/resnet/.gitignore deleted file mode 100644 index 7a64209b62..0000000000 --- a/v1_api_demo/model_zoo/resnet/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -fea_output/ -features/ -model.list -ResNet_50.dot -ResNet_50.png diff --git a/v1_api_demo/model_zoo/resnet/classify.py b/v1_api_demo/model_zoo/resnet/classify.py deleted file mode 100755 index 6074cc1d3a..0000000000 --- a/v1_api_demo/model_zoo/resnet/classify.py +++ /dev/null @@ -1,312 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys -import cPickle -import logging -from PIL import Image -import numpy as np -from optparse import OptionParser - -import paddle.utils.image_util as image_util - -from py_paddle import swig_paddle, DataProviderConverter -from paddle.trainer.PyDataProvider2 import dense_vector -from paddle.trainer.config_parser import parse_config - -logging.basicConfig( - format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s') -logging.getLogger().setLevel(logging.INFO) - - -class ImageClassifier(): - def __init__(self, - train_conf, - model_dir=None, - resize_dim=256, - crop_dim=224, - use_gpu=True, - mean_file=None, - output_layer=None, - oversample=False, - is_color=True): - """ - train_conf: network configure. - model_dir: string, directory of model. - resize_dim: int, resized image size. - crop_dim: int, crop size. - mean_file: string, image mean file. - oversample: bool, oversample means multiple crops, namely five - patches (the four corner patches and the center - patch) as well as their horizontal reflections, - ten crops in all. 
- """ - self.train_conf = train_conf - self.model_dir = model_dir - if model_dir is None: - self.model_dir = os.path.dirname(train_conf) - - self.resize_dim = resize_dim - self.crop_dims = [crop_dim, crop_dim] - self.oversample = oversample - self.is_color = is_color - - self.output_layer = output_layer - if self.output_layer: - assert isinstance(self.output_layer, basestring) - self.output_layer = self.output_layer.split(",") - - self.transformer = image_util.ImageTransformer(is_color=is_color) - self.transformer.set_transpose((2, 0, 1)) - self.transformer.set_channel_swap((2, 1, 0)) - - self.mean_file = mean_file - if self.mean_file is not None: - mean = np.load(self.mean_file)['data_mean'] - mean = mean.reshape(3, self.crop_dims[0], self.crop_dims[1]) - self.transformer.set_mean(mean) # mean pixel - else: - # if you use three mean value, set like: - # this three mean value is calculated from ImageNet. - self.transformer.set_mean(np.array([103.939, 116.779, 123.68])) - - conf_args = "is_test=1,use_gpu=%d,is_predict=1" % (int(use_gpu)) - conf = parse_config(train_conf, conf_args) - swig_paddle.initPaddle("--use_gpu=%d" % (int(use_gpu))) - self.network = swig_paddle.GradientMachine.createFromConfigProto( - conf.model_config) - assert isinstance(self.network, swig_paddle.GradientMachine) - self.network.loadParameters(self.model_dir) - - data_size = 3 * self.crop_dims[0] * self.crop_dims[1] - slots = [dense_vector(data_size)] - self.converter = DataProviderConverter(slots) - - def get_data(self, img_path): - """ - 1. load image from img_path. - 2. resize or oversampling. - 3. transformer data: transpose, channel swap, sub mean. - return K x H x W ndarray. - - img_path: image path. - """ - image = image_util.load_image(img_path, self.is_color) - # Another way to extract oversampled features is that - # cropping and averaging from large feature map which is - # calculated by large size of image. - # This way reduces the computation. - if self.oversample: - # image_util.resize_image: short side is self.resize_dim - image = image_util.resize_image(image, self.resize_dim) - image = np.array(image) - input = np.zeros( - (1, image.shape[0], image.shape[1], 3), dtype=np.float32) - input[0] = image.astype(np.float32) - input = image_util.oversample(input, self.crop_dims) - else: - image = image.resize(self.crop_dims, Image.ANTIALIAS) - input = np.zeros( - (1, self.crop_dims[0], self.crop_dims[1], 3), dtype=np.float32) - input[0] = np.array(image).astype(np.float32) - - data_in = [] - for img in input: - img = self.transformer.transformer(img).flatten() - data_in.append([img.tolist()]) - # paddle input: [[[]],[[]],...], [[]] is one sample. - return data_in - - def forward(self, input_data): - """ - return output arguments which are the Outputs() in network configure. - - input_data: py_paddle input data. - call forward. - """ - in_arg = self.converter(input_data) - return self.network.forwardTest(in_arg) - - def forward(self, data, output_layer): - """ - return output arguments which are the Outputs() in network configure. - - input_data: py_paddle input data. - call forward. - """ - input = self.converter(data) - self.network.forwardTest(input) - output = self.network.getLayerOutputs(output_layer) - res = {} - if isinstance(output_layer, basestring): - output_layer = [output_layer] - for name in output_layer: - # For oversampling, average predictions across crops. - # If not, the shape of output[name]: (1, class_number), - # the mean is also applicable. 
- res[name] = output[name]['value'].mean(0) - - return res - - def predict(self, data_file): - """ - call forward and predicting. - - data_file: input image list. - """ - image_files = open(data_file, 'rb').readlines() - results = {} - if self.output_layer is None: - self.output_layer = ["output"] - for line in image_files: - image = line.split()[0] - data = self.get_data(image) - prob = self.forward(data, self.output_layer) - lab = np.argsort(-prob[self.output_layer[0]]) - results[image] = lab[0] - logging.info("Label of %s is: %d", image, lab[0]) - return results - - def extract(self, data_file, output_dir, batch_size=10000): - """ - extract and save features of output layers, which are - specify in Outputs() in network configure. - - data_file: file name of input data. - output_dir: saved directory of extracted features. - batch_size: sample number of one batch file. - """ - if not os.path.exists(output_dir): - os.mkdir(output_dir) - - sample_num = 0 - batch_num = 0 - image_feature = {} - image_files = open(data_file, 'rb').readlines() - for idx, line in enumerate(image_files): - image = line.split()[0] - data = self.get_data(image) - feature = self.forward(data, self.output_layer) - # save extracted features - file_name = image.split("/")[-1] - image_feature[file_name] = feature - sample_num += 1 - if sample_num == batch_size: - batch_name = os.path.join(output_dir, 'batch_%d' % (batch_num)) - self.save_file(image_feature, batch_name) - logging.info('Finish batch %d', batch_num) - batch_num += 1 - sample_num = 0 - image_feature = {} - if idx % 1000 == 0: - logging.info('%d/%d, %s', idx, len(image_files), file_name) - if sample_num > 0: - batch_name = os.path.join(output_dir, 'batch_%d' % (batch_num)) - self.save_file(image_feature, batch_name) - logging.info('Finish batch %d', batch_num) - logging.info('Done: make image feature batch') - - def save_file(self, data, file): - of = open(file, 'wb') - cPickle.dump(data, of, protocol=cPickle.HIGHEST_PROTOCOL) - - -def option_parser(): - """ - Main entry for predciting - """ - usage = "%prog -c config -i data_list -w model_dir [options]" - parser = OptionParser(usage="usage: %s" % usage) - parser.add_option( - "-j", - "--job", - action="store", - dest="job_type", - help="job type: predict, extract\ - predict: predicting,\ - extract: extract features") - parser.add_option( - "-c", - "--conf", - action="store", - dest="train_conf", - help="network config") - parser.add_option( - "-i", "--data", action="store", dest="data_file", help="image list") - parser.add_option( - "-w", - "--model", - action="store", - dest="model_path", - default=None, - help="model path") - parser.add_option( - "-g", - "--use_gpu", - action="store", - dest="use_gpu", - default=True, - help="Whether to use gpu mode.") - parser.add_option( - "-o", - "--output_dir", - action="store", - dest="output_dir", - default="output", - help="output path") - parser.add_option( - "-m", - "--mean", - action="store", - dest="mean", - default=None, - help="mean file.") - parser.add_option( - "-p", - "--multi_crop", - action="store_true", - dest="multi_crop", - default=False, - help="Wether to use multiple crops on image.") - parser.add_option("-l", "--output_layer", action="store", - dest="output_layer", default=None, - help="--job=extract, specify layers to extract "\ - "features, --job=predict, specify layer of " - "classification probability, output in resnet.py.") - return parser.parse_args() - - -def main(): - """ - 1. parse input arguments. - 2. 
predicting or extract features according job type. - """ - options, args = option_parser() - obj = ImageClassifier( - options.train_conf, - options.model_path, - use_gpu=options.use_gpu, - mean_file=options.mean, - output_layer=options.output_layer, - oversample=options.multi_crop) - if options.job_type == "predict": - obj.predict(options.data_file) - - elif options.job_type == "extract": - obj.extract(options.data_file, options.output_dir) - - -if __name__ == '__main__': - main() diff --git a/v1_api_demo/model_zoo/resnet/example/.gitignore b/v1_api_demo/model_zoo/resnet/example/.gitignore deleted file mode 100644 index 4a2b5962a6..0000000000 --- a/v1_api_demo/model_zoo/resnet/example/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*image_list_provider_copy_1.py diff --git a/v1_api_demo/model_zoo/resnet/example/__init__.py b/v1_api_demo/model_zoo/resnet/example/__init__.py deleted file mode 100644 index f662d68263..0000000000 --- a/v1_api_demo/model_zoo/resnet/example/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/v1_api_demo/model_zoo/resnet/example/cat.jpg b/v1_api_demo/model_zoo/resnet/example/cat.jpg deleted file mode 100644 index 47b01db90eddc46ff845f10bc2accaf2364c272d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 12881 zcmbWdcTf{f`0yEeuTla8LbD zr~xR+$tlRmC@Cl?sHiBZX<6uKX=rFUnC~#KaC6?};pPN^c)^lFynN#PAdv6_5pgML zSy|b;LNFyLL{U;k7VF9uvd!T!e|IhNb7r;abSR>LQA>soNGZB$6 z5&azi-2JytGNS(_fd5m7h)GDv$SEkPsA>Kw;1~hKL?k4{q$Ffyq@@3}BmT_;NSVl( z?@6nW-!XEe;PYdFL}ivz@~bxWu^P|(0LwV}M^jO=u>(0c1q6kJMMP!gFi_4$CuCD)w ziwHpSzp?&}{~PT8!Nv5Ci*!#(o zU9y=$e_rDD-g^|(uaISk(zB$&sPQ1c_*YR@Q8nl(Vd*b!8N_0b-Hu7~$1ck0(#~Ij zPK+rNS@h|6S>!bo$ltU)m)&tp^8}85HcA!ske`pqQlm9LnSb)yP48Z^U(CPnJytcw&UEo4~VtxQi3grmTbubJdP(D5nrABz1{wVGxSH8Gj8)1#^BDv+rx#0{IFhADg}_A@_cK`o5CI2TE;JwK z@82)2bZD0wg=vqCR5*=fnAZ0GUeHiQgxp$xtNd&vZ)HY$rk)uGG8Rj@DBYn*+MgX* zb1+HDaIfIoPKwmBM>D^c){Y_p%`4N+=8lv^?C%7i&w)RL`HIR&FrJ*BG&BHtm1t-U zqdQjn72-IL^+%ZL_-p|qHP8wBtbvWAfYVq5uFr{?9277V*V3wtCtV0eu9RuQS7$&1|-GlgMK~$s*r3S zT20;s%|Vbl-zY!<)Q>zKeU{cF#XI>3M<+$X;I;snFDnHXLawfKPXt=YSU#NQHqR3b@WYwW^SS>L-N<8J2@>|DNcbvQk;6PjP1J6R0Y$)-z&1;|Yx(o!VS36E; zx|Mpjb1hgrex&k0;*zsYlVr9!-8|q|$&K?JOs#eo%YwLOk0W@lnR|TKD6KWx>u0Tc zhQgfDGKe!5=B1E(O2l*+(Sl?yC0>#*O}fXq`~&nChV$|`&MGDTE})FPqp* zOv64>C%{YBi9Y-5Ak7^YU!?g~bMP?BR}Q5pjh=Y5o-XrP^DBD|18+oAqY~l;(Mr!l z%OLc2ph>B>mEkJMkVyQfY7wmN&qG6?a}lnkl+Ei)N=~zrXxYF_J71?#t@&HbjH}wP zTET z>};FXMhlNr)=4>dntdJFGh4Zfn3XwZ6ZVGgsWu>H5|TWg1YuzxlXj&1w6_;B@GRdY z+oJUVD)M{M=G#ssOjhP|blXX7OLqmFsfM#%ml+g)*1ZKQCJ}BoSO{|dbXv^h9{u%P zEhJA$tMj}M^ld6!_eI${%n^H8sjUtgGLWpK%DFbJx)v$dgT=S;^YS)|OENLpShjmj z?tq^)z>Lru4tb*GdG##QvX4$xwwG09+=SUoW!Flr-+&c_`(w9P%{y8@tG7T4CNL;R z&D$&rIYvxY2V`!5dcoWl6lNalHwtl+TR*S8lcP=DjDED)R_E$rseBh?(yX&v3c%zB zv13v>I~bepwHPFs6qx!;K?k)-y|&3nF$vGkX}WSq&J46LP5{uts<#aIxld2s6E#{J zWq{vGSDDJVc|d<`(T;Aoa)}RzobQp!E0N$KI%vI*4CW}7#Vm6PD4bbG`_b>fK-zos 
zqB=+(u3@(#Ig%~8&nZc$5FX*_FYNN)#JDtbFGFiGTg=wAc8kfiOO;eA9GB!1AG(8< zlmc0DCgAj9)i9bPLv7MvVY640&QEDLYY&hZU;G3(zvh8^k6vJvw%0(l0HWwnUTn1H zb+%dl#6mJPnWU(|3m%VMl{7I{(pczLh0L>QK8y=V&w~kC4+UQGlm{w?t&bM`{L(}= z367Du2gCrdmzl$YRZ<)WFe?=dX2Q*gBy9@1N{h(d)lo>RinJDsVqAv3afeyRQcxaK{LM zj23D6FPEbaTdSLEB>VV?2tnmO(tjamr8eTv!0M#n50u`sHO{Jj?buZI06f2)ZGRye zXWm!&&ow?@LK=jII$AE4?pis({{k#y z8T?^)t7|jYOZ^3=`GMR>do<_KoYA*1sE%Qc_Ge`I8;(SNp%R}s8P zBeRo9;clQ;FbkDbe!j?O;eTlswJf%&T3?D1aIlxQG|raE@}j?0#hL-t3*f_ICT6g|A7` zY^WVLDefCkjizbNSneaixb#LDbpwFaIE3&rj|Xkl$jMIgaRG)@2%zDd^r<-mahjqjhfPf5 z&a~cW4TaBttft9$hC%(3Cx$<1GxalVD072y(>+rk?E=sEMw^pS==jD~=^w%)a4mw+ z&H5*OeEdMZMsLfn+sLGGJOeM(M7v)qHDi_c4!dJg$e*u6VWxilBQPvbLX9?R8tV+y zF;KP4X=>Kvsv@&AcdPKav>u#j0yr?cG*(5T+xkj}$lO9U?|}U}b@tOr6HNM`-;>UN zoEs;fD)`E@t{hvin;)i_tKZYw2s^&KHKToCSg0cC$SdlfQuR#e;}>(DRMJTqc+Rft zusD?DAZgDrP9pvjAmcx}cOa2#3)>3|g~F_+w)^Fs4B{#BXdtW(i)=EYI)+5}Lp&w5 z#`Y?uq%0l0k0z};TvDI7+>F&-1vEsMxNht*uh_<;^$$sfZf>&|dzf@VlVd&v{uH-5 ztz!4q4$Pz4I5=$T<(~kQmW^%#8SNm25KbVtWId-!340*3fe>`h%cB6ir2K=ZTJ=6Q zq^t-;hkhuC1f1W9-2XC(|9wC2RAdLR|5$V=n1YD&d~6 zN&juxK=|71!K>Q!1LkWF>s-Dh;#aBIr#B(iRF2&$d3uYl zsWSLkt6@=CIu@Wqo9}h{a2NAorMBfs{h%eu&EAiRS&`3utn+@!sCt>j{KxA`dsDEf zuji~>g}qTlqFxHwU(vD4N5=NV-&CsGj55``k-U?t#9v{_LzZ@ zZMP6BZ39=2h26m12rtrVKI1trvl@NYV4|W_h4K%fm@Ul^?bzwyb!a3{4ySCW{LDKo#|PT4ZGNWJqrFK+RBwIBiM}bhJhfC=53Ar6K4(IYRdI)|eG!M1 zWHRC4erWErvd!*j=G4KrU#PN1Y@$;Icvg@8^uD@GtRvCVhHAwwh_I$Y!k*y!Jdvf$u zSJ!YTR1-7IK@B~yXv_CjHkxkGqbpR#7|EolzCuhx4!Mi z6~g*M^GEt$fSH|8_-LlFUTgel;LgZu6v@GC&Z39a($56b;wNT$T3VD&RgQ`o2w|3i z5kqGDp@;lc(l;MzUk1RfY`iLC{L`_T#KJ_ttB8FS{zC%hV^rFgj{f4IDv5+NH zAht}!|8kI(wy6%eQb!XHt0MP@R!vT{-gy=_*y?j+bO)uO|8$41*4=Y^HXCnf#Y`(8 z`PH;R64qP6+%{vxnHa6nmE0i!)&QzyX#QvW@9Ds?%y==4^Nt79YrVjAGRVgG+&Hh2 zbj!ge;l_ongD{`c2nksxmF`*>$ zUR*M5yNWs_v0$60XFsGwGyC4qN5tfW5kF}%Q3BTKO}dQWpoePS4|+PY#NE^%er@?d zF0l=prWn%<$cv-s@I^Vd3Z;N|d@7Lqgo`>|A8SF^?;9<#RJ-w?mo~7Y0lZ?IZAh#B zndd8X`;fy?*0Q6WI>;i2X(;O66tcj)Wu&oyKwkct%jU>zn|?tAUe+AOi%SAiX)2oa zz=~i$s6<1cyEOo-w1vF`6Pv3yT;PVs)GLJwdfHJEmxa6b>5nXIf_MuH?p!oHA6`+H zwyU3MST1k>M4SKBHvBK32tA{Q1iw@2`tt5~`H7YD0P*@%ah-Ld_iBk_fkhJ>tMt22 zI;)1ucI!s)sPb3tlO~T@iw@&jRsS9nwa-eVYTgy%Zr^R6c6Ct!U#fA=|aOSvsK;Byzm|pY_Qy*5`t&PN723wZmNI z$u=rAtuUm~<>VpV4;!7XGJkD@?@U(g57}dn@)Q^An?W-0M`q8fxe;z(Bj6=hI+4t? z?)aJScHap$fqB@jmK-3!o#Uuro*jblEQ@)fv4!I5%7gr_!ngN5afH0qtHo6@As zxQyBZPx-Y;W1{r|4G)`RcS>7B-_~3Hm=|A~4me4OPVx6wItV=`8@%0wf7BHH#5%?8 z8t{wMh!|KPLsSrnE~{MsQ&KlTn-ca)1H8pLrXYVDz*!#Jrp=fYtzasZux+U3<@IMa zr0xMdPcId}!{@PTIRA@yW6FI^L|6C z1JeX`{|+x?QrB|oWS$S2pkT{noyByiHYJul>#l9d2%GfTXQyL*8#O<#{kuNDi?35B zv7pE!`j8`fw>H5gN9l@pOO9I9eHLO?;w=vbfi|Lg_py zAn9H!0x{vN^D`c>0M;fy2v#%#_2P$hYLQGHuarco7^O$5*vr=-8V@sfXNW9U$sG^| zV3W}=1@n=Pm-WUevcx^N#ptZVzkmS8sLScUfQCZ1?;h^Jf+NjQK+0}o#jGAn)IBx! 
zY-i(gLRQTAq^sU#A`9t1|ATV;zDh~~a#}8?N}q1(Hy`Fun?&L~`fIR{J*WKsqavKZ zM3<`?XU=HfW%WXLHCRB(_KU-XP|9v)n#gfJZ$2l)LYNS38VJV3abj#tZ%^b^iD`oU zqQ9J1)?`{t*mn#miFTJ@Hy9!CZo0LC>$#)ju@*dmgf|S+j}7Lq)>W}I&Nbh$8-Eq) zmATl!f^* zzxN|lYZy(c@yQ-jW@aV4J-wudPfgGrA?`_!hM!_%we%G@Mbw` zOOyl#B`%9rK6c@4Zq%7US*E6EeP~xjypyJkbr+`W?QO7{v0prOeX7EIs{2YSnG&aj zAJQ#+pKK=4K-?u{sBgN3yT6h;|9V{CBB|r_%>Me4qxAVh#T|L1F3fn;LiStu7raA+PWnm7iVXt!0lkhJo-jO{*0BupmAdDY8~y?m9q#5nLLdJQu|ApKc#~4xeHfNJ z=-FVaPudr2$TpIav;l4xkI=5Z_z$R| zNGxnIk)(#(mQ=X}TwebX75pj?^eCtmt~YM^iI{DVRg=ST_}W}xqmsMr(^|jw?Ue|| zW>v+1e=na+pXja_Z6Ij?IGFn-J>oY1i*9m1N^y)=@umm)-8N`(lv863TQLlCEpk{Z zV5D-0%xl;j(`X`^Gst#FeWB%Ssrcna_lTD-%iZmYoMEwzZ{1rsF`jD2lAexQKKQ}= z9C2_4f{o|bv?6C;$GW#{9{Zd|;&dYNp#n;== zU(?9yJzdm`N^}B{H4AH-AE?d93`ZbFhYClk7l*Prx1cL!X=qpXvyTO5mgX5bCan%( zzZc$UgnS_PQIXr}?Ebz1TWP|2>10g5CQjueMlWn>jD3Lt#(_rJg;=v5ucXps#Zh$u%HE3(8f^nMdidkz_jE99#r5K5}?cbDLsp!fGqt3BN)6_`u@}hkQ zYU0-ZVzu!MaR=hBj&_bybwZ~8}C$MqNL$#!_^ zioNdv+KOnDqUJB)QTD+oL4L8{%JpHTYkyX&R< zt%O@XdK->+5)-Eo@V%slV<`7SxBSq@Xl8L#lx(p-D@X6%&6#zksiWN4I=7?nq@=Ks!))ZJ`7ew*Z0uJ%4}M)iGu_g9av z7m5Q*%X39#Dj_Q7xfR%`lc5@EU-xEB`=0rFeHbghJ?TN~&7Vl6pc^Dusjpw2 z?cAuE4PUqYQ<(CKp35h~2}MvNl-9U{Gv3xyg$dd8HO_B=xv09=F%0znKKjp0?e-=DSg^Qk99uI-%B1nbGd4l z^!I#&hm1T)8`Qb;Dr8vhrCyik0n9N!1OQIVwhM4Hhfq4(bG^6`+1Vf%mgU4~zxo^k}7Pe`s3IwMBz*(p{;;1?DostD6L$V!7KLqsGJ8OqQNDhpT^nJL;jJ!Wl(KhojSJ5-*w4G+B z+QZ3ps))-?I4n{4UY7QEV+MIL@q1=m8liNv;kmr|%4=&argi?hEGwEl5K8ByS)q=x zjGKa>x>*DjB)c`$*^IOxrKw`9YHAVYgZzqAiQZX2p7di;e+dW|MqLq)+DQzrUW3cM*OA!8%uh;_;8dNp*v^i zcAQ=@W4px}>v*_D7V0xJ%#~Q4Be%i?{y;HGDlF%=0X^p2nV11TQ*CnlCcX5zV4k^P zx=Cv#IcnK>*z3w%nuCp3%SnnpbV}juV758sJ}YTM#(biCk&_eSs+2e-bCW37+>ctO zHJzWvCAr}(K38zS&13riZaZ51AUSd_UX&}qgcN^YFM2WC!mU}%0!hC}t*872SdX>mgdQj8#+{a?CW`(A zh=IN^ysUUOC>6aY>9d{cAT|P9i+C9J#W<-ZFxc^_ot~wS&R$E;gw`mD10y-|vz)58 zD%bYuqM+3sx-Qt1rhZsZ`TTtn_;;eTy9xsS5F+joC9PYW&IaF1D4!-YUE&^0Al>9b zNa&;igTs=MH5wTD?2In1h*Wf(wzuK|+wz|?&VGf@KeW48FK-@4wat>+RkbieLVq5a z)^DBq^yY65#ER?0>=Jb~DLQ;Hx$nb9b64Ii!$9Pby8c>JdUpf_`Q40aavzPYz8Qy1 zoibvjucYHhY6y)>#Eok83@x`8d7Ux;9NRDe!J4{ylOQL2qx+m=@9Hl=v=*Ubdl+66Q?mEp%Jpy8$9{;<(%e%- zx43U7t9&VzXZG@Rp^Ug-&83DbM-B9R31|!!`r; zo`xhYoTj6nRv%rbPvO^}n!YjXKy3zZS^*oC$l(-wMth6FxzC#!X?(ok>rVm9?4P;o zChLs9uXBGk^11Kx{r)FlMycPReUq5yvkep1WXI(T{@FZZ*(APd#ZUf+17|U(aerif zE!2tKfj(n#(yP5*8(!PYfiroA2 z*xDP19?;p3bW0=|srC2!e6ow%Y`c@*DnBBzV`@rW!l-imm~#{_U*Y`VExy?2{gZ@p zmfl_-99UcCvoqBGqCzeXJMbZ(zUAoAQ;@u9J}zTjLxjHLt`zr)<+Qclnw?%(!pu=w zkbsq`2=&9?^9)t1?Z>exL|n}jMy5-K9)XTN(`{@-8g}da8|qEbBMMu!F)`^w9a(je zlgo81z35{;bUV!#;0<9=v|wJ(+if=BFst>LVDb?=L(v^E%*VO6n(d)GS=8i)&MD`^1a1EAw#>nu zyG0tL_f4kn7JBGQ>r5lx7?+1B4KASesqz`i`-sgk zGu7jJs;6{xq@K*;Uh;SFuX{o-_QuS!!=M}BX~nUj}ZwjAo(Un ze0fyzbiF}E&TCRUb67Hjq{3U@Hf^Ab?-2HzE40ef{ij1vWpPiloWkYVv@t@gh995A z8~fVcx-N#FJ@YOR96~1oU+l!w^&~^q^=PYPkBUPhX{;n@(=^RKp9Q>%ox&U3j_BI1vmwTf2^Awbl)}8A{P& zo-zGCOg4J(%jC^;SBLwERs!$4Ly?$~-gZNm;SHfutUI-bkT;Q%R_iY|_TSv~WY~Gs zlmV0geAKf)m@WN*f0QquIP4r0&^PvA( z3PH&{4FFXA=y-Y`_Xyk_r{rQcjWic(*&u<>!$>^vW}WGxZLoMAmFR zh?jpzK6@Sirsh$vg+j_MZrMW1j@u2Rt(d2KND?psAYe6-<+z~=mRa|4SE`*#=udIY zM#rStgjURpS#`zg>?1R}p9#0<yw@WNFwc^Q=+asJr!@j^_jgRcA6ncvSeNp^CZRGlJ*rVf2nauA5RaZ z+;htx82nl2zg~g;V@j)=y&U<-zKLIK0HP~ zTuk1!wgJ=EpF)PWQ`qdO_Nk^}gtP^+cSVBHz!j}yY>UV=84z>}9Nx17t=zFgEQS9{ z;{QNMVWb?ClW;~|IC*U5+%$f7>7!{^M6RX1Uc?JLRmCUDy&<3Xk&xrkib>9;@dLdO z_h;xir)VUY$2JMn!xaKaf>??OzlYOb&K5g~AH_|!Hs>TAW`rI?r^3zfUu>#BB@Y5) ze{z7YKGl-Pq38>K67g3+|?lDwh-IPVot)g!D?6PGjM;#oMn zlMGsj`#&b*y_FX!sr>4czQ7`O-m|68H=;+&t?#1yg)#EQ;b3G`blJ_dX~+5l$ta;2 
zR<;Gy;^Je7Ojru{k}sE0CrQywB+i3?nlej#3qPY@A0SX0KpT*UOa8Xwoos6{8G^|jRj!V*$7U!Imw8HcG2CUHGp&Efq}_jAf;*-D ze*t`yTX>V0qK-#pvKKiDo)MqWu6f%s=&g2>6xx@f+Tvwd!Y*eeVmLHpdz%ND;;Ve^ zRH~c*e$WeI2Pce2kJ;ocSVbBu@KhDQNNUYo+czHc-pLVb2ND2j1e_Zdv%1#5E4|Y` z&XPqNHTSsY4`O?1$-JefnpS>*#CynqAB9%*ukgJVbHeql2oZocOwid7j-Lid?PkR-P z+4@d!z*gth&D-(z-=PsxkWxMJ^nR?aLsMH^aD?$H-hPlMfB*J5Y z(Ae0HW3 z#`+h~Qb-Ue{^@x=1Aa?I&10TB5&D#9N;Euy=_@F;zt%iFKwv$yFK1K*wN7T;it}{) z?2AuLQ^i!dLZkI2mlwu+R2mYbf3Q_T(c`;%@%s<_NAE}}H>CmUe^n@NIH++&jb31B zNhm!co%hU+C3Z6YRY6{xYU^8M7p~G|f1O_|�L)kp1S05+XNhGI(P4E#Q}z*Z2th zy6Q46OVUZ4y`&zYiPABNj0ubFEU!dvPnZ(Lv1rov3s<0<>#tx+dzt-N_?6l!*tJPj z<_=21Y-&NSLLP4uwsto&?B`FB9rc%szLZs;hC$Danlj31G11TL@|iU+NllGwZ8VFe ziv2xHH$+yQEaxLK66Uv%ptKDfCuy3g`JZiszs&NiPLY>%t9uP$&j5!@p9u6|cvShe zSV5F+>U^8i6$MmtA8Y0;O($rXr{V{G1u ztN)ODQ@z>^x>+!1PnhE9RUX&^F-IV}tpn%V;*ZUjU-?#IW}kPf{t~eGmvMUQT?OXi z*9Y!G^gz=&fxpLB>TIyxQg1>@fKfsOpVzCi`JN#|+B}w@nob*`bthNMI%PW?9F97V z{sOut9a_{R#MLM)A6X@B%J92t+JdeZ)q{LB1?CwW4)OsOnZBVn4+D>E{5hVaT@L^B z{26t0UKvq9Dgn~v$ZTg8vqrNxZ2tGPPtf7Bssyv_7!M^)nu3GR^=8gk=#eu638Zay zSBm}%fRG;cscV+C^%JLMsc*^6bT8@2a7}%xj4nRj%KVnOhKEI~EVbl^%u(@dtszRs zpQr7<)yB(bs!lv+J#&KVgT8s!FRA{;Rn0<=xW<{vT+0F|O$ovFNDD~`)YxA@SRAd1 zf|+#D=TsV}O$~f#e}4&?e%_-CRnHH1TVu@b z1Y6rSxG7k)CmSW};u0-2xN_Ix^-a@aMb-B5@Kl)8GZ7d~*40f4V$wP;Yvrdfv3Ej^4{;uv+25qQo9O= zd#MH98M3nH>!9FDBwQ^3z00F1yGxoVqk=}Jw81C8ia(kkx0tv6q>}qMb36SmXVXIx zoFM{>BkG%abaL~WXTjc*2WH2-Z?56Q8kpF zPWu-i1qNj;R%$oCHujnP)L{62Vc)~@MT`*1N{az=%tnL$0K9DFU71P#qpc<%r8gaN zWLuq&=C{p4TpWnVE2jt?yGf`W3+|Dx@Sv3s?Q)@JPKFdu!{Q?UC8P8pdkH?3-Ir^X z-#HNAfjB9<94Ix?3QT>pxT z1VaI9@CZ>6tczsDXa8%?|0K!&``*tt3_yjDe2voo8bxPn~4cX06h|}PeKb(KyWu=ofj5BRZ3EhW; zUC;1X8-1*<%i;0hU2`S`%5My?vzv4|S3kAJ2h^4W$Qb><8t9-EOfQsw)6O@JTYEoC zdb>s3qF)SNq?3iw9@85*h?yUQb6iqik41=v{skbt`1IK;ys~z?3(x7}b*i#}kawed zN$uPdG|XcbWFdQ#?)%c>uJTz|9~-RT;j~La539h3+%)gB!7W-dg|&Z(&Z`t->#Fim z(Je=W%!_+Wk9oPt?BZD%_?=OqbyYkeT^IC~WmC?xTV(C-q%PE@k1F1;ReWG(&|qaf zoRYY7=k=8_liZ>L>ElOPD}sf)s%HBl-C)PDm4X?CBb1}tNwFh7TavS(enQIYmU9l9 zMz@PH77h2{I9E|A>6-&l*#(>SkFI3dE33zC6ya?Ug}FtTPcgy2I`p}yR$0S>Id5tY%x_0;MczdIXh8N-Ni zulqs~+I7J@xa623>Sd|L-H&)) zA~7ju13bPd{|hj`aubhG+ONS`HtvL&J_eeTw~qb=sPk7^fXqEIQO3!yN&>L=$bgkU zxt1;HkiKr>cgAVZ!y6Zs1A8NGL`c-O%XV0p85T@95?@0}Pa1%wlYP6`@9N}_9jr(^ zAWfPKCTsP`I32OfUMu4*x~d%ux&rcJ~aEREl^UX%h JXZ`*3e*j_iS4jW> diff --git a/v1_api_demo/model_zoo/resnet/example/dog.jpg b/v1_api_demo/model_zoo/resnet/example/dog.jpg deleted file mode 100644 index b9cc33cf069da5c453b97dbb7383838edd07c199..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 71483 zcmbT7WmFtd)8_|wXK)J+69{g>U4pwi1P$&GJm}yM+}&l6Ai)PG=-|OMxDz1D^S*m_ z&)HAATYbB~^zAj=jD(h|h7gUd z6`Q?_wJnVoCp!lXv!}bAvn`E+q8yE)nv5Kc7atePdm2|yb4NQ5Um6h_K@RkPs{kng z0zCYG+M6N1H6&CdBt%3cbQBb1R7`YCObm1k3@mJXTr6xnYzz!sB3wKILLd-`i9<|6 zL`Z^92qgT^P2dpTu0cdXLqb9$#KOQL{C~E8y#PE^xMKJ`1UOm%JRTeZ9^Ah{0QDOr z65M|o;C~t%{M#6jQBcv)G2S{rZ~^de2ng_q2>%)NTkpWP^8iFVB>Z=r637JV<|wqT zgj^v>g{X9rjeS6knG1Su3%5`-bRuFBQZfccCT12^9$r3v0YM=tX&G5Lc?CsHEo~iL zJ$(a9D{C9u&vy3i9-dy_KE8fo;SrJFqM~DxQ&Q8?GrnhL6&071mX%jjRy8%ZKw8_{ zJAU*J3=R!LM@GkH=jIm{VN1&^TiZLkd;156N5_{}*EhF!_YaRx|KWlIApAGh+ws4_ z{tqs^H!gTYL-=(sg5fEI2u z=tT59TMU=~LHjSV|2weI|1Yxt1@^zWRsfg?aBnXU0S_Psc-(%%wO;As6zN>hchW(9 zL`<63?)`iADbANEm=$v3n+e#N-~K5Yf0nq{X~TAeq#t}&=hS~v-97zA$!GjAf8aF*z|c>np`ui~)U#o$&% zsCme0C%Fn{GDT!X;pI7LbmNxkq{}Nl(l82P#Vz|WuU>4+UCEX1c)40?G5uvt=Vc>< zzUev#+rDPeF{YP=H#??-SfKO4(03=yByeY0=eKfBBSjr((Nlg4FHvstbfxR-Hd4b6 z1>Cq0y=nPowAO%yZ4DDyff2TpnEeUsc9H5Im&V`H+CzG`+qZ-HyL~$;V2$u)LjM4) 
zZiO4-=SLc-2_(SxYCB1zgeHB}WI}L@MM<)2p0u^!?^q_aGWJF&MLuh=(58)Bt1{(} zHP+Gh2~g<7?`FgGo$JAHuBn6;8+7?&-r4;~QoiOAn&)byy!=F9wwbjtyjB*DAFwoO zDv4V2-4qk+v*hz_&2GfF#WMPi$T6B0;z4EQFsHgAfMk-PF#A_>KL@lduxeEn#mH*W zhzrsgX*#~xvfUYDfqmLjuo!OSE|>IM1evw;Vi|83+x|IuF!sK$#_%KrgqQd6?+lTs zLuOrPcje(EHpkLUX<&(MVMTMw=r*LoyTf+cPV}0>Z~hl`Modis)dA4l5CbZqw#*iF zl0L2YP0*QD_G9$TU0M0Ye)y1^AO*N9&|aPa@HIk)D`7aR*#3H@Rs^NPk2F&Cx;9YY z%YMg00#nx7+;3FQA;yus$k}(GzD;oIT{!CH(WkbLt)V{WI{8R0EsU1)Lq8Jirq#76 z`Qr8=&U9;jSFELj{<>5>-44=!46Nl>_s=yS+bQOiND+60HQbXGo*jHf);}hHn?gB~ zvQ9y$m6l%}-Onkx1gDh_p2=#3B|7{PCGY=flc(DU7l~3|>!OGC3ue8Uy%)M7--k4C z28CEVDSUb&?&C(Yi2DG*=`;zmD74m4_Qe_KmS87;*P1j8S~%TDJytFC^YL^^ksUh2 z^sVz&cQ$~0aqdx180%O)x^JvkFG-rpy%Th@8{{II7F`%Tk>;>Lsr52=dQ55+ysx9y z(9xo!zcd0GbS#kH2#Of)wWj2x41>adLd5t~MIpb}49G)e{mA+jcygTk4vvhtgz$!; zWc>=gAHZcLx^(-9k+(p$m&WKOWX}2d^r&nEj_F#4eNK(DV~fW3-%`L1?Z3K+UkaVe zRE(Zr?`&{4pkQPl-an!gxuR1chB=z?+4IIK2t!l|4$R{PM<$+Jkd`uj5-9K{va!iu zP`Fn}H`M0`Qq1YtPS}tg!#tmj+;;@e?dplYIeGJg3_UH|9lXRO6T7oqmoaIUciV{t z!U#Ej3#Bd{$N~tDn*F=(i#2TRD`@A(ED2P7P#%5cQg3mT!U~)*WkW~H*{U(2r}+k2 z3-_$Nu`i4gdAF#NLU}BNPUk5az2{vbl}J{PjSny*-cBE(v7Ea}pO>1tE>K!6$U0p^ z!E&z7UeNh-+2XgFfgs`T!2{$X|JswVJXLe8m}VAq*OM5J26^}^&W#>g4*7kjxbD>P zsCid~z4~-wa{JGbV$3ZT4U8HRj9-r&J@gw=r z!b0{T)e^i3EAtT>ib=1=9Qf!knMXw6G)lGK^2f|<9EP%yHIqhpmSCxD%>?KVf3bh+ zXphBoA9BqD>avbm+0eLV{o$F;YzT7n1kMpuv5e~&%{t+Z6o4bUi3g5$o8rrzTw8T) zPthgwI_eJOjlscpwM3Dl_TECCO@b(-wTB+snOwxk%{*%8Wis9dj*553fgtAyy}jQ9 ze+rVjv{`PftZCwZ8r-P9Q*=)#wY29u^uAmvENt=l66cZLlA89n)!PinP`KT0|9Q$ck7?(7YOy0AFS2XBVEl^1VJ6`7#%GHA{dVUX ziKE%BBN1aTQ-`+Yv_U%WiV#^4fJrPu&_z}V$6&NGJ;}s5wD}B~dvzXtJ{3t#C~Kng zZ#hNSMwL){+{a%^R10aYLk?^B3L-LAYp$K(+hqiwGh4;wy9y}%+}`=H{rhX^o-vAq z8$33r0pFwJSE2E+Q0}46;tEG|D}~V+ZrtFFdHxAIME?xB+tRBXujgv+Jn8qHL3yde zyH+mtI;Q>K$0O-Is<>#qtthBi1xfPD4U(WtHx;=mEbkH>d%sL(L5i)Kj?F~gyDYUrg>!!d9P|~AQ9Wuv*ZW}4lC7AGbnNqOCvn7ZNfe?&5feMK<0w)e>i6fv=A#(4Lr(hInfou#y5$j9gJUz&`c}3OZ5#>ss5Otnp8PMz-V_S2q_OmX7ni-<0%Ac`N`9?bjeTd% zVuu9~;{y%)0Zrn|+I7}BFC}lC<0|Dz^gPcfn_eQSh&_XO;jmMT;YZhB`>h^dC?H`@ zULz*lS-TAFj<}QB9fIc?-#5xj>`n2)e*v+Q=1A*B=j(Up>y!wF?HfsX->bv>Ry#F< z@TETU*-}T~8{Y0*RJ(o{5u6m#CJ`)CSLC7DL5`FNgT-Vqj57U(7(}jpB!Gc$b?J-u zo(qEI%f>o15AEmg_TN0;Y5Vk5sm^YVv2X^mh99!zo^%@p=GTdSemZr9W#d){Zu!Ga zCBi9jS?M(&;~fbO?TF}Lq=gb*05@fC6T%Bk`@Vo*I^0wK^KDiaXM$cF76wR2eM;W9=yAdRc!gb+p1|NzW&~WU+8N|UX?ba zrmk9LG&jP3b%%zy69+xq2G0biIFzX@d)!yby5+9n`bJB_K9P7R}6{)fNLv^wy?1rZO0Bri$i7l#VQJA@#fb)RJI@$X z8lKwbc1Nzkk;SxdOt)v1qYmN$5b6`A-bYU+C5S_L%a6?E{}$(~8-Iuv zHOw$skE(N%XnDFeVOU+;chIOV`Zltush3bHvWu8=xlw4@SIaA-7kU`?SI21}>?&-? zq}@DgGB;Q_h_^+JO+TB}c}m0$%f_gTdu_3+ybi>E%9(#)d@}qYrv}U(PiN_)wk;31i1~s(~iykFx4oC}5 z8>LH^A&)hmRxGBMkv})k#Tp$_%j^?fP-x~EtK;_+{;TEh@^LN}GwEqg|C@bvoHLi5 z!giJ+%-Q4^eZ%RHUB+?L9EAQsQ6LMG`x+iaE-HO+I# z^mw6cJo0(fmm7B!sJK%mM|G2I6yJVZ!SLNm$0?d-#WhiG>(ht)`cjc7mZRIMi3br^ zX?lY{DoZG$5VyW7EsCV9QQzy|`366&{vs;RXZ_J&e!|%b|F|8s6AE`^^0@W)>sq<} zMM8Mf%yk;78>!9-!}5r7$g6cRiF93^sx6nlN?Sl?3@r`rgue$cRi?s&%fu6YP7j zudS;h+C>(iu9fin8*-5EuGiEFrJ)l0v4xKFJu?Iax zm^NJBA6B2e$8T5VE+1t75>qNG-Px5Hh4kG3DT_#*fquz;=(m(8{fdk4)+Ok58WOk( z(vtS_4EquXfEKgj$lgJ~RiEnqevmu8T_}?81tS`IUcq8cPEq&dI_tdYF>|>0e)l)o zixw#rMCPL-6Tz*{z0e147k8880-sP8r_knL2YGt7<^G4&0lIOED7f2t=6q$Vw1p;A zf%<|jy9xt_V%+g|?1NgLp+5by^;AqU#>C?8IM7Ghj2P6BdY=pQfjjwZu$=R7SCkaw z9xjIhroQzLfcMn~$ojieL<>eK&s~H)aj#zxw?Zheq8oQtN3D5P^u5?MEUR}u2;__F zs6C1Rc8FN$p;Pu`wtPNG)3p6mO?N~-jr=~Gl|@Z%z0TC|B4F|B-m^Wa5S)?Xh?9PGPYFOOU}OzZ1MA@=Q=#oa?2pF7lVyF0%x51`Dtpd?E=%@nhIaUL$5nTR+-vWb2x z@?k(g>rnuq3}mm?)X5@Ys+sr81BvA2x619((qVA0*Y#@5O2aQiMdTU7#i!5G2%qZ) z<5hy@26_{{7Qa2ywF4sRLT4vvcD1*NDQmwpdy5*Ez!neEeKc~z#efgs! 
z?JQ6%E}VX~oVMzv?3(q`IokPo(;rrUY#@}p=4!;=QMB8NvZtXW>UrteGPBk!`pI+m zm92vnM-u0%z9_)D)Y|HOLCoVh^sx1sWJc8J`qW6sZ{*kmFUh(#+#5s>iLv=H6VuoEj@>RKa18$*ggSM_*OhQl+p(S+rMD%xGt( zAUVfW59H%Wm8j-oQ`!h2sUa+DXs~ZRMVa}%i+x-@v=%h-HnrCN1DsSsqUEDkNhhVP zPG`S~C`%6R)GIE~LNhr(*|pOaTPXA7khR1}3U_HMWYZ*mWz8Ly(RM#Kke;Ef>DsRx zg+1Hvh9N#_>#L~pHt|Z6PuX3L$@y7g?kD*Gb(lL52l?C$bei_6sTlWL*h;2Aq_hTn zh)W1SVRzheU>3usR;3_I+#(V8=LN9iyGaGi@~;MyF{UXHRl-cD=zQFoG9AlFN{sAV z1Un=tdQV+S*GRHMn-*blkb9-=OaF&EOMDZ|!krY7?5uPL4HJ#`|rhW*5vjgh`?`x z3ER}TM3Iw8NScio=??EPm))e#RsvmC?_$7*q;B29lbFq9p%mdeEJFF!5(+9;?ei9a zOp=0OQ2l2_CI>Uun9=qqXBtd)2ew$@1skIA zyCM1HRinRDX>?(&k)7~T6LqgQMw8dNC7)S9KStkHrJgq=A4`1szHG`w+g2N(%8&)O zj~6o{A6w=+TWvI`mY{M=9EjdnSnE?~xByB?sI^-9&0*QTtIdLJO4%%n-dO7G-v9xJ zd|7xko#xFK85nxEoQS1u7+=h@e6T*L=`Dp(J?t9!>Y4pvVvQmu%Vzyyo-O`oyyblH z?LKrM)K`DWruD`Y%{jGo>UQDqNdOnKOb=#Rd<&Z0Ftip$bhC^#qE~aQ*9_GsR z{GD3y7O#X=R(1m-^!Ydj)kMZP!ZNDyq8$*Q`OI=JN@+?TA53<(*^C$VmKDX4Tt#$U z43G4=Yug&qmtVb_eXt#f?fjB4Zbl0`#ASd~Yh*7=8qhhaWpJ{i(`*@_-xT)C=oEJD`_0){f1~Q5l zpwG!byEWRwSRE8)QA)MjH6(c8#_YWwbS7Q>TB!ZOA`ZXm3)jeL+t)2t?K=vT^WN>W zPq>T`BW698Z{vTLciz3udh&)g5X$u)SB$5;`Ul%Wm4Y&CuK`QQqGOIqrYhk3UFDL$q31h2+vufc)7?J{2iZWY-hl7 z*5o>~TpPSh(ueid$cg;b=^j6&56K$s3>@*_ZVh+TSQ~J&eDc%I_QHwtyRFJl9UZBx zS;F+#@1!f7+cml3PZ|R0qm0tgl^VB-57bW{+8-?UP06X;mhHRQg8Tt-A&Zr#9}|Fc z_AB0L@6O^7SSb7c0R(F7YSCHDQ2znMUMw}oE%Dnef?7Ba?E)&v_|3wrZXI1j1S_Hs z5c7u%qu1hBfhLkTh(Vs9*1$<=S@%;l+RnERg)>s18Z(oUl`Ds=Fm23srI_xjn%?S# zm64ROqI9Wl%{8X(C`Ff2;z_6c+cR<1L-Q1war64)$4^C^$-z1Baz_3OmGpI!7THUC z3~EANRms>Y4&|3j5$6FycNWJ)5}wY@Wpb6)G*7r=15^uBMvSARdgiHDpwFWU#>5UE zxhkKJib)P)+$}FkPEQO{MY-U1O7&wtQQ22rPAZa9+Ut$?cvISIeMcItLRRh1sZRkz zaw9A?(3(J!tE4p_?G(O98G0$|e=D^elPIZjEAIxyF9_@G2D@f;6D5d@g+|GXpcSVI zesXC?$x0^~C9Ng(Apb6fAO6=Ozx?Q`<6Q>AMoKgbzTL0gIvJ0PRrnacCv6O69y+*0 zZe!VuvX9Ig*c(fA_NkAp*qR*=&J%fhqxC*BB#(;F*{+OUKMPitUh1iCuYNIhJn(jR z03l^kcnb}T{x7s*ziZVbF&Fv^R^q^!f;Z$l9rf-*9%#P0h!Opd5@o(QO8lT^9z&+A z_>iT6J+(C>qp;?e0O`hefH?5?Ix2_Uq$WA??d|tpsO>iI>JW5c47%2u!TvC*?QSiT zwPxfU-l6p*L1Ejq#Sc0nc-bFYYP|84MVG$!96hrH)Yuk6{lEy(A}pomS#v!l7YtahG#gl z=j_Ishvd_he3>)5e6<3ecVi(%Cs9GiS5A3~ zS?M@86fs2zcJ})Yri4-s!K)I^+iAtbP`S5R?#5B+`oP&i;twsDvZ^3aXuefLcWJjA z5fdANCujmGtq>>7nxHx;w~qx6YFIJ9LP!(q?bFr6h9o*gDp6mq<~+91>jWmfR-AM2 z)D|2>-h)1n92o;!;KV5fYI^IOh+VdGE~>z3PKn4>-p#(BRqR4k99cxFxeifWH%Xxiky%syCQ3p7z@UvhAwUI6#JaH6}1Hu?y3Ibd(|vv(J_#QAJsw z%)n(`x4H)cM@HPnm=KDj1B9o!v7|UK%Vni$;Z0>fzL4cbok@}7MCUg;5fJ(#Ix_qZ zXdJEtsVF4n>*u=iII;y0P3-*EmjO@^Pb-&o@HB1BaRbTPleWM1(&p`?Lm>8$KP5v7 zCbMvo;iAj?J*~^mUsj7m@zIR&#*}@pBqNBgo_|D19}GP!_1S*uV`3I2OB+w(O^JGv zJ_bTR+0=fH-gQd5QH{7hexm0`omn5eZ0-Nndd4M`kWKR<kUo z^9a`%HXH&4LNC?r?r%zA0;kfh=C4-Ncn&b;N#6B|BzpBa&R%CdY!r+oHx#3k{dvy{ z=Gufv2s4{SJIL1M9uHpJmu^XQD@7ai`J}q_0=c_x!zi9x{>Oshgc-4N_!QjqUxZRW zoDea~`dBKljtD1VHrKKNMt9Guuf?=*V0k+4pfNvc#ZEq7R{Ss0KC#%`>VhjG)n_97 zuV@OF#Oh$`hf3j(1k|^s8v#rWFTW>mx6`Q~7&5>-hM6`Cx83kh%pEZs8~f7ws!o4q zLaUDn@%^kkYZuTO_Uu-m130Ae>9U)mUf^BqTjSrro1cRTO9R{K*%jlI2@xfWgt%o- zEBjax;+rz40mESh>nTv@v};&Q08^b0znVXrw|vjc*<1~O1CKI{6fSt=zGg+Y$);ya zeNleYYke-or1LMW>9pPD-vQU&e*lKjbhIM+({yeE{PHX6DB!*c-khl(?9ZnBV8Jw5 zyA3ORIv`mB5iYN&=O5rD1_Sk`=Bm~Y9EEG2Am45xH_PaNd<&=_@iiVq9-*nCeOWwlqPl6zn zhm*cUqZ8407eR$volLprp`=?Mb!je>j)J*-E&OwK#`i)tlai&al2l0b%)$<3{9Nt+ z_)hW`lOYRTW_j5arB_B7FP|&c$6c!*)Ymfl_LiFQd$92-%bF1qm?RWG$VI#tton-4 zl9t}P&N0H)#(R`rO(@?BH&8KKc!mo73H=tNAUwK- zn3K$%)md{iv#vh^9~Z2u`X&YmN?%kvaavg+$uZ4#lR*dIL0bI>A;X}=rrsYiSkFGt z9g}5C8-_?>xHnbU0)k-_*5{|1`X)D-#HA15Rh;VOQ8q8L&#e3ymv>t_h`Hr>qDa%= z^i1xpYndXEk#NV@=7R@Q*?;{d^=g=Z0?kydVHqQ?pY0~5=|dIu; z_ztG{G|*;@S|H>|ZR7Ry>g{Qj&6X}rzb~HRhKBQv$qw_E??!lH| 
z629ehb-L*Y1y$CLZuk)hzwUwBYn;O_q7b5H`d^<839EH9N(T_qt$DK>=Kk>Opq6aL zEusl8{9U4#eY^@h4jHM$qlhwW1Cq80?8YsOM82xlD?*U!GxG#0od@J!qYGPC++%)J zwwCGzLkYp+K!DEoQ`5Uc~hknTWgOrhE)uTd^zkeHnr_`K{q9EJez)t0r%3> z_Uuf;*IM+cjRGg1r2hfzHgJTWv=}uzY37+x1*y{%Yj7a&OdE>3Iokf&rQ&YB73|Kf z2yGCnHN|@rx-9rRhK_lzl3I7~>t-TH&)Pocsc&>y#hzx0xF|drnqyr$#^auJvt5=$ zD^?l01$o^khxxdN;=>dEuw(O8wLXU&NnaI=72AC_ynj)k)}VJ}lQx|xB&HtaTYaupX-Qnkrg_j8 zHxy0qL*2PlAj>Upf=;M{<0v!}^$~Xa4?yd!UcYpSw780l^fY(+MF5p~JK|}q5syjV9IF6?? zBU&cFD@!pevZEkh?}Rnk6-O^T2?;jO+wNo|->i>(Hg%RQE@yZWT)S9nOrp3@fJ&}k zx%vC+W;~NYIyxEZuJNPj@sSS_eu!QE3DUl~A8-QJp!b2CWfLYORaJ8v!~kCno}L-l zn{-9pDFte2jV`A913^-aF|!*}E3+H;n089LzAV=7c%E!xzM6&L7RYeF4 zNjk$PVoOXb-e~lUJRWJM-C76p73%e>vBnSQQdDSn(7R{lI%9_I2_O`f`yP>{({zYc z7EPjaAA{eDQu z68@xI#104@+}-9SP-RvW3H{KUxa{|a7hBj3Ic*~m81lmjx19U)M+ZYY219#9CSLqm zoa7&Xv1@vVsI>BsOF?AkQJp)iS@@I^?Piqv;Z|nzE5UdmqSU0`y$b?W7^NyCpw--@ zjz|&5^4O)_Uc2%~;@W+zf2S`xR@F8QkT5Rf+;CB-ooEsWStAO^>1sT)aD4{piK5Nb z)~Oq*G;x9TlHy*tjis|%h~gnB<1{aSF3%@fc|}UJvIVOK(ymH;q2Om0|Lu0?_q2d;(M$iN? zt@P}4w9R?)SSRfm_vQg!J(G~?vWtz(q$%9^6_WJ=-Jh!qwR8D>n)zjOcI-gS7qIQN zz7N0vs>QSSyr*2-%$9DIzIz-o5=)_Sew!U9F)uR@8C$TKmJe#~k2B_mi?`8KIX#FUQ<+S_S&IYR!b#`4vV2Zh|*iKXRGT{_0FeJ#$w)XliGxEU;NMcSW~$X;kV^htxzGoULt(eBBCsqC~1~LWsuD z^>tj8*i3Zj7pALi&2S<1BFuqR*=;aK`b6G6R^Bu)SuTr`{EiJ&6vx>xvgp>;{%k!* zza}eRpYzPL?Nf6$5z=$+Zk+ZHF!P7yUvhB~TsrRZacwyQwu}1n3+? zU4-uHvprs9sav2O$Wn2;x}|ncN1UPYAunsUHS-GtmK$Ayekl_Z1r|R}M9L3${gD^E^;l8C004 z{QHS|vYU+MieY-LYaD?wd$S|8KuxxcUgPV1tqZT0CzE+wI?9-!(OZ!N4#9FjGSzoV z(7bb6NXHVHH!m5e?!N)>YhZSuN2e@JP8>)E zkn?Ix-a7VsUGa%>iYu&7@x-hg5Px2l z_;$*8H(fO+CXyk~Hdr-HZy#`aoyNPD;=b6!=F}~%yV1y|^$(kNYK}#X-r=iXXtBt( z4%O%;J@Zr8ko#l?>aAFz&ZhRqO!&%b0zM=wd(BfR1JdtgFL%FssT`8KQpehBMV#HP zT7`7pCuG9s36z_Y8Cc2Wsv4X)T3#)<&hbth(T$sD9liUS=pOyORTt?5+aC$OsD-Ij$XV6b7?ekza;o1vH!i-(zAF=&n?ezEuVqw6*Sro#>bS zJ~(=qg|if-xCpdUo$%YvOEUK@;oSApL{{s1j4C%=#k6u)&HK&NZP?Vh>G~0krt8#6m-6lV* zq$x0h?7Ls05M(~(z^4#18n<&%4i&ULffm-u+9WknWV=rTEwC65^d8wJ9A2MeeijN= z33*I76WB*u$dS97glrj0>9(XXMhvwRmL5s<55n{;s}Q6!(EQ)515gA?3bx!gnkgP4(d65(d1C486coA!SJ9%Ah_R z!}xIQ^9(vu4L4x|J0Y*Ce%=MIBTIr!#3akhQLZVU;9wEqEWCseBHEg&vgNZN(z$nY z&ISwG-&R5}-rc-M8@Zjnu6Md9olm4!Sp$w7EWxI|HfVjUW2aoUu!*#V0|S4O-MwHK zDa*-3ZVRIk28@XrJpG|BILo|TIvl0<`y`ayvzFsN$#s;pyH;Lru;+A3c}T&JlB6SB z2|;6OBdzPvEBBaY+HlY;)c2;0?Y*mR7$r+IB=5vg+$AtrDmf^X`4EMMh!E<(=oaUn zK{&H2T8O*08ce;xJNl46Oyn2XRsVh@mV|zkWlPRIW;#Rt`VRWj>{VMyH$%3$g=IIk zGyQl7PVgxOy3fKa{of}6bFrhY~6{g>xsuw{M# z4#kWp+h~cY909kmW!=nu#{mUc6mrvc3fe5~oXOK_1S`-xrv0@TqCsij3^UCaWor~7 zs;2j*-B&=^xi7)*$?Su^#TZxmdBiJIv!bgXhTJrbDqaX5bT3ur48aVXCH5);->tC! z0la%ePUd;nO~(9*KU=Ma+`~D(+CIYefA)W&UO%im6yDZD(oNHQFLjb*d9>Kmb&((K z%o~g0Kd#?85&L(^lDw@|=yvDo-1H-MfN(B<<%v!?S@nwa3EidjUAz}PmtaEvZH#N! zxv{;^Hb%9ymnT>fj+BDh5ta?DvgB#PBVWU0Q-#8k!xHAE`|v&wuRHX zwxv(Rao_}TCkFn^HQHnTot8PwnU~=H`{o}2ml5K!xVMeZE+LazAsT+ebX`ZhJsiDF z-;^8q)7#SiG`CK&m_t zdX|{wzP4L}wlA>^;{Qle0!H7j9yR%>S{1?)gr-CH-@O)dg4-DhRkBj7G*o-^=yav=wGt+ufu&b%kgRB%=6TZW0 zziEbMa@qc39htc6Y6kD`^@m>{&=pky(@e$UuN&i+pRy`_L;2gymyj<9uUDdbo0u$)0z%P)V5DghJu zgU0Fx6QW*TnPFsZsH29=ySR|7SPq_z9l+*((|P%OX#Uu@9Pl?fpw7P80DYq0Vr&B! 
zefw2DII_hFLK$vlqod*I4Mg}o`{Omv$F!tw~^cjUH z4Ojbd1UTsMD=^E9FG2C18Xpmi1tlHE^3jds)dpXaQaa?eE_bJ@|Mm9W?)*k-=anEs zQgd+mE+sNG-{{)Jx8fm(Cj!{3zR;{~4og@45oD@qax$zI)}kRyZLVQ3+C1}Hw=E@7 z#w}2sm)z8#*cJVYNTU&kPrR9FYTQ47Z~Kt;7vOta*1zAu3PS;&!OC zJFoPoRg?KL+F|%VfH1{E|4$x+R7dQb)RgY2yIDZ=)#a%sk0r&N1%o(N+|z8QZOpcR z?nt3K)t{YiExskjmUUT{9@PL11P(*1xvV#-)F9d!$DXkbIYe(D`S-yv>t2b3-PO>w ztEO}?j-Pp>*P08f0tyh5KA}{Pjy6dayDn2|$X6F^G02tpk?eL-4G`HiFr4%yM*WXn z)Fc14_F+rtIZ7Nw%)^O6;Qbu#*QkF0p{Mg5wrm7;V3b+!c8~btX-RWuD(49OmBVz^ z*^t$g^OZ)O%2(DJ7awD=azy~a5`70WPze*nEcH$PmV zgn8YH0PcjpQfj!;q~)gHo0@TvT?B-!4*mgPvt9QCLKw%>i1-ufBP9IaRkm7ogfWwS zc7SI+7>ivWH}}#(PIJa|h*sKPxa6Q?f&gspW8afWYmdwsm3;0_FS#=NFDQ{W9-+Ya za|$#{ElZ|_&Xn6aI;qQr`7GG7rS9KPOYugSsiWlCt8ttD{)>@~?(_y)&GP)lYXMVA z_=J96ZzJ6Q0kF?WZ$DP5VxXshzgzZFc=o=?*ZBJGxgXQavm7S4hr-4#whfbi3)r~j z`{X@}sCMvPWmlJ!lX|n z9d|2xuD8fNDQ$*<@bmo{uHot-sH_7wJ2$ngZk4|qbNSY=7b$hr`XaeRb+GaRqv{PG zw0D>pTdsuHBlL=8p=qLNS4o%SfrZj$@!$48-5r#_iR1aPC3BvL-7Oimxr0?Wm)H`| zE??6}h+zF+MFgZ$}`u7ipJA0Ql5rmI}VQ`UtRQ~{V z4nH08GX(2>EmD(H{qos_RC00{e(+@N=5e2^XS$B^=42SQiH$ zdCAl^`Wd#G_)eP%Eh2Z+t=T2ERHLd1_00Ye4c1{Vzl7hI($U$HliKJIG2EssE14wZ zXLb{k_jx7;I+yV6Hw(R!{bHh-f7w&;bfri}e$vqa&E|-+UDm=g#SHr8rOQVa-R@w= zW}6UNSAuMGgF*pYHes6cs8uAOlSXs7X8oyNsnF!4`dgTcEieq{*|!<@c|`sJXyvv{y`R~VDNXzN zbTRCmJ`{+1(_sx-&IfLp=Di{<6hOK=^%O!7_6ptfIRbIi_d6- zD;s8X*yX{{kbGFJPiC5^9H~fxC6eZfrk%6X!~VGDO#MGV>rHICb5UYpdxJ&PK@O3I z_?l}Tcg|fs`$OjTsza_HC?eZDU0A;AAK-m-Fjt_8`x!E*=^>l$rA>V{7WFDpPgNKp zFF)$Vp^vxP4iYPfebwfbOhNUCLh%#|%Hvp0EnBQuX3}WxQi}Sr%XhU|EQo~DNSE{g zt6pBU4`R6}j9gVS{-`ES^|4xwYtT*mavtw@qN@q1L+vznHd!&rsN|5uAEA(&J~72x zv2am#zI(jE^ZnwuLZ*c4S71qYIg+|_OjA`ZpF7pN%i4^=dh87!xnbhFMJnbwdi7EV zhRdJcsC76Kz=SdP<&bIM6-qSQ7bo8APGbp7@5L(~uIj)&Yk{{sSVDi#*h{@ABHKok z@YiA~?h=LUo3?>Rr9^MMoYV0j$_q|YDxYuKfnJx+`<4vjv^B|jb>aw2zhj!Yrlc)p{Y6CPc_IGT`G6nW~`W}3}v-Q9}Lw*7t>E9@gr`2;3 znD4hcC<8;CcP)Ofu)~Gb+i!KL2hqw2)@xUTaw^yJzV3j2CuKC;zsZxr5xb7qX-6g> zehLWhzBr8EDhOXeEBCvO>lCs1lj8ZGM{8Fq(30 zTJNngTPx`Q>P*&0)Uk6wnM4X)31PA|y!{Tl@`Lf$W^0>1t}RN|8>QD%go?DP4`3IT;`B7(SGytla5Z!gd5;?zZ=2yB}tTiRT_~f z{d(s?LlWm0MEm=W`608qkf0V)A=W!JnJ}WG(WV|~gCXtH7Td!x#MYJ6U+!||Fz8vU zVYnK)*Pw7}YP9bdshoO_x&bM{l z{OMXb$`pB(w@^6$c>XCPP!%a-v$c{uU?rUG3_&aQB6yJB=r=LKgSvdZN51OFQNH5H ze2t+E=Ok}r38cZ^cQIk}gFh{=kN{Lx=h ztF3+!bzJ`(!a`#VwXt50yQxnOV(^icA#5J*a+8Pp{nZ@Sq~C+pxeo>Wy%bvf=uaL$ z#+N2+(O$(~R{1%=u3{*fJQ=NQML%5a$_m`y`mAKudjI}1j^&TPj#{Qx-6ksaz=>Mg z3^r58Gv$o1UgD5z=`!NnvL1Q5X9T$*>6-j|xdwrH=Y*{rp`rmkh)DPCw&i$x@V51> zuK7Sh5S9;hf4AiI@Q*Ad|7}614Io~&jD`v_-=n<%26%vEC=mWB^zg}F3bAS3h&r_; z+*;ue{Ds&w?|}Z&O~d^5%A3PQy;0ENBsBdhYEABfgIoTv-7E}pZe4`oN^)j$sRkr+ zK3A>xBq~o%p7u>7CwDm4pCnj5M=+-GcE?{E5@T`y%%roerr47E1*c!>#%|+J)Dv|R z$hT~M@mEq*uPgzD($Dj``4I*+VQWgzy38FI8Dl$rur*KQo)JI)eF?my>|UWHA|CCH zmfQAw{W+pLN=?8MQuZ*}vfRfvyPiqTL=F=ae7X?Zl+D@mE#FlZ3zffDMLxVs*fO-E z!8G?66MTJlEK>dbvUEK?jUYC*s0yy1JDobXvMz{5f62#&Pk~SlQ&3<_9O3q8w#VSo z8+>a@Y2RGi;NZZQSdlFIm`{-c4t6(cacLTw9{JIpP%V39Tf82i-u6vXe&q`nuK)qu zbvdjV{mhYfnY};$SPScT%%`u9I#Rs;JJZF>UU(9LSdNj$ir*&HPe~!zj{t^?*TVW$ z8Ryc`72QHV&YOxiW7H%W?k3aeT4qZvQ`Z(lpqRfdI0L z*azGGUxt&f?L}daa4*~sFfX(Bt3tX?sEWDF|Oc&>H<3-s&ZuP@pHKho^x6!SF$ zdUA!*zWV;!>oWbTpekDWo`FVMLrbpte*mIDUB7b?pL56HNA;+5FNxCXGNghlbYgN9 z=8%$q@305crF^k%7OUca5=UiW3*KMdKGZ?f`jhMc$uzL)n%04NzURzXLlM0ns0XP0 zsy?S$ta-VQP2(uhpCXC+2ch`g@2yrZw5_~^1Lceu$@V9aSpGAyI_;Ay`DWk)w8(&f zarjrxe-6GT;KrupT?}NYY(M0B*RR<4>f2bhG5LtCx8ynL?Ot_fT8rhIx;ts8Rb22# z%bq0B#*5-zT*Yd(HIr#VCXhv0o{ z&b~0G+T)V@Xj{;R*_nFqaz3GlAO5=PJ`wB5W2eC-&;rJE`#r%u6F+snlw)M7VdF0{ z@M*%N(mgN6-XL9a{{X|&kOYltVw3^?*B{orqS1`{-OAwn!Qy{%r>%M={+QO+){`@C 
zi2)fLsQ_RQeGY$G^Zh2~1=S_Xe9t5G>+?7#^fkj8+>vQ3nkA`)sv*jcRs9}Uw`;lJ|vFdE#8@PrOb^Kr{yHX%2fSNAFY08+*v)i zR%I%WlDQp5YxF}(x44Vpw!IDZ<+b}1@w|j*5*Ud>KQGe0AMsCxZuHqAiGw`8PVxxo zGyeeBuX`z_Ds)^~^I5FpD$2(aBHVqwX*_~^R?6siCQ}(gq;PAb(mW3LAQl9ZUi>M_ zZpj`Trx@;VW;_fUXO?On&)r44>V-?XxR4^q_FiN(9KqZIA)6-s95>U zbT=Lvyo(>YBpRbWk#hjt(GV*K7Pm&c$&}+zYTQfGpj2RSf)CQJLYM<0q-R6m$Jwf; zJx4Y5r^B1ZHnIMd@;`?1x|~?`?O#@WIASd0&N>dY*@xWas?t4POR_Ef>(BgGae&#b z&6s_gr#`j8c&`R0!Ryc(?x`y+4tOiG%DhW+=LYAVmB8u}7~J6X8Lqcpc1fRc;<$Z6 zP4YK9171wz+dT-Y8PlKQKD^a`ED~|fYQ@kdJdTxJ19Z+SB#SKDLA})h$tJy*;m3mF zxQSVpB=cTDq_A6oy9D+ozJU03t4AHQYq)0sb6Zo5-llSqyQp|75pGm3Zk6I65cCmY zE>U^Ta4YE>y+$Rr+*lmfhj{Idt$fQE9dTVUib#vu=y~Rk;ps2V;th7XPlMJVGQF!q zM$sp?v{ec^bBgOU8!2wxV-b=;#&KMaQ>He0vMndWkwmMyFhy@iZ4?W)XaIWGO{e&E zj9J;uHuuAxJXS7O{dT?8wf2!LJ>`()qmFB)B@a6UU;y8C?*U~zFf?8~c zRmZrk7=9ny!zkStZ^FGwd1XegGs~-wi*||gcY?kLY91q<5W@H*Wa9$9#P}`yDd_ra zP{}r)v)rDqGKR}MK5YOQ_#JpB7vN9TUM+xE1I#q0z8A(wr%c40nKOa zHPp3E(Kd(xXT4RL=q`Dvq`wOmb6n=7uB1%BH%jPLPIpb3(q3ac)N$Ve*v)b>c#TGK z-974x_@zw52N(y|oTg{WdQ?|~j%r4|3*NYEUlJ>GxMH1o;&h2J916?YSfv}Ci|PyO zRIW8uLVj+Qz+L#!5dQ$I8T!^8-;U!~!Ekd))i#Ygo{fL5F4KmriFMW5!*B+;w(&CD z#hv3P-npG$#6}X>mmiH!JKS=oriZFU;vlQT@l75nRAZS1cqXawIt?~59^fhZ*DrJN z%EAU9-HrX1HN0xZ_PNO|8T8fnixXk_{{RZ&d`aSjbURy`@Z<5e-t6Kjw+~#^TaSoQ z&6jzVhu5WQ<;}1!GqTmS5hh*R^RJ)&Gx&}-V{ID(NaO?1R-cT1EI|YgvCgUh>RP^c z@n4RYRK1bbPcez>=~+th(G`76e-TcyGY>)f*DUjHKDg`eQ|h{ykRO{RzY4}{p@IqT z)~+zJCCcYt;LjGXhV>M*yUf;w6TyX{9Z##kxr?&+lV|cEKt zr~|+60RBdzQj}DAyV!`N$X^trfdEG0x;tc&^Ggh|{eqZ)1+W z9#!c^OLNM!4-?(^k6g1`wt=mdMDq)xMmCIbl07)Dw|`}Q6H1O4jMzgb)87>?Kn8a|4-Hr!A{cH5I;RnE<4ty-oZ7nqW z2-aB8ZkVXaY;tqgpK@#3rH5FCrO_Td=CF};-t6u4+Z2KYRRjg;p4BACS1h9~jx$=z z3aH=#O)&yaE7@2WwRU+QSk}gQLeTQOn)&|#_J+`OeP=_yitVC`1SzcA%gbyu>^^%%x*!<@GpDlFX27FA^ zaTAF~b!k^Ts+Jgx8RG_s0u$s7r1%t5{~Xki@}@oNXi#f_Um{sraAp z%UAFhiKer@xt49Rx054eNK|74pI+P^-MFqYUk*L}z?xugq+|0uxn@67Ys*qp?-?`E zoe5NGQAHW`ICXh+*=#hW(=Q+_(_CB3G0mftg{|vP(JbX zsk|NV&hJr`T_7tT2Lxk}TJ`z7Ai7S5*5M;}+4$!b^Vr->8&Hb5@8EEmN4f_5@y*J^{h9dDdi>=_^RTF%X zZ5jF1NkRCM1#uoVw;C<2w9?~nVyq8288{!274%>1OQA>NkBIj2DkRWF5u4{+;EWH? zn(L=enxrFr-HfWwl9bL} zzdmfZ#ny~K-C_1b|9r_yji@U0$g;RB5w?@qRn@?~mcoW9bt16R5GWfLZ?$#0 zE|(N6g<}ApK~B^3A0 z9u;hoGgV(%k~6S20PHG_#?7?C7A57-I!jkSjM))}x3JNK!fq z%%8+V$vYc&(z(4uQ1e)}dB=Ljb?l3cCnkCqguG2K)KLJ+manM(8(qB17Eh-^Uln*y zQlpAF{4`Rl2WZ8H(8yoSapP z^A^WoQAHHMD!hvzY>Kf(CawnyscP#BkC;^^*W*~&Zv^^gzJAxfIa|0ak`^Plt~yVQ zw-Vf!68ygAycFBm_FGrb`eygUDFH6h4n6CZ*1TYZhgkswuQlP<9~DjX88cw|6I_13 z@iXko`JV&6O*PC#NhFV~^e+^{c@P3X51}>FL9T^BO95X2S$uh$NSS0=OUM1(^IgV| z`$r=W1cmV(VtoCemH{KA{IbM_pdwFJ|L`OM`z=;c!lT1bY)_a znRCe`*AwDzi?F4;g>kzb%@O7=o-cFOb?=NzV=-ozWPX|JP0~IsTHYPPaf8k`SI7P) z_=6^?BOf;aakoFpuwd~;^@q(A?-xav!Fs`gTSNQGpX_J!}os7sY;85gGP z)gSm;7P7Vy>Nzpne>L%q=f#O{Lt^Bsf3?!EuKpm(icE4DBt6ugw4+6%Sy$|R&vEg) zPOx3FPvkfEiQ>5b02BVuI#f`$%ilUeI167A-*}T#)R~%FsW5v6BB~3yi5ZcP)aJKz zTQK&rXSI08<0ZzYJ207*Gr-MojpNy-oXFE3#Mc=sJcA{(Q0yT3eQCywVrMBfcQX7_ zzP3~3zs>8K%(?ibtlW|wb^!VkE0$=>9Am9HZ3zOmQAkhB)w%JOp?tWOLhGJ{@l{Tx zG^2&VsA96`1an*3c8ne_SCiJ8YAWm_c{?Kv85r(rJhty%kKr$ZJ}vRp+Afo#+{pv~ z0LynmvE%*YUsU`j{{Vu2M<{~l#WPEDA;C7#orpb&>VHbLCD6;B`bSgYYil`f7$kAC zvFH_YYqs#e!p|1!@<%PVhV@IB0m$2K>KmTTj1R4Ty8Jx-n7kk1D`=9(K(ewA*$mMx)

*RiFYCaLWx$w*YST()sTciTj`p2Z@H;}65DO$T1Fyb&x`I)G>=kc=ua?PK&TsgJ_G zWbw6)y}#Nm?Xd#OAj*M&ib*AZ00nrQjuUrh(@?9;1sev52+@2V>l29O5n-ic_vC+> zoAJ$fSH`?u+1qJ~&4}&%$zL3G>-hEjtJL-VckO;1*=pWF4lqar066|t<=!!hmf}?r#>DLT&j&fd=cRoEXP{eKvGQXH zyROy7YqB^mX(dwnGsu=d*Jaeizz!D@w2&xAvHF<)CbejZGcJC$JXgZ%9E>uN^{>re3;xvFgz{V2YEt9Pa0JTTNjMev zPr^?S%c^*8+DX}qGUPLN73WI7?|B*2sM?=7!T!>J2JO6cq#L;7Nbha_WR(UOj_P|i z(Eeh+P=~@6+AZLM?j(xoIcVilxgOc$^{>%wYTY#(DPrM?74r|q9|zlO3@t9AHn(aI z*)J4)j(@sNIIkBnk22dU){@Ng0V|mTSogRR9th8aqIM~&2nkyx07+)nB$y^ z8L3*$Vg1lPt%H1$5N>eok%c#>&sz9m^n7c$2C zkdxoiy^6|WDc47n(4G1OlcRs zLw}@Lx$_km{lb5{_;nTNRf?Qp-6PL~w5NTpb-pcm>!+T;MOw<+Jj*<0JL2u6_e}aOu&*s#vR8%|Fd0s*Ir8aLhdv^T((@)z?Kh zIJMJL8nIG?=F;cco-n!^j+(NfZY+QnLxxg+rF>cYLg_}@WJ_qIyt$dlK2q*rJw1he zW25ScC6X+PUviK~I3v=y4~Uw5oT|wJN41A7-9>$DaVbenpD8Rmad1b#xB4yAF|@Ns zh9@nafc6!T@CG>yr1vUo(Y`FCw_3E;R^Pd`fZ=yG-RHUd>(1e|k~xAirzaQpDpUf3CISz7K=k2*?IzG2Q7~E zR`>+L`F^z>wft^2~g_*&5|)d1iQE6#0oH;vVsEsE;&UlcL5@8uOf^1wo#DPEA52j~o+>Rvp~0A>6qw=}nK!lOACt{VSPP ziftP@QflRWQvAmGRv4~g{{BtmbMvHKnSV6akS$`*s=OzmH419~0BGZ;Y8_Np zDR%QxXo=g~(wPRh2*dTHhS$&ex{9)NxY!Stvb@{Sc8Wh0W%#u*fT6h#-nps?@k&D2 zGnFH+I0n3?T|RUt^EiOmDKwh?H9t^ zisefaTNeHWx4BQA5ThQIZi{^ZUPlFQ@gq}?Jb9!M=xVj!j<2o>FY__#O?m|X01vIz zi1L(=an_z0*=;YNc#L1N0G7m~g3!2E$@JGQP5qRZRTT5{i z2azPX0R07bQvSz&DG>RB;vDt?4ShZEYg5x8)1gLq<5d82#e3G7b);XwtGy4sTZM{` zz0~~9(!XU-8ei*cV)kMvLFVY3s2@(X?w%U`f~~G1jNjkgyAktwU?}`JHTI63q}U|o zhQd8-thakOQ9=MuwPw;Evu*p#_~!fe8}O!;qP(6&?I>OyrftOk04k^O1NLsw&armd zjjVU^$2mk3s9+R(8v1+Uzm9CQ9R}h_HibkcxZl~v_G`^Q?b0A$wKbak5cmp2OxS@rL57idVOl8 zQ?XH}Z3{47$gTiTqO>ELNe8H^3R7^wts6!_IIPs)QMe>{hwVG#+fNN>FzFY1boVcQ zrdX!~_efOw^~lKJc0Tp-g}$K&iKM@dV(7Na(is^40DC=sdSmgg(+?5&S4#0Lahv-) zdzXtLB$^d=P2m>3iF@yKNI^e`PLG$j9 zM++#m$d;$h+V-aedYzw{Geu|SDy&Lzznl-l{{YokUNZ5FF=@YLTVx~dHsZr^2abe# z6Pn5R!>8Rsx>fDFFwQJ9V+P{e|*sY^$O$EFl5ClONE;4z`x9ML_>G5DI ze>&y<7WfrD8T=x!@Vb|I)SH{8k%MlPh-TyfIKWYXfD{46bg@geEQIWe8=9r%i_M38rU1r zI@==gBeBALPjAM%NNxfST%X3Xd`b3O?JD9Mg!5r^D(Xf-A4;k6RD+9FGlZp1YDu3X z_}Mipco~H0cW$KZM?2Vlq@TpslWQ82-@?(@A-B9Ael57;p&)i6(!FQmMuP^AB&O0P zh9y5J4f5?hi1g2HocZlU#=_TJtsl5*CO(b%fT8jF$XyecP4&S zdXK~W%WEsh*<%=OndI;@`Eg9w?gqJQEKsO37?qrkIRhCLk>L$G<4@IYAQGMKf~)Tn)9sO+gl#GS6YkktKp5;iM1PhC@qcU@&ru%MuQaAPNuCuyqYdt^+<* z05~5uO=-@#(?&eEjgOyxCHxb*yK9;BjYsXW?kx7#0}j5+$4c=O@NT7Vt$8xSQK4+e z2q27dde`4c;kdj}WVZpzT*NWC4iDq&UOVwaQU1>t){)qxiws3%yp}KtY;b`0uf3rv zQ>3RI4~?%1kf`A;PmpyR)`tE>8@F!82*qb#Ub`s=wRPH$hmHNLdV--vS76A;mB(Cq zisa_E61(JeJt?Qnuv*0{T|vWjIUHv-u$r@o@%rYo;?s9G0OK`rbdsZxNc5{oq8me! 
z_{iY;be;rxBi5>G8iU+IK_qe3sB7BF+ej5p&7Rdp+f^02fNHIej4$;W7aNr`+PK*?voQsZG0@j%;d?Bqg*sfd9Gt9&T#b)I@l#=Xq8g8>2oCMFlYVF0^ zJ~M&rD~qv;TTprD)YY4t={&QU06z5&QhT#xqI79-61H$N-mNE>E(%~C_2+M_v~ys% zI6s|l>H5<7AnxXpoR))dOzd>)#?$5una*=wr{M34t)*>Ch2eXXUJ5nTMj4Q2>rl_) zm5Are86NeGXv#|D#!-&!`|H6!7PReh6j3~JzoQ&id^N~skL8hDK8C+LJSXDmqLe&y zZ5ipD*S7d~;#{j7VXhZFj%&$kOQG8orK)GtF{9l`Fmk8fwr#YiiUP~?dsmM5Q{o4l z@v}}C9-y8p)-=Bp+1s$7oDYTj_8HQWet!wUeCh{*e?7R7Mf!j4=JxTG6 z*kU`5m5=?Y3$tQCeKS?$)FKan!H=jkl8YC-YFu1gv*U4L)}s4liNohVT78AwOaadC zQ&ZgpWZa4cOQ#-!=aL{@eoJx!vwbsG;PE7bmN`DAv1Eyv@>c`>nwf1FpeMM`N}pzp zRN1w8s=;{Ac{{Vzq&gq8aRv$`q z-U5svbRc7(HCn>D3*7{d;>*fGXMOJ+p-&Korv8#_$U5A0~Y_!Q$l~0);@R7zVma39Ar%pFM zgYdoF_m=48+vsa<^IdH@8JXEaisAIn8d}<0FCh*(S3ToDh;d78BB2KW^fl)D+}fUx zEIEA6FXLa1j=8r?hEpe);5XAX?SBOBBC*kJV-6Q;0guAIPP%v0b&oZUNqqhl?%oOU z3hBCBDjBnnN=g@%%agK5`cZYwgXL`EufgJV2r|RyO?>fj@kR@Fc+B`=$6Cj@_}dPh zaEQ{36UI$fCZT4is!dO|1gC_#9C%(8clCxz`ZnQ=a zr>a|;N>i4S=z6m1!s;di4hZ|8)+1|wXOW?mN@IbZE1S{fxh$q4B_G|8e^FhOx)W=* z09{6~$N?)Nl21zZFv?u5K=Cn~rz4lwHDvP6)1RezMyYqG%dPpO80^&Ypz`^yx5auM zr(tr@dHds0kW^zl*EJrM=0}+sne*KAuQL&esR{Dfs#-?ro_XIroX!do^Wjv@e4@A_A~T=*8lQ+?M^thw%eJt|9Y0_j>^ zxi+@zZVMl}G0zpp^|?h^qpIn&?iTha0NoNg0thwPX*Y2u5xzmkL&b3a0NK*nJFF5q z#4*QgcB{#%Pi=zHhXkIUm72y|pyey>dNh{_7B@Eszffxz#9G`|wh}Wi364)s*13zH z5J@U+RdeZCmv)!B<-MMWZS)ub*XfA$Mf@b+! z<{YW@_WpJ2UKi0Kvav{^1Iqw<9DY@qr~D|=?XFf~xujOyLG|^oLbJ9=V?_!8#w*H# zl-qJUtJQ?kP$*5N!Fg*nt-=W*nHo70`^rHCen!0F>-K2yWzFMUTi8u+BMg4(b^vkM z9OKuD^_E2k7^{|~M9a8WY*aRp%?Zw)uxHP=zp~}!n^?A=sld+(%+uT!S9R<#0qI?y zpWsL#iq_U!Xf-SQX7dfhDJvQ6jP*X1>6(rDC@Q}@dS`+4uQ%~uihr|67V`O%K1LuT z_b25woiY5dgyfJ)SZ+^bnH4FgZwjG9?{NMTOPWkqZ>2PA6hY&R1ED@Ri!NAs?m!j=(gEHj8@Bd$&jTxBOT zjAt0eH#=Vi>i%8%h1iIs&e9J`!T7P^Ht}R%Wy;4KXLkJKKVM!cC5~MuP>eAksmiz< z8r;;~wHrn;x!gx%-oCR3iExJ_JRDsGIddcBKa1LZzMJ-24JtefbuResPf}~iA5DG< zL%q2Q1$`;wKMT5D%X4OKVfN|q$&9dTIa!c%FJ33I=10N15nY6dwO zSui?o6_0l5bcd7pSDh%uosnld4xq6{%QNga#tka#jcx#m_YYA{y72m5zVhxi_5!o# z@T`OuS9QluD>oT-A2RA(pT)O9kwYGO@me<8r=74q32t#%R{j#Zxaj=*jMdlho}p>B zLglg9MHZUWSBhs_X{9aO9zY)5>we!zYj{GzAYV{>*CU|GJQ5Ez+owwG>?K(gVfgiG z-U&}aWh)ZLa%@O03m;z8$vi>>ec&-t+*$?%=R9|=ZvOyBl1JZ?pmZQpl^HFNqgh*X zsw?eROOkARxLE_#;Pvn?An?xe|h)= z(yARw8A&Y11KzT3?EsJ>muAm6JPL(Cw~T8tnhkKrb^8^^kf zsPb6G-NsKf%Gk7J7@ixks%fg5YofC8lfkb(6&E$hjY+)|J(t10JJp29CA)R@-HQ4n z!T$geZ_)%qFn&6doY%}~73O|j z+S<&MlyoN@c&i$Qnq)!ct_C~sYtk-!DCr@}Ad|H~-UpGBbc_QIm^JK5lV<96Jo?v1 z+YluRkTbYdn`MxaHxfRa*JT%lWk_92ti&Eh+UF+Ir?kCqlYo6F_I#k{bQ)EYT-}Y} zxc>lZwe?E~?F>XJ=h03ols;@MtQ%u@0Am!lbIWiP5?P7kn!OUSH@Q606Kj-ajOX}H zMPkq5E1Onrz>Ie}=C`d9P^y!Km3@1PyL;fTx7vwfLRYvo9Iv4HyO@@qHjtqH06UuA z)4X!m3Ik;O*DrtJV{C>t+)rWFuIPFaTa|g-p&!GFi_c=;E7a|+d_xq2oDg}fYkh9n z!BRGoIOe%EOI2jU;0~iTaB09ektXEw7POB(#5+ArJC=p66Kmrqn(cJ2117bntHQ)c z20bhL4e4XeI zRja-n(&W9Fl1R$3e+yU9t)kj$wxUp@<{p*Ij|atRs5_znPjTL&c1Y5j)bqGmw8{L3 zY>$E|62m%_~~>d7ACp1ze{FAuf4cNp(jG}La+wY`xY#og0BJj2hu zL2&Hx9qdWRHD*{!CJ7|gP4wmz%NpZ$xz!!DB+>0W+rq#c=e2iM%;F4is-y9)8$i_H zdCIJSIQOpJ2VI-D;Rxq6{hXe}^TqCAYIZY8AuG6{?l?8)o;C1Rt*VCm1m-YTJK4Hd ztTv$nxxoZ~b*hy5y`(ZXn4@D15$RaU5>D);%GHmS=hVDo@bc_OCG*JvKPggKTicrN z{2%*6-6V-`e-w&O8^=oPJY%8QYr#@A4}w@R9qZ2K_;^0htfK`Ec8=n;gO^h&MMvRh z(jE@@foHBPWKOM~pzv$Axwy6v`H{F*_BHZphWEpUk0YMy1ANt(2OZR#6~N?^iLmN1Ri3^hU!l^uGdZRJ~tdelqz7GsJ2B(RJLN)>H zXpsW|!>Hs`c@J=z;33X^YbOSVPbco1J90mjQ`D}SC;>n+PZ>DHT)1GdvysL#Sx&D$ zcZ~DU_N>}!_cn@7#}nfnUhYL$BJ*NWaK%1VKT}+!xNmeMKcZy^nhjQaqMMMxAwgXoeW4+Q+U?eBY*O>^D-W z-r;0!nQXV{O(JNlueoKB+qvml)*4LDjDUTQda7;pI%O!MDVBTp`C=$NcHm;VFAB`| zm&$`A=kTp-Jq2O6+*CUsL0Gppk7aw+VMXeDpQo*M#*CxyGlkbvmn=?)#QLdC(TMQF 
zkih1uX*%Bg25!7{sbO1fa@Yw7Ob!?~KaFDDT1Tf$HcW(r*!8Z(HnoV<6|BhoX<#m` zq?c<*!=?sEuPbPrOh(pFJ7T?_JCwMDD`A1-70v7VBFnj>Uz^cJE9fv-)d=#)@Nu|T zeq?#1kclMu!6P`vYG|~W44{<36lz*c+!rjs4Y~Y{H_n#YRr6S6^uX(0oG}XR9v{N& z%?YhmBC0cry>Dv@M!a?Av!K*I(<^b$a^lNwg9oo{ z)jdYmJB1`xMdzOj2Ip>hsqC(qh9ouU(yXeZD7M=uCxs!NLL3GS>rh?z3MlijFb7;# zvg%)Fm<;<>{{Za)GLgqV^>n2PCvdxaoFv*Dc7tGy(V@B26tqa@KD)8SYHdVK zt^vnhE8ToA;awp#^olKkv5mY}4e>MK`&lF1TB%S5NFD3ZldTGT&D`^=$_wFI9&tE< zc`5)L7Kdv%dh1P*=bUkdqfDy*D(d)2=e-0!wb6#yJ#>(aWaSB*+7$kD;t z_bd2n@oseTuA^xf=AgM2(A|)sM&L;mO4{`_?K#_U#yVFQf8rByc3oV7oMSz!=O>H2 z6lKufag-vN)anpHb0}!eQAQ8exy@9IMQjqJ^vy|adh#Yz_x)<7tpxYWltNpl(z{`Z z=7%p)Q?!@7E{S~aDUI7IdR47j!OW{FoPp4Hr}!5@5?pV&1({AZp4Gjr#SNw7#5RN2 zQc3djGdRV{rI%9w0EVO~ZxC)+kF9rF46<$xR{(LvaJq1g+7w_xBb*BAbnS8!U5LQ2 z73S5dqWPN$!cmihc0EH!id*QijKV)pTG!Ha_MFWVoyA)uwQ(K`@qAum+sMEi^v*L~ z{{V*dNTj{FW6*($`CQBE7}n)WWNS)_Nndm2nPxIS&l@P}PAX-*jZu~u3y)g0ajDzi zTay~+-xS{rM+~}-*$Kve;|l$K&D`{P5nkNMC}r|TzG?C37W0GVCc;KddQ<5ZF9twR z-rUx0)`1(LXw`@ZsI9DFbIx@S2E@u$W6pXIdsj1i@b2f##7xM05;I=F@uh(tX$pN0 zrAMf0wrd=2La<)E)-@uMxVgI?F?r!_LrH`)hILVr-73zT;~3&Y4Dy6k!00nxm#XU4 zT7(8T&=(^cK+Spi_;)?!)Uv}0?I$N9p~a`}51B2>lj=725*FO0LCT)hnXKJKBw=CL zN$xXSZQvW|SW=9{uHo*^xccSg}(+TBdBmXU`hpwzT! zrM!;r1u--Yk&dKSZ9I8WtI+T`rb8c>8+@CI^%dV55o>)8Sy$6T(ta9v>eAy*OKXV~ zx-J+o%WayIm_yl4l`61oK}z-uz~=xz%R1M+B=C z;PvLb`i|Njq%FHUo6CSRj8bncr!}4*X|;^o-+lf1P1EW+4#d* z(|kFiu>}DO!si*SXwh%po`}k&RkT^*J|tA|rOLVv{{SyMnyq>9PFrhVkgM^HwZwSa zQ`I&7YD=lfiawFXr-fRE%TIflz}x&eBpicU-K`>Hey2kZ#0%@$*@I#~?y1fyQQ~Wx z$p@Ld?l={Trg=Jb$yWhE;eoD;$%^Sv+I2kEGSOXwoT7`iy8FShHub@BO>Z`(3tR3d z3^9|QD;CegQp+A3jAti3Yft_Vx$ZzA@Pmv2jE_ov4VBpYTgyAlZeURN>0K_7q-pbj zp(7xiZX&Rt@KQ#FLTw(EyQ5ee1w{;1Q>e{k^(yw4D>oa#dVGpp5SvfpYevsWhRrr8 z481{7&1WU#i@8D;2cgYt=)zT)Jcno;ZLVud`W*_^xahTei)s9_%@k*i@e)}34lCJy z9(a+~*d#D(#L)*m2*|8!;gukCt^2EC3~Hq9IO$mTk?sU`uFA^j@+teWIorE(!m?+8sp(xD z>b#20o-xZ;gH{)3s|I4og+S^_^{R423uNSxT2m>KUquw(Cu--pu3l56GdRRqhJKY~ zu~xwQxTmshD@i1H3PA0eO$oNgAW{kKRb*2n>JZ^U$u#)Xsbu`BZK5R3IjkiSAd~?T zr(9yI2=ECVdexj^Nj+#FZ45ae)-qhq<)>j5+&bWnDy`;>lds(w`c{->N{{DK5OqX!Db`1c`m(dZnXzEXxNj0M%?~2)mjQrO2fj* zsoY&6;jAIUD!5->zok{z^%&hl#y=J5RX))^mP$z+MObkn{{Wt9o~@~@kpf+0RpjxC z@~SUpk8&RSENS||`-O_I?_1H_wZuG~+;tV^zixPLL&wZ**c{hcqH0CcB#=nySW>HL z?rC0{lr}8t7I3L)CBakCdz$C#kDPPd zo@cm_#5p)rXp2!vkeL_;PNN;GOG>**(KFMz^sKqHm}Ix$jE)F3%-ib{TU;N(7cX#(Gyfb*K5E#yWaa?KR0Rqg4P8>skipJ_zF@1Xq1b zWjgS&j1-%8*v-;(x$Q3ehQHa*jicJS|C-37aahseQ#a9)7X8T{JeG*v8Y~070Fa>&N_g4 zRu-*p*HWp#&MORUNhQq(vE|BglWuQQZ{gp>tzz3)iaVw$$ zdRNST9EuUB!U0{t^T&Ghy-UW8qFAg1;4#28-GIiwX5B}0ZjzL5(DMC!C9c174nm=R zYeL7vP+dl3DgZo;)-JbWKB1~hexo6dRC?5QzAT>BMS?yJ;pt2MkxIG@VQ{T zgXvpFpR#I8bIiMKw8!Zs^xNlD0LDghisrmWJoD>mE0R?8$EA00+Qly2+;M}CTFKS) zzbJ+(tVcp?sxDH6+R(}~aj2VR3rn*!DvC&Oa8IpYNUkj85{1C`J!>Y?1!3uGfuD3w2vqy~__g zSGPk_^H0C9d2U$=u~CyH8g`}owse^7Cp%lXu6I%Kz22v%3lx`XHw})srL>Z3J2=zi zm5)|pDyUa=XDF?aJjzQ$CtHJ5*EJJoY(hp!`HAEYVOR;_ol*%Vx44oTE6}wNiyS z9At`?Ib@C%U;!BVRYtrs89r3cOnTRBYDO(siBfN=HOnQ;qh`_dG}$L;u9n_Gv^OI> zReKedOT@%~AUQbbE2F>r9m6SRMRXkJt!VKHR8cYZO{ThyZ3Zc0Srcg|(y=v}UrhjX z9-_0gj~Pd$TohItRB^D@T;3^NR@LP{>erLHxHAlG7edi`wupFooVR-&j-RND$kaCg za0jh#X`UXr)h19PDIZ*sRQ?h}VXR1(6G*Pz5i`KAY+3Z(JqFn$1bSCP#YwMp^dW?b zUh#vYcpl!-6{L)0bCce>P_f!X1_WrLvTuB6VQrt^C(1K|YsP$k`$XyXcQ2>exSs5t zp%nD`*AtcByw1ONCXdH&6kXiuGgxU+`PVWHi-UkOUUTq!;3bZaBI=sa7rK!?VPZ*D zKGnD2tyQ)CMds9-d$%bbI2F?By0o?uv=Oh91~|uBS|@gll$52)*{R~c9@zLwz{diI zm<%E>YVnKj6yJEB{iVE@E$*l^q{7B`dJeUkl6r}* zWqX!2pNBTHTp1z)Ec5M4pm=SeylBD71C9o2i+I9F?RQC)f9|p3u6SF-QY@`2Ddgl} zXNu*V)U1(&q?DqQxz^ojX4){r86B$Kg~}oT*dJQPzwtZE6SguGWaEm((|lKLeR^%C 
zC1lS4oL3ZaSE}QE3Wgy{c7fO2PR2qvW9w0`%_uvwS@K=|o@Gtpk>`wwM+{z|C~k z#6p}onWbw^JL-)QaAMjOuqxad!$JTllkZrv1}wWydJ&46Nkk|bPnX@RUS!?RHwTGy z=IK;tapAp68_R@_M?mGUs@#4VucAI5Xo;~~qo+kZ{j25w01I5{^UoZH;xNcTA&(XH zcf%-r-8w7VcO(?Vk(mR0!|BCz(rRaQTCr})o4T}!306=s?Oe6|fp--((Q5YroRXwx zu6eFr_@iX2eiglWD4aCoc4S;iaC>CcIozL=9C1#)kRjyLCFU^80Odd$zk`9+yqSB) zr5|*F#kEM!H3Y*WgTSdtV^yc!&!uowZYM-p89mQhR&lfg>C&^NxhVX$Iq8aDw5~Is z*11&fXwhKE6y(xLa5mr^oQw+1xV-@3c{r*~sQEB7j&j9_P(3JZFiEp&>iJ@e8PBaU z%Kc<^Oy}CU*{-)ofX8=S8iLZ~q=Yn$yX(zhME4Fp%-Ft-mWC8jz3zX9jgyt#MAaPN0mQX>8CewG+3YJQ@}Om{xY|3 zF|dGho}#;pomDMYFVo(*FA(^)#^T|llHAF+)lc)ULz+Cgvgs*vXpSQH#EOnB-X(Se z8(0i}7_L{ux}znj5kyftozHtPt47?#yLR`et}PTJSL` z-L~$JprY1_+L(6w$eP_%8)yTP+#2esz)n%X=Y|5>y=R^~G@)h#!CR81xse}2 zDRC^fxd(YCuS&}CozI)Qs#s?r?c4J6TLgg&9s6~IF@4aG0u9`uQF(u zg^8rk1df$cN52r^<{N&sp+AW2UQnTc9)rCKFs_8)wsiTx&b65ON zuKxgN$p{0GakHCXNS7q&x?F@y`wYo1iz9uiI6Gj%qX zD%kE}x`nj(*e2Yr2JE~1Oax`>1j!KWbc^RtOl#(nySR_bz2hzGJ z)=E09$&$aleGaZ2NgGMV;2J&-58kgZ)n&H0)a8AJ7?OGIU0s|qTUh+WmB)TfV{2X% zGT6y>jlXv%2Ax`ya!SUOrsVZ{57Kosjxh?!yBuvbS5ea1E1`5$Ix!gowH_YO#Mb-O zvpD27cmlNxO)~j%5Kllyde5G4e9vZ8YhT?qq=FmG7=4|$0;4$<9*?Jeio!`|7$=eR zthBwB7=%bVAG$crbQTxse`Iaklq6sST~Mm3O7C+GC9#vEUWUG!Mr^~L27g+&s?8PS zNQik+)Ea|b(VtI;F#M|67#&4c(Dc{0Wiqo2p65JO(Nl_WlF;UKn|DVasVtVaKXH{$ z9M-m#J3KpL!}0TO?OT2&(`{L!U_lH?X3tMySm|yptig;GUr-KBVNMZxqqVgQBSr|) zSeiKiEJt5TO$zPw*{6xe$<Io{Yh=#y*|KxE_xkm(Sx*wCt)t#7%sxZ=Hv{M72FP2Muyey6D?%_P>(^f>gh8djlqWpDz?69TMz ziuFA!OVcIOVx64`BRIu*-^9c>7g6eV!wSP>gb(LZ=$f;~rr1Ev!~!yXDO1KQE0bEM zWQ-G3oRd3W5YMKqjkUZhwr6m^Ddg7)<85B!N4<*D*Up&8gn+{*isK{8MNqnlC10%~m){oM0g2=ia!@Ys20s)$Zn&7h8K_$UeT+>hO3j+fLJ-=~oj2za;Tm zu}YU(P>MA{GXS|^SX6(!bDQX4Qlh0c`y=M&_;s)Amt*&ZG4it!o@)B|FIDjFlW}Qs z=_HSxug)vgH7!*5T^K+5-@I6N=CC|ftlVoFTF(SxM@8JhM_gBh8nLMu(v#}FPE}ph zN(UR^y(h$a2Ak&ET$Kdm?L3dAbee9v;w!fb(uRvT^BIm!VQJnbyu8rl%p9T4!NILR z2H9#?P`$*fh)E|0y?qC@bm}IxT~Vz{(v9x)BGzoRcAi3Y24gQ97k~v&@Q1?d9Xj@H zPg4E$%!Io;o(Iyp5pX8dOmQ~tzbgjyt@X8sRGJ%E$VE8F=~WCy9#fS1nJh$8*JCPO zde=|6MYSL*o~kRM)@B;MjJGh2(qkhP+39}=ygx0fyRKb_Qfra%uAIIHnA%(9#V_1b z&MUtVrAjJt*mbH|s*y(rG>fI(Tg)5GW+N(k=DkA;f^YtVa+c?YL(lMx_Z<2(?E?HDIOm%{nhVY zbK;4tuda;IN4)0?fsE6v{1I<)eww_F%3@MB6Y`VmRBz;a-A*RAk8CmEeQP{^CMt?c zOLjRGSIClPCC7(n)OR$HvLbR9rfZ|{mXenCtO)s=-aC{cv2WbMthd$HKKI) z()8D6C?*4eo|Wap5$%*?3)tDpNkv6!L9bX?=^7hLG>R|@89hyTuYqkM)_hGQ(jjYL ze*1LKYRmDzi0}0!#QTy6_zF)46?4Nn%iG(+Eu##?XFi`=nZgsqDaBbO_nA%eC;2JtlO~kG3E0^A6|rhP^6wv5PQ# zNqGW7>Qx;25t{LD7Eg6F&E-iPP5e8EL66rJ>K-xGtnH&<9LX<0vGf(9=e)K{pZq3Gdhx2b}BM|m8^7m_E(Rz}GDIIerd6KXoKk~qA=0OW)6fn6o0 zff9Vkp_KEry8i(6s~^O+*9Jc#+s#ef6$c7`8u{rdHrqYa(rF`@wzn`_n3RcJ^NtalUF6;9h+ZB7-AG8$8WezmoCD%5?PmaArbHEJ%D z>}KnlQs1+0?0Gq;EUk^I8+04M;15As8rGkEq?qNENQP6i9=z0X-@|1Akr#WPy`Ngj zQI$G=_U1`z%O{~m%G>N0goOv5=cR6Ljbq-)v(bp=vz}vjZ|4QU&V9ve*p z^_Y_HR+{b3<T#?s+D#WVf`igoyy$a&eApqZK!01<=k8{;a!Y``s!g zNXQ2reJe8P%l4%>$j^Fhzlx%g)TB#*zO=f8?C_(3%XBs7Qk0$fx(=h+8WP{Cg^zGv zF`CiR?bhQ;yjE?>GHVJA0kug}%5Y^P1RkGS>vX*$O*Yn8C2;%K8R?4lVQ5pl)~{1N zd0R_knD8Eu`mnl0Y%EMvW2vUfl2~dNQCtwBKfAY@>HGl!(KUOeLIOzHs^1bcFWpIg zs8Hht)Z(sDjOaOaN2d$ADrm8!crEX=cfHQ$#z(z!hC7>GNdPSxqhy?W^se{9ULx|m zGX=RkmG9Q8U&n4)nba#cY*bD$g(WB)^&=TI%5(bGtoO+bF)5UeODL+EgWL;s-5`f< zG3!1Vl$Dzt~bY*w|4kekj2Q#uX^CAUbR&1r*bVWZzHA_miJI2O)le% zo^klpmm2Psr`fb*sbDYz4A&i^YHIN!H&Qy9W&Mzv`L^ZJzHC%Qdl~ZIb9bt`M`@&L zcJlP+(S_ytew=PX|K8vXNZhiLXtRg4JLci35TKuH|EYPVu~& z&ck1XCpr5xht#NWVRPJk$ECW&h^{0=jmN%hi)VA zi|N;TB<1ZjTX4ATCIolCaQZSgdZW+ArHw@)i&SSLYT`Xno+PbJC4 z+P6mQnzf_r6L@PwH`icYT#>ji>CGmCphd1reXHL~x6e)S##;)!b^6z%IXO2MzvNXq zt*dTb(tJ0o_=iXPT(=_H*%#$Q)&Bs(xeYqz_U}=i5i&&#Wch8Lq;;sTd~2x1;mg|; 
zyf!heN6L9&>FvdNwwvO6i7w-WyCNczt?R{j^{QG?<-2URD7YoN>UtlJ;DYK1CNlVD zN&6`EAxTL%oMhfI7U?CtOi(`?OIGfKDlLLD;Ik3+rJulhWz=#@Z7RhiTrw{>BC{^; zTH5m3+CHU%XFV%}6Gp8XeD~Hvr*DQQXL)%Rou)nHl5(Jsdk>{~to}Yte(p%*c_Bsn zt&%r>KjU4e#x=Loz8naw|Anv&2at<&{pS2w>OZfxF`=w^_Cl!DN}t8$S-2~ z>KM1W70#_Um&-uS^1d>2TlX4#eiYImNa1FcMqH@m`&5^3-szXfma*aza9sZYPfF6d zGU|HmzL(`0O~hdEGlBK4D)OsA-Ws#D^48_PmpI#7=rswXlH>)?A$!-b{5A3Iv_2n` zO^(U8N58GjA_^-qhi=|}3AUP^KR=$;~MSEpM+)8Fc&Kv_?^x zm56MQT#EVI!}k|n8`A;1kIY+x$s+}M{#EpU?BSvPr%=1p(RRTcU<`Bw=DjQ|+_5yG zwZ8sGPq*ifEaR`-d!54qxCihR%j=PqC%HA$N}D&a#szZvP=C}GCOb&70E}e$rZ~&2vtLzp4``qDJ0KLBh<9} zzd8ucFfr1h)#ozCK*0mBtgR1G`%Ed9&f$^kiobBXl#n^DhVLB=Qna->$f#%aOugdUxF>r9vAizM@0@`-8L5le8W zdgRdtpsw2$zNWFYNz8HX2N~d2c9(Qk`9ro5xjFCYT19jsO|#J8fo6nA#|zrDY(m_x z$-)i*`c@n2B3P0buyesRt)wGHtL}NlbttWfa?sm`-)~XJV^4cnollq)kIJEEl?GRl z+*Mn#Gp0}t-%=}~D8!9fM_bcv9^LlDgV!DF!hAJ4LwZdCo$dN@rdt1?zpLns5~8T+T4)YjJMAu>n{j@dj` zJ*3}k%V2^@1y32{Kb>u9wo=|9jeb<=-n?|&mC@-*q;uM`$vn&zK*vliL2bGu{{U+o zfGe}U@ZnGxjP~nXwy7WXT#`uRrFL3wruuzUacwV9S?xaVELqqJ~!0cP}4=C z#-RTId;7liOF{5mxU&y&IGCPtTNyPbs#fA1%6^B^scFv0tnWE|bW(T~+ftNgFKG49 z`S?_8S*x^kaA>xZTFq|>O~a^B{c8(H(dMN{65VUph2BAv^JJmlvFy$`_N39{9!_gylGQ#t3B{{R79(S=HtFC8pQ+~m?q%%^r| z)Y!GMJpQEyK(8{_wJX2vr)4 z&2uZWlEsN!A4*7Ow9=!Apu@L9D&t@I)5R~Bl^wmPm6C4vHG^EvNaYeIgH>n(GJ@QK zE2q@et!(Z01JkWvzq!-pgv=F0;CgZ^GgG*d?@xrN-hBmJBNr>d-$NHJXS z({kZ^wQUZc!`2fnxbn&Vq*kwsZ!GnlIqjfjX&3JS$*woVdiC58{hre*?z!F56^)=@ z{hH~emO%2S-x=#&5yV236q4N5gK&?%@Uu?R=leb&k}krWY&=$OrF(hi{h{))4nW6R zpIS0&R}tUa48n{zfzXOyh?;~}T1}!xzcb)sj5Hm1N^^Ryfd2q@QZaiRPNA)N+Pp1n zGa>2+t!V1{VAn2T`%4upM&$;R!hRSsY9ecx#BCzwK<`@`pNDR3ZxZ5Th=Y8%^!KTq zHyNkVo$2$ZVx8B+@n{5*ONgTx;PIO0<<;P_x!S}44&3zs*LC7h+RPSOeUywIVgm&{ z9`*`EPO_ROAR#z($!*1U?> zP*5Hz5SI;-YVEW-j)fc+gvl%|fK&|QK9!rVS;=Rp84OIN#tLBZRbE$wV&kegWzEfg z$G_|E;h6j}aj2Bgi(^ zG0GQWjtcskwGF|uk^^rinHf{%NT;`@PBwnYbJC5{-7Wq^xz11brZ$Htu+$+IMn^_` zD3gXBqNJ94IX)(}hNeQnBnugF{{Sr5@(oQys*=JSsl;fA4)OgYhffOX3Tw z$rPKl2(OQB_2(+|t0+2m(T!@3yW(z%Y2o>!wzs(x0~|?L&l&m9grIld~>@6FDIvjtEc1B$3x7Yea$t7|6 z2amNo{{RlbZ7jRDM}S!5n%uYX-k}(V+_{a^Y!RsIT+V^usWh;*w;p7YTY^Ex20d%G z@D7D-scYYAxL_99OR6x==IkptdrI?AyVpZ2#y4r19w1AI=eM)GSB`t3xHEB)*0_HU z-pi@#?*uEokZnLZmOKjee~VU=KnO*3tX%BuJ$D*5yy*kio_ZFr>X_mO>cPF48y=$lVd2w~A+DQt-TPGmpYV-bm?J|pEeOO*{59vkCh?P4 z{hlap*5=`xYM}saKD<MgD5 z6G*bOYzYiHfDfm=MW^Zp?$p}C>|%_NL-juOrKh%&;k!R2uvGylamnMYauz-&hf(np z#}b(L^y3-#t{Rlnqt5$(Of|LcZrr=v+(wc~x(sl>gjH>OQyO-JzcRD#+N@4$mbhe& zA2v2S<0k{ws6MM{tG&IY*eK-utT`F?sl!TDsKrBmtcZKj(HkBS)nT^NZW`m38EwP8 zdMAVR_|nZKNl1o7A=Qsc^BbE5h9`T5_pnm?1@-e_qGqpPo-gamr;&wUKo=Rupn{#^H#h+G&aQ+>LN0^ zE_&9Mq5l8~{MOLl+D2PcM%aO~w4UVpfm|+L-khaX+RtOPHA~@okk7J3w?;o`a6r!N z;87m43Tv$zs8mD%sN$;Ww!hfUXPWoz6Odh(XbPGC04AyUQ$m}>mp3|tEQ=dRPV>U! zJu7@ANX{J8HhX-GF6-5dTRQ~VsxU~QXC(ClIIl?f9VVZqYnrFp;YnIDp;^G))1^_- zZEb8kJRT)OA>%!>^`?9v(QbAB02y7ADy6$kwUGzRPZ|0SmF&@e&a5QX{{ZA|QXH~T zi@QFQ(=WU`c`n(!j?#Q}pm=A-{v))% zksj94Fp;_b`06Xud>!K}JKH#}?dAd}$!nYb*#U0+QAln*|_dnXr zc=yBKhI;0$;CrtWhLSgM-)B?MsPFjT*UDy8YDW^59VuB|`FUtz8mrp!#azXc!k1e0 z^gjUTcFV)O3p21e+{`o2=UrJm4HKI$;YOLpeC9~Mg3Y;XW3 zyUkYRZQ?4Ub9$bYz-#WZ7I|E4^sl7FTGmHa8EbZNTBL$oHrz3hf-B9Pz?x<}wn)u( z{vf{z4CEc4azCYc^s|lbPw@hCUprHC*NQtI4C^Y_QN#-Tse|oZlTW!$IQFj-@ZPKC zYh^*nVT@O&X_}(k+s63o!5)XbbkmjNV<{^#?Zz|Hn$Nf#;~A^daCei(Y}Re`-dYk! 
z1J=1}M^e)(yo#XY=hmssaAhMLj@1>!g@*%!(xOQhJ9C^?4ke1PF78e%*fpIxZB-<vPqTjCMOH&NSlN+~KeO7bTUf6GA{L2Jw!juIbt`TA)>2 zWY?kUv5zCl{A-v137hN1YUn}@IHMI>a=qD=tk?l-m0ox?=KdqmB)znsW)Dz7>}%QX zVnb@Bxdh;v=C1TK^PW^AWSo)OqNX36opdYmBg6FH4BYS7U}ee1LE^mwK+$5;;zKFx zz#g@^bhf&G$N(&Q4_d^IS!cd|z~OoU&{reFMw7eMo!;J6RhEZRV^|#=9lbb2+ zZZ2&m!3)ukddkxDnP#|^V=P>fLf1j#2_}Z(G8+AFJfFwI<6`lTW*p* zTBIQv>9NVamg{`B*KuqooZ}#hp8LbnK?#x~St193D&D)UeX4evWb(-M>MO1Al-8Qo zkgmCIInTFR#bN2npRtbU-itCB)Q?KN;!RpxWuDhfpJlVI zAIm+D@PqWOTjHmN4uf>Jnsh7yIbegKtmt0i?@UMYjxoyDBr4UR4o0tUywa>)HOVfW zO&dKG<+Ncexm*%S@99aSMkBc!fM_BkKJ{t~g}1Y{Neeniz#eOR#uirgz8;QSW%7s_ zW&;`aHQ!A-iA}2{aLKv5TwQ9)-^6KeJVVP7cO7dtS-gUMDoEpt{a-BD_NpEc)Y?Q4 zv+%_dW zU%G3im*JhhiK$yj8xpI@2b>z^ej(agT-+3LBMZ}?Nsm~5-e9@NXcDb+b4KQpFy{nM$)~k1at53IcMp)$4-D>*Tv{2^* z1Lo(EQ20www2IP00*tZhYm!m@&A#JE$tYUKqxefzjXe7zpsRz7j{cQbTemm5t-Nxs z+m91?rryHdc0`M2@nqFg>f!wSNpolKJ*zAbv^qrsGD06gjMs#SBEDFM2v1 zEIR#+m+5edyVLkP5%jFT5qO1leM9$X303_o5?gn)vROyYH*IN`rhO|RQEoHU~fY>EhoL4oZX^Y_f5>01PcJrP_7P@Y7 z_YK(Nu&&$VABik%bxT<-r&*ej9v;hb(pI0ONbKT7XB zJ@HP@#__?YMP!gh7a(`s2Q{@fL1E=qQnxOv^Ev8XT{L7~JJw{jh7h=l-Ow{m2V>f_ zJ{e16G&9}7F_{A>!5s}vuXu?(IiW{(bg%7Y5(rFNHxZM^6|HZnNvP4lrsd{U zayZBbzu{ix;SEV3@g0zrGSia~K8I>Uwv?8!M&KZ2thVVOI-pnI=ZY(~g+$US*+pj&yl-M$=MwZPdc!a4(V6;<&3n z9(a<|#d2HS>P4)ZJ8g?_MOe~2QrB0LLkujRy=iVH=@=QTF$xo?s!7?|-o?dAm++4B z!uB2=*6gm0ybX7$%H`${$C3}eJ61o8B)qXpeLF>nn{<87S2PBULzTDX`-_Ra|e z^&r;SG+Yym`M`E)%W!@6+sM6Gh$ zZFI=g{_(~O;8!~|@LlT5YiEezd3?Cs_FVn%#+O*};@)_E%F5o~>}SjU{@yS<)((zu zHCg3-J})(*1LaaOqt?7EWa(6DiuiYv@8oIkNuBSCJRSXyq)QwZ0$J_=UqT1@%~_t} z>%$%lhfPL?2^|9OJ$e4MPr*JJ@jt|m8(8T!Hj@|w2E4mUf`_v5+ltuikM zrTAOK@8Mqt$l`z8DBl`4@Sjd=$hAKb>OLISwnJt1pDY!UHXSLl_?l~|gqnlzwnM$4 zP6yJs@Oba4QF7i)<&{ao3LOnCUss>U9xaAT*)Z2qsC;xp&p)MpfOrR4@cZ~?&q#@5 z7V*NdgdH)ASLY?A^^L~4XMd-;Hq#u1W6B>xUa#Q4i*xv6#HprgJED$GV3_ni)%4js zqbPGt>n&UJG@%Ye=Fh1-U+_ysmU#6&J{6Fxism0to@_?@qAYCt+)0YM#fV4 z^LLNCx3((={!s{#NygEDO?G}L#L)@B>}$(4tGQW>Ym(2$(!2>iW_otAHar<((!JgM zop*Ur{{T~6#*Hh%aV%}XEu40)GfAF3J4@5!LR_<*=l!hLX9KBZbv)yrt#`(iqKu|= zw-;(8Ro%x6k9y42+k2wtE8fir~CS8N6RFIb85PYo)%Q za}YZWRt57bGcz0k(9|_^NV^dHJAo~{Rcty3Y-6{jeK#EJB!(m$W1Lsao)D8hE3-bb zE(hW(>a8-+5*zr471f8Qb!2B%>vmZdhD63Ws#kXNMgiJzJBr`(jA_OPv8s0xWMHj# zdml3{V+ssL$8v5Y^#ZK4m|jLHE>j0|XCAdiX_-D;g6FTbXDGNZPUT%$g2e1w1PbKL z%ja>^IO4h;RyCRfnTFxW73cR_kJ)Z!QsaZh?kXzilVws@Z0gO%Apr0NVC!1McF{18 zZv!Nrg0q{&Pvz`b^4$T>YJ|%z`~LYFL_GO%jx+B|p6lJ*>5V%($d5{~ms`1Osj?#I zneSajm*Lo!W_bWC-Gxn}+epzWm`q-ou8u1biNV{R_^z5bbk*Lhnzfzmv_@U_$abQf zC>$uRYs415XYU-~4%q8mUCKuTmSSaZKsr_yxYFE0{#0Ob&hEmhtu*v!Gk1-TF1oRX zTa$G>ISZT?T-QA`kVAhbl7#^$DWB8Y;!@;*!+BEmKClWa-6GgV&SN&o8znx&J zLZsrJ_tfQ#ii`ns4^r@F zS_yB_iOGsAv8$dT^C?N_j9e7!D8*{Xm&2NFiRI3h&V^1)dEnGuF7YLa+8sq@mz+bs9rs#$Ju5)Z~*kGUJ`>|M{^IR+j zCEmd8VeL>yX%&>>;naDcecJO~MG8uzcT1<4GImxpbX#fk`{<^ESB~;Swlb##SF&na z+}7F@P+myVT$RJf+}D@*cTlj>HAwFDFO(r0S-JyVKB=#bavSNQP|)>^=e2qmJHnjm zRet=sb~~_Fg+}a!p>CCE@F8%L7zAVIc(DSIjuEqI!o39FwRhCz9}xG6I%Ip7_e0cd_WB;faRI*H_jqB%VC-?p6_LEnE!Sq`>=BOzy|}Mc@b`?R z@t&bPx2_WROR<*f2xJ4xm3Rkf^`pcZy!U!Ex~08^lH5vwtc3#-2TN4Y^T(2w5wIq))qk{v)id; zbr{2Bde!0Udnz%d6}F4Ljqr5i?S1v8{{V5*TkBuh`lg*amCTn(7zeO5;C~XlL96M$ zIn=+it>$o(5YHMC4{TSZ+damot=l3gZz6bQDtN{ZHP3jr#LHo<>UPmd7zS36oz!RV z9s&F-(!|NBDLd(VSi+C+uWRaguZOg`XTQ7EFH5Ed0GICqde>3m`&l)Yl`SA;)1t{c zN6kC?`&NDatEKC$5Svhu5HRx+B_BWIU3J#AZ*QfWUk^C3iIfj9z+yg?;@6sX=arNB zvCdA(8CSNy*sw-*Lkv*6h5I_U&Q~?)9ux5c#x`j1ptOX6IGM};0Hj};cHRrJi^h5=x73TQ7S0Pv^f?%<@R(;u_fMmGiTkUWoyNK0xh=0DytG%JOSwTb z6RPDx_fzOW;MOeZ7lzmSapzD1FeCfK^cCn{4cD#Zo=*)zOi?stpm|RlyMydSd1uF; z3u@X5M}2D#pQBrhJP~D#FWsO0pF>yuuSzvu3ZwnxcI__D{QUMajTd{ayZ->dIDJP| 
znmhHpl3l{<@5Xm7B*u5Ur%tB$Dq?p5^3`9#;&#OI>p`CxsUAEbNuno3FicHk>BvHE5tq$xw0!Pl1p?EvII&Ay>d7vM+(usrc-5_0#I_TWA_37`+P#+NMW4n#9~!LIF~;aOKv>}M--`0> zJHa=f@P}&}gpf18mk_qhWtn<&?OmUSwVhwYz8y^->I=(eRPuzV3M%o8=BAEe8dYjL z=$^e5^na<*3niA9E=ad1qAAU9)`Z$s-}Ml^E4r$-yevgH9*Ly$q_SI9pO zv}>zvLI~~$nwH3sO7`b*9B|e3rG!34fot<;5$AWUeI7}Vt%!uxS9G1z@_*Kb)Zxt= zqng*HG3}484oz_yn1x$*4xGOu! zyJ#bkT=gMhqFa`I(PZ^w(y(>QuPP#gl0{#R^(2HiZ;&^0Rka&bo=7AMyK@X<>sWG^ z9Ze4f*pcHK2X)HE$@-pa>Rmjr01=Vc*Uf$i4XJqE%GLpr(gx$Qj-=Pnx)+J9wJUu< zM1@+)>Ptq+&f>kt*EQEc#yU5-jYoMTZ$0i8Yjx?9S+@`KpOYU>^#s#2i<}U1ky9ng z7zraJbsfcasX1uHYivT!o>?RxpsZWl={j>YrOG}l<6`_1hOJA+R6{> zSk@jI4>6Rc=?EL29e%aTS~6>v+MSenWA7C_J)y~YwnE%{^K?XOMf=_-Z!Qky@qpF4*ajFaW3@g<81MQ4sCp`qc0h*~S4B=DS=J zZfo9*B9^Cg@OIamp{6==Ljr5BO6^ z(k^GcxsF@NVBA|h@x^kwb<{d{fy|y)`d!?FpwA;GkLg@MzK-%AE_Xgv#|m@aq0}{7 zn6)iF(Pw68NZP0FjB*Wn>lN*qsRd+{``M%AslJDx_}cnU40uY?$>L~abO$GZax0x* zwdr~#zmqVU-Ph($-1RkcTh}kVb9EegpO(>Sv4>!t3mye#KC=G+V~#OhWT`rXQy!pF5i7~FB`S0CQq3v+l>r3#X(7t=!T}v;X{KE>+%wF8WUpGD*T%zJxi+?ti0ti; zI2~(#$?UA;mD&$6r5vg4TzAqwj3>D>KbmF@an=u zM;eX9>%$Y3RzB5%;SU>I_@l&+ZZf4;sq8DF@h^m|-^4c28~2+}7;VThOL}Irr-pPP zDx0>;rj&WPm%R2l?Jrciu~3pIK4JbIm^CkmuFcKbpw1OVUDvHsLxv|!jcqL0Mh7Pq zq}nVx#hgx=_lR$<%qXDUG#dcjCI{~E=zXe-{2kUlAEY;9RgU%NlMfVevd0-CAsLTc@-vPq z+3`idv7S3fBN~iE0TjSwI`mWRn(xHbs-WntW~|qzai_}kcSjTP55zXsS8(atOm{kJ zPdb^SnaNnx;D9sF8PDTeQFyCU@T8Hz`s3YcQXy8h1A9r^0D2mtb*~Fp$8jHvba?em zR!K~f-A6k|Ixzrk10_fuffZ|3@a5&~7V~J5103;rklEa*hT~^JfziJMpK2T=sapEI z+7zOlmqOQr{6uwM65PUywz`D2Z5@@Nz~7IX%jdZRk6JY?U&b?PGuvqTBk7xv$8gt@ z1^fz=#bIe53MIVOV7u4vq|&WkHX=@vt1lSCa&UUq)|;m26Zn=dw%uR&h=~4UZIuO3 zV32y45>A3*}fJLs;A7fvFUJHL#k?;W~roKs@pxn zJfYtkkRC_Z+PKe&zYKLvW=s3~CXCGqk;<%t42;Er$F3@9bV&RzZBU0yI}rcSpN9BR_jTm36)z@lLnne+}Emdux55y2#CLtmny+Krl*!jN?5I zdQ~dJ4{DVo?!UwO@*Lcrs^8aUuw_3%flm7q-a^GLc8EGK5fUCuU)RWq}`#*)+ ze}?1>cdSk3IuLS$bK~6f6{-6)>DM}4{;vkNVhbFnoR(JOYai~Op!PqFS@`=k z)|-2z#SM!*jV!3-akOK1<$!BhRFa|XsYjXi*v7v~ar?|m8~AKA-A(MTrMQVfw?JPi zr@!M~Wu#eYUM<#cqttGe!pgn$r9i zV%p%hwg*(Rxf^63?C1Eu#=JUsh|0IOYSPnA%eBZ&C3ayhqjmA0Rd{rWRxLWkgm%`T z0k$#G&!%x+fndv|SjS}?K3IpWYt`TS*6x|D=sp6!x6`z13rHlmA}N*?0LTf!`ho9S zy8i%#q0#lLiJn%yf(b#mw1DNJ1ZSLcwzigG7gBBALG3P$u9y6~l_{;_k2cnId;b6n z=Tx`AzeW=vC#eUg(Dbitfuiw_k7eQ;E^Th$R+bj$W_sj$smhIQM%ye#Vg@iBgf_r4+>Nf!yF9z*4~S(X`U;ykXbmpvy}{O z6oU*Dj!}oUYHtkL+4u{?E2JZfw79vIqDhET8U@_jkGBIp_2%W-)l5zjt{W1sRtoF8 zUxuje!eTw9ir4rx@fPFZ?}_gGE2&?TV6yFo?T-tey7=S( zz>NB5*w^S4aWrF6l}W{WIqA)&kEQ&X>M22T%Gy7{o~h#B*w5ith@wOq0n*AH(VaeL zgdz3{c=}hDd`ta|v|kH&TT<5Sd_!?-ZD9*e>ntKuwR?3L{OjnO-x%9lMuO*6WexmD zoEo9yTbpke{4w!XmuAx3PjjYR!x(?!a~o~_abBft9VyyQH)~^}RADH;boqW?f!-$Y zr^DEFyD4pTeHpFI+?Q6*_pQqA1S#jHGAqP9S$TV)l(>#x^fyqCu#vNp1AFtq{44E` z*_Xu@emwo2#)UhFxV4JoOr0~56@wHw^w{b@1L<5x!T$gdcy90DKZ>;97U?i+7P=BO z#J2J%-e7(kY3c}NbCK#RA#5@^qvRsE%vD$sl7HZr|x%c+&3zdpI}M+E5dXukN9N&0E+(r z*xJ&`s(2e$Q9hrjV{s82ZIF7hWCsJN!w%K-FN&wI*7P}|FCEo|^aXcDLoAWH^#pYU z9<}zFWf;PZMHgvy?8gZECCHB`n*PtkT7ABuX#Qoj$_WvVTpq{Vau2w!GhDGVOagt< z-80vU$@oWoHH3QahA$fzI$hJ4!R~~R*!B!F{{RCO(CU%3y0r2z3KyrnbH<~sO-|mv zy-#9R8@~?+x&Cd&Xpx$0PO50hF3)>aw&|fTu7Z3MFIPNRJt=fA#hsZI!XTJmy)Ee(} z{{R+5ws6Wu6rGH7-`>5dPBfyS9TDlLO(@5wL%O`y(o16$hissb2dJzqdsU7bR)ri1 zpMhDDYxdV*DPY06VzK3c66RTwa`F>}40=^gr!(a%*u^^|Zb?=@4NI5u-*y8Iynp)X z{v7c%X#j#lye@KE`PN0Xq{m0Nw|Da%P1s_6MPfsv-$Hj7*<5uf*#@wL-Pz4X4s)p< z%=#zbE{?4}udHNAD=@c+_f_hDK0hk!ehSKNr13lx9J1SSL zqnTSPFm>JZ>H1dR!i(c&;$IWSc@z0laoYvI_^P>1Fs(xUT)!iVwc}Tm_G#gYFra~q zV>OR_M0r*G52a(;c*@&Xv5Mx}Z$8>mLcdNyAcOdWPa1hJAUcuA^{!u7zK+f@I1L(y10I#ry~@S%ImkT#t{>uUz*}o;Yj9R+&IU(0Bq;u7 zqG=@BXFei)l;3xuJ+1rd_R+knlPW8z`tmDlOt3}yfCJQe8fCtemW>U#+6n-QGuHx{ 
z;tgdyGTMBO%8<(`NpsXLGDsD?r5Y67TDvXFQc5Z*oh_Y|WQ?c`KU%%ytZH$BKb3d~ zjs7*5F z!5r@mI%4AF8;rn%IfRf`aW%T7%x)Y7`%r&whV;-0C@JQclzi_fZbSD_Evrt z&|22eyu~+2@=DoxU%4i{u5XIA`o@B~oGc>ADAALWjt&Sl>B_XBq>W!L$EEAK`Pe{w z1MAwnGva5C-%e>RAoEH`$I3V~-CN>FxwKOmEbed!=QY6iqr{RWgv)UlDi?9jLQg^c zYljnyQdYi(kxG<#%nuO!XuPr0V~|^;T}|^SVaft{{W<(aQt*Go+nr|e_8Y5+^y76H zM!0Tv``0z%O$Sl8vw~Ue&zo{Q((TwDf}Ny%KGaMR-Oj58tbmQ|aB=jnE}N>>yY6P( zR5{+~x@qm0VT+N)c4=Ynkg~Q^Xci>bF)lwipW=g;Y+Y*8ZvC z+r2987ru7E_2LFZgY(T@ec6I{>55pChv;fuohazhfaSGa$^!@C;xDbSRl`Cgxy zwOCV+Dvwg`o2XlOdr!8qmRROzEFLk(82xcfw~8Yc*jME|@HqVI&*apVwH+>df@Y0S zmI>asBKV^_={J$tX%NdQg1omNeLZW=oeym{?PX}LhI6Ow9j5G&2a7ee)1r|#gnZy^ z<(T>k^DFIRQn|PhPLe{>I8}|o1P@bMBV2=0*R3w@8LjWFj^>6@#z(bu;{O0t)I>U# zuWbuixH3m@xX$0giWC(XdqlLklwzW;)}~gOt+s|H({?1t#5c@38qx6|#QC)uwEJtQ zg2xULC}Q9bdd9xCv(yr4?Jkg9G2gJ)Jsz2Lsa-AZ=+ZbF8>BoF?On00?zrFGJ1NGL zV|gu4O1ZkS)UBd*X=4lXG;P!MtcEankg`o|*ER}yiF)8xEVq};<-r*1n&K~` zx$#UozL|3ukt)biGmHW=!4*G-ynkV)Xm_b}?G^3dD(wi`$~}!<(Jr)=5nYW=Lae(- zoVh0*^It%!Q<92JT=V4z1ns$F!T$gV^lyfmCElZdAbmzyP8t6IkgB>oma_O{TFUAg z=5~x+smVs{wX5Ts+n*I_u|sa~-b9JF<$s&PB^*B$09=VceSG z##HulN-IV9_cw$09Z5>E67j;$Opyj|*NVv!&e2b8RAoQs;UwAHrMw zd8qtf@gGX@y`(yYhld|P)!OA&NfzLXSqM09m-t`Mj2~RrXW*GN>%Bivxr%#ftu4?! z&7J4YlOZ@K03Lr1^}n;`PEB3Jmv}pOJbaBR?)oduTHoxK>VwL;l`;VxHxA?8uWG(G zS>cP!j$8Ql?2WP%=blHlYu0r?3;aIuC6=Y7n~3!NT4?;+X=ato2;^}d2pIbIuM*Qd z7p-`E#M)+)b!&vTxOrzhNS;VJR}4E7fDgTSc!)xAytZC?7kIs1=;{1_rD{6Ho26;- zUx?n;)FVdZvrj8yVtI%cYMg%W9IJ&H$3xqmDlZB6XGHj&;#u_jyZhY_UxRZ&4b-ZTl>lW6 zF#FjD(>~SFg`%laii-EQXH_WEoOxd7)gA}eZ!}*8&92>C$9<_AUo^oG#=weppdaUx zUnbpMTYN~BwTUk$u+|#lMtGuxvMFaIo;r1}YSCZDUIo+I$5hi|@fM9Tjj~7?B@6z{ zshp3gqr-m=^fkTIFElMu{{TU`y4@wkh03g`ImscqjIGEz-+L#9do^ z`rXEvd~WP#TUjAQn&H#sBdV_M{ZC5td0Iao>3`WTGM!46%Z4ZLGB0JsH5c(qxwx5}@IkPB7lRb6EG9ezD>E886n-cy^R{rN<)- z4n`~Ar&^6DH3?lW>r0#0!BwYKMMqBkKK0P*JQ?u6!`?AVskJ0qlt&OHoc{nRO{3-n zao)Lq8b{%;3F~)I=yu=P){GJv{F9I|!PqnYaZ^R(d!2hzyVNc0`PT63^g^Q)yvEN z4^xRn%iKqQec>Mq+seiX<(gS7mQZ)yd<^jgww11Us?S%2 zto1gLVn`SasAO3-`fy8dKU`OU{?oq)?sT6MNw3;pPiKBzUOOXf$RypCLKycT9)R|* zTh{eRylvyJi3#DicKdFh2BBuim}i#Rl^CaRNZvu~$gIn+6nKNhI&80C=NLisj1b z+Rdz27S|yBqj33i?yZz>#=TbDCb6w}j%lsKMPXnqRU7zbVZHwVEvP<*y!*yl8(4T# z!}>{%Smo1aTW!-Ez8yhk&!+H59)`NFgPtU@)R$ATl2i8Un}?cZllZrAq<$C%z9&7y zO9NBu*mbR!$u_U}XiX;LB_*Rdj~HKHY5ooH#njS=FbQn~qwWrY!$e8-*c6|9(WvZMk(1~u zx5E#HcW_N}bFH#l=t=~$Ta3Jow1N%?q3i9%b}SYaoH?}W((a|JR_nMZHzagczZ!Iw zy|=Q~-E`}zfeAW{#UTTs$0~nMr51k?Mx)~UJ2~c%$10euB$;r>86Xqf9zgonkp9zt z9D~CCE78ri+4P&Jgx0sPAxP!M20=XzHj+L4>&UgOf5d(T@gjN7K)2a6L5_B*}%Jhl0U1;jTyM9FNM3L@4D=FLX?~ui@EwcVev~*j=Pzv9(Q#@noJ4K;V0Ga6D1-wGv3;s0gz8l@0J5iQMVn%E( zN@{kORAu~4xjqZ{jc4MBQKPrI{ngrm!Q&180FL1OR-LMAHy$GK1@(kvt*y1J6PE0E zm%;p(`Bx2X@f>)=;YPFJ?Qk-9n@2ISG~*8{EQ*bs^Swh5eMdgEZ{RzfRCvDL;&S%` z$bnWy%PNF%j(Y=K&}r9jgZF3edOoZ0Iw@8C=ANd9iL}#WZe&r6FCw@ddsdgqOb_-;ER89ItPMDE|hpz-+s0EKxy=Av&t-Lovi2)O5S9P#<$yu5xQ zbor>!^1JouXVT@f?=-g<^6fM&JXIPp9Ku z9--kKKgD`1?WUKt@n(|lw>DD}KG7cUm4H1sz{ukxo}ky5ct2Kx+r}2&VQ6MI8(KLR z7;Z;Cgc|fcgd>Mk?R8?}>CT&`x^8=2!^h&CG48Cu`*dsb#y^O9e~0B&t+X9V(?ym& zLfN28l@nXJAch2X$KhP%{ruh=@f7z^N#w+4U{^mV=fAag*8U=kSJIw1er&ihKpQ3( zKg0+3Tfg9aYm$R>VeI7Z9*?(^{SH-Dbl{WNZ9`d(9cAzQI0;fwMxnrRMyRF{{R5$c&=qt;7f8*xRv~6GeG386W zRio2mVU?01&I4q2I1E1;^{;?vEPfo@D>h^@J0DU>uP5r2hUX_Z{8K2V*Q)LzPEP!j$BLTsde1wz8iRg=SKK?sNXrX)LPm`ai5ixK7Tg- zYuoga(QwE}uM5%Pjl40aMQ*cZ^8WzsK1_-?L{&mEpYIHgqPmY0{8OIFD|>A_%zKA~ zN98~U{{S?QHiCL%rh9UGR+MpcXw!0cZE0n_`y(h;<-ejj4~jl5{>8u2lTM7^>To;D z_9*hq#HJBX*C&eQv{SF@{wmUKd_{R3&FfCLFAOLIcFK#t%LEQ_j1$v6Jx+brfj5Pp zOx3UN8uMJ9EQbs=Ec{RWzU9_v$AO}x?f-Gp%RnQquct_r>$wD 
zOPXtU{oE<@i3cTt=lNHu_$I>B!PW}Os@Hc243dtx>K44S$MzS}Xj*tM=4*SUg62Wg zCU6P%=O2}8=-xbwS@F$=g9{drlF!FQKv@2O*Dgs(#`Bu@qo>dO&dOY>E^nde8qT)U zU%Ux31Xau9s+@h_ulU!F>8_9MX7j`Q|8vzcEUjxf7U4Dz3eJ;}PQ`caVUcFnZAs`O~3{iIX106A5p|9JuiJET^>o&Ld2@9iG z#U}{l{G<$Vit}9~O1RXoV~QBsStL7A2UGR-t#P$yNv(A3V=hiq;rE;E@aZ%k6dPz< zNogzj&`&c0#v{m89;1<;YFn$@pB8J^dM%(ZSVH^n?vw9q0AUyF&*xp0wZ*~k_)FVX zT~^O^y6#LC8NkMS6VvNl&w#Z(b`K3*&1`O7cttiA&XAZdifFdk-+FrTB~*N!fWfxcd*o~F0JJ_W{vPwbJuC+ zuHN`8=-S}cbsbL9dy+iKr<8{B`0fpAco$B$llx0wlsm_35g_e2V~(JIhplPsqgrY_ zr&V5PQgf2j;C>4P|H=ESgoESFxWqd0P(<;PP=^ zi?2niT3ZjXyRGx>nM8vHl$_%S^cA7 z;(Kon_&#lJA2qLUW7`5ckCl61dz$dsHHmb~T~u1#Ji4Bs-|4ppk{AAYsr08^)26Kz zx!p=Lr9=0RSNM4yZjbSd+C!pvaw#LUw%E;bQvkbXkF9c^DENsLt*zCy&7pllMr2!;~a1b1WUR;1Ar!qFcXsq=M zas-nH7;)TJSEbKqW-aY)WAm)7;|+)9L!O);#MeE2qRFdUL#=K%EedAp)={~Y!0My8 zt8Mn{e+@uwV+*KU5aCMnVtB~*6%vf1)FS=e(T_5cwe%?Jeh9vp-rcR7HcKm(^GV!D zC#SV*>Hh!+wH=0lkfLUM%n&OFWk1VV*c&@)d z@a*@R&i1wtLjWdc$qGLnm9VyYb=B0LV6%?yEL*^{W!pP^=xpqkOjeh>-`aF4L1MJu2du#R!HTyedpbeic9QuV|*!6MgUVq?4 z(WmkBb~c)I=AU#fj7&w#s7`R*4@?93SE^|q9k%hdp(xd@A-U5oZcXj%bIel+&6x3m z3t$D~(xSaSWT2L^X@8jUPMhUXRy?0avwsiCZ-1g{5B7^&gFJE>HslV9eQTughsQZ? zC)4h~;U3YazA$-rm$K>L&eDU!mON(|=i0VBPvZ{@d=|c%!f%H<4aLEf`BB`o^AfrF zNi5|?Mh+{p_;K-lej#|y-@_1mHt@ZLpV>{#-O}2teUczLkXHocr~^Gsddewja${0a zyzQaYcq8JJeiQgfCyIPWr>J@4NHp6x7s>P8xeMj5T#T{wQR;Z12gDx=Yf-G8A+xyB zWt?sk+8|k!da@i6K9#BC4Sv>H1(L3rrkyoQX`g7gFB?L+r%89TF*F?F5wKC{>V0A}3}#A~N(-X8H4k&fXK zSYx}zvrN9KU*#>wt$62+KWB+9b)8NYxshhm+(~+>Lo!A|VlumzBy=R#p0~uG6}N-* zD?Jy)7e>$-VtWjN5UU2-bDhKH&o$|u53|-hH)W`5Kid}%WqAyO^3{ky(h;>lLD(}K zdH^dLj+|U?qdhsQu&2!#?0O%<-3EV!Q`)|eCb-d{w_Q`qyBTPSl%WKSXQpy7#(gWB z@Q$ykXxioFo~dx}cJnl*c4BEjz)aZ#c&a#YfAcbk_a^zTNZaL2n!;dgl}yB05j`eMk=j1Pmw+Kw`-m(r7ON| z-{w;B5^25*)}K*$@2C4>f5f^=LKR?`9*lF7*QdRE&%w0u&EXdpxAyC$x)v4}2(2sR zbqlxUACKTGknj$pb>Yo&#(S%1%xu9~o+X$-=Z(vrhtr|2qBM^NS;$)Y3+ZDmxXXDq z1cVN|bHb?g>GiE}w4peA-u1mTT^l_NJ}Rsg ztJi>dAe`3y$BeZI{3U57jH)eCE%MsJL%qisZRDNA^{zYOFU3C$_~XPkejm`y>p`JD zC%n9ua@@0Ub}wZFb;mibYr(qOTfN1M=jBnkGt{ns&+A`5nYzlINKtJl+3D3as=wo* zl&9}Gb$`{)y7uxf5owll=_IVS?$ga`{4u~Dy)l9@?~&*%c<)=gxA2s4X^Z}idcob) z;DsymDfT!Yl}X`y4-V>=ekJoYB+~V0AddRZCSq3L6=NiM{`U)yujFV6_WuA4eVS;) z%WB3wg!C$MztX%(!lqd&Z)oi7*0FotcU}72#?Mq(@Ya%^0{E}+uI4y`Ca0j!IM zXL8MsIXt)@g>`=xd;@bnovT~lYJzF(q=xF+`f;(Xo0Ug4MO(f?!_UFOPGKyE4X5y zE>(W5z=d?{5j@cA=h74@NLuHEKut@dnmeGoWIId z9S=gSxE%AxO60sCW3g}TM~X{RZ6e~~c=JK|Ohoh~@JDga=jl`GwvDK*uBRjoX%x;r&uJrNe9UsdU||=xxi#@Md9YBagv7@8 z(mJPQ+x)u|H_X>A$m{+Z&nJyEZxz~H>{)JNO-99-kQJ23q!Z{sT>CXHm#=tR!j`u; zzB2yNZ35|zoeY+7XUm%#w=o#U1mmwm=~>rWl=fEW*71?PFS4}2{n)?+jDNILbM>qZ zS6eTl8!KzwKFak;F$&cNPa*@Y`H{qfB{UGC%6H^+m_G5UzLW%y+ zx{lT{$q8-5OSI)!atQo-5nivYYZiCMM4^CevYi39cq|IaFs+ zxmP>`?lX_Vx_b`hg5JIBLuf!Ls(R#=~hvie6Bpced7L3!i?Ko&sDMHms8E)eQIS% zSzZ=bSrv%ne9Cj{{{YvoMDbU}+pitzdOhvPXS=uk#Ep(h?`7qoOe@8geyto5y4!^S!^sTHl(&$pP*-UM+V9myOK>PNkEeigkw6Ia)C z%k-K{$tOvoZ@jKCfJde_u>7gEo-=J%!q%E+*-f>-p8o(Ws6hY$&JSU@*Ey=+T6lZQ zisin{l1Z^i8Tp%?$J@E~{3~31)jGb?iua}XKIFQb()U)ssj1=@y}0n@{m|X^r-`Ok z{wL~5A5og}O*6&f9}6wiNPg2L*;rN2FDC?kqP>Sr)t=(Q-6t7!+=D!wqu24suP@cU z72J5ERc$KT3z+QSnXW_H#x{(flolif;P*YNMHN~QgVpJOk+dYDoi_Bn&0ic%c$#L9 z<6RaX_?TNVtnfF=%@YhVKd?Xg5vJVyKhk_*t?9lj)8t!pm6h(I+8AVg;&IW3L+jSM zO-j^g7INvg^I2WpGJmAnLnwAw@$#1gp>BG92BYxaouyd8buNK#JX&<@3Z$0>!|fk4 z07=I@b?sf$<7?5ixqnlc&Bi>@dwwYAylLS1CA$}TOl;{e2=GWtH11DPfIvO4aniZ} z02Liy!X7DyPSU21@^}PkcN{T{tk?zOC%7F6_BHNa8`0Oo{vFpmZt|gy1oCWc%Y{d{ zVSsRY5KrcPtIIV#LMy_TmsT<9`i#JfslxpGN#q~%oblSQl2qH}Un8nA_H@&hw>2f6 z)9q13b+!X34{vvX!YAPK*r_0XmCyLY!*g89E$(JpYx`z0E3eF%9OMz$jzJme(zY(r 
z-d#4{NL%|t1j;g%>Zh>-(~8SkqOh38_h#-W2y0}A_p+k(+E3lv$rvXi(-p|$?kD!5 z{{RDay%nND%#woU-)(*vG@v7F^fEn(w(>utSv>Gy)Y?!U0F<~9dZ0wtJ>=~kxXW`l2hctZJ#YjWmaN41!I%eyx&pQwRW}| zRL~}xCAA_bl^J$P!yUln=Nuoxx*rbfQt5L`Wp5;=8At}+H^%S}Q|vmQ{{U5CR~uTa zp*b#R7kH$+wtDyL(AHHR?cDTRC{o&6h1=&`!6|4MF#vBQZXEO2dJLbfaQ-0i#n;21 z5H6u=kj$}3J{%@${@oE$lU@HOpK4 zf22U_g}XANI3xZ97xk<%^*Z&ICVEcWbXL9o^9!WCY|^01o9 zLG!~XRp@#Z0RI5v!>w|+@mksVKU27}mTB&^^X4u1Y~S zz_Gq1mNO-+Wc}C`vF10swX)SkMO`GYS8|h%3h=N!E-hbK78}M8Y!m>O!;j29ZQr7%NCzJb58wZ80WM7m> z#1)Ykc3?4&e!iHN9wNE%RkpEnbNi?w{{TRS9FXw8M&s$l0q(@(0CUHn&0+jb)1!}7(=BXaw~A?lO(}>1&pQ#haysJ!A5&hP;j07}cJXQf zA`mfYk|rP~dY~tts~%6}2VegH3r9hUSmF3@;7A4JA)U9S-d3u) zoUE7iw^OPUZK`osXCt6|BYzI~S{sde=y-obOu3fp3voF-ossp=Z|G}6+r|>>xUZ){tC*qUJz&dS96r0`BV5;NMaojFx@ zgLg#TMpo`q(mXL~dp3~`+*941L-&^mKJwtK_zo zWQ{ECGJftgA9$jGOA+Wl8usYoRRpP1Y0uMdQ#q;CvV*!qbq!9_;!niuN7E-?u*DIU zS(sod#-wMTb`{M0N!6|9)shPd1L^P`tWmg-oMeAL&beV^h?029m=>D;1n2 z6K|TN1~~)Nscd?EE2Po18#|p9uQhUE((KSnBCJZo!=4k;^oyJMV2VvkNwOQlh4P)9 zPs~^Nh{k^!SZhX|ci5vsw zASFtED$IWYS2~ob`?2?={{TgEwzYRC-R}PY5BMhqqS;Sf1}#uP*czMY1uX!EDSPNZ9m!v}$s<2r&}P(igJ9Ov zEb|3Kjav+Uhx4kwEtGf%Ru)#*aNXTZptYo9Eg9$mtt}4j`ukow-PAX#wlaYtb!3p| z2*SkXHC%91o_O@dcG`}-EpI%=*wBHxRn89IY<^Y8>7EZwI&rDNc`TB| z4)`Ahx#K@U%{yPxE@zeQkg@X-V+zfM$6g24xav1aKYHDaC0eL~LXMoC`M zdHk?X-i}lM04&?z(DWnm$FXUCAD_b>9*=Th3TG(~gfFbDB9~4gT}srw?9#JftYCqUbDW%gYq0R

yKh z6IQ#`^{52%$OXEf+VEgt{&mCaSLQdln&`@sD+oN4ZY3b_J;AK`b%^x&{I!xOuVo*- zxnZ_AN1y<9t)YsHsx?h%>$ws3kc&;~cAB?|;?b?v&g$ijxZy0JKfIXwxvZ@t#hQiQ zryYg0yn1YRPO(QEc-mRG17J9s9XR?O+=HQ~+5)Sp4 z5CH?%7#Qeyt<7iP=AfP&(JeH4%PSpK5v0FssXVzPX2Css1IOiDTk3FYn%(}ba^+#N z1MJa8y-r8WK4^+I861p7Xu0 z_!qCdJw1k{Ese~tAc^-XPZ;xU`u^;=CwFg~kEKE5{{R=-_;EBjR^6u62BQtrEYNwe zAc?{G*SI8f?hS1CQuj&JHT^tmdb;U)b=#en0YrdFw6IXU2qbhoagI8PJY{=rqj;yp zXl`sZ?K0k9Hfx-iQKM`t7~^leLE`|PhXjF>XwOmZ~jLWI7geA>|FSZ z@b5!|&G?hyF%^}btdd&aX?wEPSI)mLm*2{5%**N1wR0LDi!ZcutXM!Jk4$@Xl24Ft zl0ZmB4cGt)!1k>#7F^lO;oC{{_gA%{ z;?%S|%N3sM$s3;5w(mJ*OYj@tUN>F`-_&=>}9)1w#{#M z9Exp^A%JbAc8=#gjaCN`@i9IT_s?V*qElz^?TCFZem7 z!5p_5&DOE0+(yqW#mAD6MQ_By)S9ha)J@9UU;LRUbn%?6~T~%CZ_OQ(H zU0q3?fTSOEMp$II0ATTxiq^9BaBfMz$n|Fmbk{V`GVoj)joycUs9tI5WpiyINUcVl z8_P2|`L;$BF#&Uy^cCqkm6gVy;I)=WV};8s(V@U%+r0-MoM&!(iq-gU;*CGT{xgc( z#3=GO)`YxZ$#cDjLBZQWiQdpm@9W?*sw``mHP z2(Ky*raqi!hq7IDf05-%q-`w^E4I~6ljE-kqBM(lphhnRxlfUu!ve(h1U5+ib$49& zbso8Gs%jBE%sP$Ct#&-;+^r*Wz#gL{erCB(A49EJ-Ct{Q=`U?@sN?K9sdmUkAyg{o zXvpW0{8hiB{?f<5eizd1^=l>6?l~9nhf)h;f>qJ6kQeUp`BznHYEP0+mOCOS!Bk2z zb~)V_;#Q;KyZGjk++AJSe7DdN2$J6ibbyY$aDM~GHR~QH@fM}6=qq8N>WlrFTkRz= zkt}WlZ7ey!B;&WtGAqM04-t4P#hy3RukR$f&@JVV-0Ab!zn}J3C+3CMXx$mf<7glN zJJ-0w;K-rS+8r-ahwKcWXOC=(l51Ji&d8MP!g+NyaY)a1_hUP2M4cy$I_BWF0FMKF0G)Fe=7yTDI;%i ze_FBd=U2M$55%o+OZ#ljE^U`<&nEKhpcM{4=YqpMtI?(LoVrextGqg8y_DLI?j|zZ zr<=WhiClFUC!cZLajh(DF*c!BNm~19{dCmgdP?#!^%(Sz3QaDtGc1yMD-uH)B$mlI z=c&oZ9C6;S{5aJvu5PYH^lNV=;{B#Ll;>#9Fn#fZ_}8BJdd7bd_<*~Ubs~}>g``v& z{uENg4a|7SJxzA8&n1=ao2eMYP8%+&xcN71IUbJ6ZHQ&q8Kw7X3@_?c2qnQ{my6yrO9 zaya1N^%ynkc`QH&2l+Bb{w6l6Ja;5YkRsKt@R? zXasg0ed`Zg)Yo0{WHB_U3=y)y8FmVwlk3})S~~v#i09O2yVP!28;Qws0Ki{B2c=|q zGr+gLFxM|^buSES@I!BIw^q935V5y*l}N_IxRe0Fi3|wjabA5oYd6d%ieC!Re_aid za$J!s&7x|$eb%iAwU%qoF^WM3=kquiAalk5?b@?6PZnr5UL?`v@sEesP_@%zy^T}<3%3(7hhzB*41d_wVdrG2N* zX=|lLaRJk8F0DNH(O5Bm-QbWEj(v9?I#nMNBo=nl>Kdl0rO%>H6#_vVmhtb$1y^D6 z+ZgofE6m zX4UYZ~rmm7-zuWDs_Pk+iQ|oOxG+<dXhZ8!t_r_+k*rR=Cfm%gvar6&FzY*_Hh zY5xEhz6#u21XNc^A(MIB=_((Tk~)*}pM2K?;ypqwD?`#C)U^hCojMP&yzmS;gdD76 zISk3iBzHOOU6s6&Ufk-x$j}Cs%JZ!P~ihgAz9m626IL{dr@hhK|+fJ{im(R@Fn@aK7KIUGZ;`n@PX?3RE z*kezIMDnA!VrG5YhDiVb4glbFHH)G#wYi&52^m=?^KGX4thgB1bJL&)^RC=PRZ+<&j(TIQWZCL7CGm#gw!a%>9CA7zt#=j|aO=8zMRyJ-wwYYIbo{7_V|(M=3V8npAcccCwjc)6pWDLNVq@*(dAJ*5n4k3^=?u2lnm$uWfm9Z|_I%lIBg>IUM@->0O4asx7Ux zzL}*a_#=lVLUcOh-5*FzR#7RXEp-+mpKL`u_mKktW+udmV&6G?Ba&rs&v5fJH2fvX8wn zZdtR(3Ul~ZpXwUUlQgYt1M3qx!HN)e69KmqNbK7!k)CU;jn${4hTd#k_?Zv_naFTry z4APsFj>;yE7}OVQXxl0exg0U|>-DVtTUDP)vv@qje>qnkTH_q|tw|!&^xZ;4wYQKN zn?7XEmW^@3jB)8)hl^!Vd?PXlqsI1?v$b=^c;>nNpR=j#)47iOv= zX}4{$$v<_0;k}5hi!X;-jmE5Jw!eoTFBlp)dZ0$n>k9uaoJ-z+`nD`{3~O^+O%(|qZU_}8&*-# zl#$J6-|2ehnl4r}NThqCFWRS&Bt>^_Cq4Tv2W*P@_|vGAlK7sV=3#WznbW1+wvRpQ zOxyKav{M{;`;enx1MTxM?ULTrdObO0juLd|yR_6#*<_mBmXaKfSgtr6XLr;bck|65 zZKJ=tc{M|;YI9+wkyM05&p=NnpP=AZap8X&Nv6lB>F*gEiF%BaW4{D0>*V zC#z58{{RdNV#cB2>n{nUnzp5?t6kkP+r@5(L~uf&?Ew3YgB?cG>sCBJcWa?p+*xSy z8>}L-ypk-eWCVagz+CbG^cbod#+wJk3yWCfl_HV^EWz?uBhY*1uwjkz zR(~=pYl#9QoyRS_;ACzbU}v6~!Rov_;kz9(!{+)_^EJh31oK>>4-+bq%g@{jGsZas z>0V)@+RvnTiY;%>lgoqUgfRu@lqbwR$6wG_s{BRqzMW&>$r8>Nxzn|~NaeA=7%r~3 z-wflliWuRC-cySDJU$+j;XZw3ZC}gvI-uhVrnT6?)g6;rNg=sG9m!P?qI8kJk04>X zcju*b-YM1Xt^7-+Sl((;Y1aB}v{6fUBY+}iJBj4*PSwHb&2nB0(Gynqk)&EjD?P28 zOMNgcouyZH)9t?)2h>(ah;?}UNvZ1>jcE+5moBX#87~U~h}?HV0QwP-L8-#1(8F`h z-)Q=z{{Z2R6&`8X^wjUXNoQ%V$0o6%+*`W2DRk0E7ADBT%1`%tXB~0xT$Sdpp?F6| zwu46Tt*)taZo)=GCK1W|$fE;+zz4rx^;=8vJ9t{=%T?2(R?wY3%_IcwE6^BNV~hci zdxM%JUz2KWNlTup4^yA5Xa4}gA8DhkFRk9arl8{j!ZIdu zdkuhqKNkEdEk{t(d?BvH-W<5Ocp{7>(p)mDvmQ4E9W&TfE5C+wc+FwFvRN+Vl2Z3~ 
zQe3X_7#RUno~MD4QyE@f%Co$>K9BC|5saL*c28Y*G_?I@%Urr>qrHL!v^xw>4%P#a zmH>AAYR`wX+gSAri`AGQSlqNDBs=2(u1^D;;0k2^2hn^KR{sE0g697IbBJ_pI5LtH z4rQHzz~mEw-mL5XA&PrV9R`nMpk6w@%Ud~1?m*aG&DGf8^X*ewoUW(aqjzugUzwbw zr1@d79kgXM`-`@bRZDqeJx3WQ>5AF-ec~gk_;+5n@csOeT-v!d_fjZQaxzaNfPP>P zVOZBn@@WW=LmSN^85$OFigVi}fz#C1{{Vrt8%rCzT{0M8ht7qkAp(L=LNY$^;Cl4; zuDl%5nzvr0R!P%uuH`EoH%haYeb%6wyS8MwlF`ePo~&Jl2h`TTg)gskyIb*LuT6EO z7vBZBVjWljOJH%1a0te6$ge2zGkAg>e^&7AoYStDCMrRPA@_2i6&c0|>P>fk9FFHn z(c-tdc4r7xcP7$6z#LW;Dn^Y>wZHYLO70u(bUF>MhxLyNYkD@Z9PrH7{{Y$$$|Kxg zG3_Hb$FJvH_ltE)dz)K*HdtEMMzx7bTe;w-G8w&2Ip(XblXX74CP?nq)t4q^lq^{o z`ElxMzlbfQvAc|2DV3En>md#q3Blu^$kvtTDA4yEt!wvc`xr^{R9we7Yk#8L81RlJ8mC0F3EjYfdOSx!SPi+0)!n&w@YpdF5-a}p7y_&giBhGDsj|H~p89ggw;g`dEa~#X5#`D?h z5Sw@u#9COhmRbJ**Z{8t_4-zt_%lcFb(6leFZ?7L+;Ft3r`s_RBB9c9qHgXcO znYSD^KJE|90balD8q%bdIye5mBUMqUa!Fl&DBrdCd#Gx9MyaS+PaN95o%_!s-Q#HG zfcN0xa8Eho2R*A3O0<6z_>R&Gnbz7(K0?>Fs#Vk?F6SYXh*76!9;c4@p7`qCES~b^u(xq!IOIm8g+KSm&0N-R zMv5#3lM`)r^3qt?m&xO4$0Q%mxam~%xpaG#3FqEUw&Bn+LahG)F~%{TfZ*rapKp0+ zOWQ#A6C!-d*AWQFaCj?*Y>bY%^{yH7$~?{W)VQ}DBD5+;_PujowUSpcT3DtrD)Stf z!42*@^VYh^JUer)Y0{g>?De0Rlxrsu$&TByM_+8$J)n4m4O3A!QZvX*Tl~e&;1rJi zM*^+h_~l}0j*q9@TFB>Pt>v`4RYpGU(aFwv&v9B|oHdPD?fzb$AewJTyBn9yb>aI2 zR<$;IYJkOE4iJIBA&B(q1}df3h-B2fTc+N-pcaaSc-pCsc=}cbrQ=WSNTbs=n6DDe z<`+Bd)tyT(1A3eup68C$Vt)%C5!z2aiLB|@3_f5QP>iS0qhW{QD}G5%U%F1+zpwHo ze(kPib-oq&$eKQ{d8H#I?6+5McGGk77WO@`JJ&Vw3&g2;E7|FL!reYX7x{91Gthl& z3&ODKI!&F`-lI6$%%TZM-X9#0t)4PFR7rg`iXQw@w*~V3Losp{Ln`>}v&_<}R6LkPP zRX_uWasL1UHSJ%qF0FNSeR1LGl z60zJ#1m`Ezo;nQCMRg>jRw->`qX)dcf0^`0?Ee7p{{X{!cfl_P__hre%y^6YQtl&u z@dtKYpp1sdBz7cZb|Q}!YS-G1m8)xaHb&GwDmAH-Uf^@JKN#oN!O8KZ9^Kn@6QFxbc(^amB@pBEsFe08Zt5ke_I z7aRexXrjK(o4p9!QGWTAVWryXI+um+Eg}$0b-m_gC*~^P1CE2QYWhpV`mNYYW%Jmj z)In!P z_pAOV)UJQw9C^Iv^8^5f^vS`ZiuF`u?IkN~{{RHae+qtQMc`;-i^A}c8Zo?rUp&a6 z_hJd09&>^j6y#v>?sD$}?Sn$s;7H>U3n-i^Kz5f!W+0KqPdrgYVVOzBglzi%0FlzP zlSu9LDHeXOwS#)#?~VT10A`m+Cgb_7%l{oLpprINF->OoDs=VGBMcXQAKHz>4lR+=t1!| z5vS<>7@FOrj!UQ}^N*OsFcPxk1mFyI7|nBE4>gO+pBcfXOFKznD$8>vyI`Zq^Bu%@ zvIr+LOT5|RO6F`BokpohSB1qKh6Pdxts5Tc6pAeH94j&70l zJn!Ldr*{X5G_Nk=NW|0ZkSkz1l{^vOrFGvCE#tV>W|~FaDUc1>>=a^(E5w_=6<_&} zRVB)Gz9WJ=drMejP?01+DvpB!yo*&HR`Z$t!Lfw@ZVQ*979cpIW(> zRq@M3cIJ4*nF!rJ^%Pe`KY>rwieDU!kauVotJ$SiIOCDf{N!taURlT(s^<=yWsXSpIVUT;9l(1WbfSv%vARuL%k(;o1j8wAh91=dJiYqxqePKIve~>TYe7{38 z$1*jijWo+hPnB(M@vKp=0Y>iCT6?*q_(bT=Sw+N50qjb)ap^@Br5}ijU&Bw(^sj~y zt)-ud?qVA!8fg9JR^%}}{XqPy#6BS3TxmMQ?`JdHFqSiz&O1NmIO#?E8Zt=y_# z#eXBJ{hn`T)qF{-S>7 z8@Q6wT)&DJF-IKiQB#e)_M(dO^Izaa`WI2?-<$l*uZkCQ$!q5{@|j;ziWY6>lraK9 z{CoAPy5+QE#u{nC{mR80V{yW-lbm9TE2U{o32wa^&})jOIZ&u%>9mZF7mB~3fe(u` z)Ib$PXaEDCF|i}*nkcSnE%Qg!8Bb%Q@ukeR*7j`^lx7W*JNB$;E+h-4%eyiZVo4e9 zXrj7tjr&V^qZ$cvlxLnryhy~16Ut`V4msza&{sDsj`lZlGqX66lru3W7(7u$ME)23 zcQkrP&+#seWOQ+R_H-9b4osthPCB1jpW#$#CDo!bg1&3qKfBV1wWC~Q}|2J z^uHH5(`~MewjnPPmMR8Y>U}+`&ZXe}8sXAMu#8JBnun1IMqpP5BoIz$qPp{D;f0fn zw#4NrOA?dqFVOJ68d=5R>+>F+zS#q9!pO&rbmW?=;oGUDzSY^}W@NWjT=p3qD5AW$ zHt_0CeGZI7QgX684I5s*(`UVs+J7nj%lCpQI62749XRP;+3^AhtiB5PVk>yo7>rR# z<;I~`b3)8c2e~GSE9t0f3ZLOToKDnFE7SDLNN+qb6n4=fUq)rQ!VJpU1Li$(j@>JT zf)%py#<+g(0q;ZxcC=NgaKE{eGmT7w#KTpW-rre<0H6eyLCh~UA0Zt=X4w2*?^WMB!&9W#!AQAKl8{vz}pKjJUzpzR{vE;JY>lXJ_t z#^T2~!1trX`dzk@4wpTuG*a9tX&ZYnV}a^UKME+Wxc#H5%bA-tWXa%rNf?si{Y6Hrf$yu>%&)=+(VFL86N1rvCtZjqv{f#Q7ip*$w{L<^TWy diff --git a/v1_api_demo/model_zoo/resnet/example/image_list_provider.py b/v1_api_demo/model_zoo/resnet/example/image_list_provider.py deleted file mode 100644 index 2cd8eb8bf8..0000000000 --- a/v1_api_demo/model_zoo/resnet/example/image_list_provider.py +++ /dev/null @@ 
-1,102 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle.utils.image_util import * -from paddle.trainer.PyDataProvider2 import * - - -def hook(settings, image_size, crop_size, color, file_list, is_train, **kwargs): - """ - Description: Init with a list of data file - file_list is the name list of input files. - kwargs["load_data_args"] is the value of 'load_data_args' - which can be set in config. - Each args is separated by a column. - image_size: the crop image size. - mean_meta: the path of the meta file to store the mean image. - mean_value: can be mean value, not a file. - can not set mean_meta and mean_value at the same time. - color: 'color' means a color image. Otherwise, it means a gray image. - is_train: whether the data provider is used for training. - Data argumentation might be different for training and testing. - """ - settings.img_size = image_size - settings.crop_size = crop_size - settings.mean_img_size = settings.crop_size - settings.color = color # default is color - settings.is_train = is_train - - settings.is_swap_channel = kwargs.get('swap_channel', None) - if settings.is_swap_channel is not None: - settings.swap_channel = settings.is_swap_channel - settings.is_swap_channel = True - - if settings.color: - settings.img_input_size = settings.crop_size * settings.crop_size * 3 - else: - settings.img_input_size = settings.crop_size * settings.crop_size - - settings.file_list = file_list - settings.mean_meta = kwargs.get('mean_meta', None) - settings.mean_value = kwargs.get('mean_value', None) - # can not specify both mean_meta and mean_value. - assert not (settings.mean_meta and settings.mean_value) - if not settings.mean_meta: - settings.mean_value = kwargs.get('mean_value') - sz = settings.crop_size * settings.crop_size - settings.img_mean = np.zeros(sz * 3, dtype=np.single) - for idx, value in enumerate(settings.mean_value): - settings.img_mean[idx * sz:(idx + 1) * sz] = value - settings.img_mean = settings.img_mean.reshape(3, settings.crop_size, - settings.crop_size) - - else: - settings.img_mean = load_meta(settings.mean_meta, - settings.mean_img_size, - settings.crop_size, settings.color) - - settings.input_types = [ - dense_vector(settings.img_input_size), # image feature - integer_value(1) - ] # labels - - settings.logger.info('Image short side: %s', settings.img_size) - settings.logger.info('Crop size: %s', settings.crop_size) - settings.logger.info('Meta path: %s', settings.mean_meta) - if settings.is_swap_channel: - settings.logger.info('swap channel: %s', settings.swap_channel) - settings.logger.info('DataProvider Initialization finished') - - -@provider(init_hook=hook, should_shuffle=False) -def processData(settings, file_list): - """ - The main function for loading data. - Load the batch, iterate all the images and labels in this batch. - file_name: the batch file name. 
- """ - img_path, lab = file_list.strip().split(' ') - img = Image.open(img_path) - img.load() - img = img.resize((settings.img_size, settings.img_size), Image.ANTIALIAS) - img = np.array(img).astype(np.float32) - if len(img.shape) == 3: - img = np.swapaxes(img, 1, 2) - img = np.swapaxes(img, 1, 0) - # swap channel - if settings.is_swap_channel: - img = img[settings.swap_channel, :, :] - img_feat = preprocess_img(img, settings.img_mean, settings.crop_size, - settings.is_train, settings.color) - yield img_feat.tolist(), int(lab.strip()) diff --git a/v1_api_demo/model_zoo/resnet/example/test.list b/v1_api_demo/model_zoo/resnet/example/test.list deleted file mode 100644 index 30bbf630b6..0000000000 --- a/v1_api_demo/model_zoo/resnet/example/test.list +++ /dev/null @@ -1,2 +0,0 @@ -example/dog.jpg 0 -example/cat.jpg 0 diff --git a/v1_api_demo/model_zoo/resnet/extract_fea_c++.sh b/v1_api_demo/model_zoo/resnet/extract_fea_c++.sh deleted file mode 100755 index 5447aa92df..0000000000 --- a/v1_api_demo/model_zoo/resnet/extract_fea_c++.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e - -#set names of layer which you want to extract feature -#in Outputs() of resnet.py -#like: Outputs("res5_3_branch2c_conv", "res5_3_branch2c_bn") -layer_num=50 -configure=./resnet.py -model_path=./model/resnet_$layer_num -fea_dir=fea_output -#Output is text file. -#Each line is one sample's features. -#If you set N layer names in Outputs() -#each line contains N features sperated by ";". - -# create model list file. -model_list=./model.list -touch $model_list | echo $model_path > $model_list - -paddle train \ - --local=true \ - --job=test \ - --config=$configure \ - --model_list=$model_list \ - --use_gpu=1 \ - --predict_output_dir=$fea_dir \ - --config_args=is_test=1,layer_num=$layer_num diff --git a/v1_api_demo/model_zoo/resnet/extract_fea_py.sh b/v1_api_demo/model_zoo/resnet/extract_fea_py.sh deleted file mode 100755 index 2e87152f7f..0000000000 --- a/v1_api_demo/model_zoo/resnet/extract_fea_py.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e - -#Note if you use CPU mode, you need to set use_gpu=0 in classify.py. 
like this: -#conf_args = "is_test=0,use_gpu=1,is_predict=1" -#conf = parse_config(train_conf, conf_args) -#swig_paddle.initPaddle("--use_gpu=0") -python classify.py \ - --job=extract \ - --conf=resnet.py \ - --use_gpu=1 \ - --mean=model/mean_meta_224/mean.meta \ - --model=model/resnet_50 \ - --data=./example/test.list \ - --output_layer="res5_3_branch2c_conv,res5_3_branch2c_bn" \ - --output_dir=features diff --git a/v1_api_demo/model_zoo/resnet/get_model.sh b/v1_api_demo/model_zoo/resnet/get_model.sh deleted file mode 100755 index b33d8178ab..0000000000 --- a/v1_api_demo/model_zoo/resnet/get_model.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e - -DIR="$( cd "$(dirname "$0")" ; pwd -P )" -cd $DIR - -mkdir model -cd model - -echo "Downloading ResNet models..." - -for file in resnet_50.tar.gz resnet_101.tar.gz resnet_152.tar.gz mean_meta_224.tar.gz -do - wget http://paddlepaddle.bj.bcebos.com/model_zoo/imagenet/$file - tar -xvf $file - rm $file -done - -echo "Done." diff --git a/v1_api_demo/model_zoo/resnet/load_feature.py b/v1_api_demo/model_zoo/resnet/load_feature.py deleted file mode 100644 index 5d3d0c0d30..0000000000 --- a/v1_api_demo/model_zoo/resnet/load_feature.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys -import cPickle -import logging - -logging.basicConfig( - format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s') -logging.getLogger().setLevel(logging.INFO) - - -def load_feature_c(file): - """ - Load feature extracted by C++ interface. - Return a list. - file: feature file. - """ - features = [] - f = open(file, 'r') - for line in f: - sample = [] - for slot in line.strip().split(";"): - fea = [float(val) for val in slot.strip().split()] - if fea: - sample.append(fea) - features.append(sample) - f.close() - return features - - -def load_feature_py(feature_dir): - """ - Load feature extracted by python interface. - Return a dictionary. - feature_dir: directory of feature file. 
- """ - file_list = os.listdir(feature_dir) - file_list = [os.path.join(feature_dir, f) for f in file_list] - features = {} - for file_name in file_list: - with open(file_name, 'rb') as f: - feature = cPickle.load(f) - features.update(feature) - logging.info('Load feature file %s', file_name) - return features - - -if __name__ == '__main__': - print load_feature_py(sys.argv[1]) - #print load_feature_c(sys.argv[1]) diff --git a/v1_api_demo/model_zoo/resnet/net_diagram.sh b/v1_api_demo/model_zoo/resnet/net_diagram.sh deleted file mode 100755 index 1b06ffa44e..0000000000 --- a/v1_api_demo/model_zoo/resnet/net_diagram.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -:' -Visual deep residual network -1. Using make_model_diagram.py to generate dot file. -2. Using graphviz to convert dot file. - -Usage: -./net_diagram.sh -' - -set -e - -DIR="$( cd "$(dirname "$0")" ; pwd -P )" -cd $DIR - -img_type=png -img_fileprefix=ResNet_50 -conf_filename=resnet.py -dot_filename=ResNet_50.dot -config_str="layer_num=50,data_provider=0" - -python -m paddle.utils.make_model_diagram $conf_filename $dot_filename $config_str - -# If you have installed graphviz, running like this: -# dot -Tpng -o ResNet.png ResNet.dot diff --git a/v1_api_demo/model_zoo/resnet/predict.sh b/v1_api_demo/model_zoo/resnet/predict.sh deleted file mode 100755 index 2b67b17c48..0000000000 --- a/v1_api_demo/model_zoo/resnet/predict.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e - -python classify.py \ - --job=predict \ - --conf=resnet.py\ - --model=model/resnet_50 \ - --multi_crop \ - --use_gpu=1 \ - --data=./example/test.list diff --git a/v1_api_demo/model_zoo/resnet/resnet.py b/v1_api_demo/model_zoo/resnet/resnet.py deleted file mode 100644 index 6fdd97fefc..0000000000 --- a/v1_api_demo/model_zoo/resnet/resnet.py +++ /dev/null @@ -1,271 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle.trainer_config_helpers import * -""" -paper: https://arxiv.org/abs/1512.03385 -""" -is_test = get_config_arg("is_test", bool, False) -is_predict = get_config_arg("is_predict", bool, False) -data_provider = get_config_arg("data_provider", bool, True) -layer_num = get_config_arg("layer_num", int, 50) - -if not is_predict and data_provider: - train_list = 'train.list' if not is_test else None - # mean.meta is mean file of ImageNet dataset. - # mean.meta size : 3 x 224 x 224. - # If you use three mean value, set like: - # "mean_value:103.939,116.779,123.68;" - args = { - 'mean_meta': "model/mean_meta_224/mean.meta", - 'image_size': 224, - 'crop_size': 224, - 'color': True, - 'swap_channel:': [2, 1, 0] - } - define_py_data_sources2( - train_list, - 'example/test.list', - module="example.image_list_provider", - obj="processData", - args=args) - -batch_size = 1 -learning_rate = 0.1 / batch_size -momentum = 0.9 -weight_decay = 0.0001 * batch_size -default_momentum(momentum) -default_decay_rate(weight_decay) - -Settings( - algorithm='sgd', - batch_size=batch_size, - learning_rate=learning_rate, - - # set the appropriate parameters according your schedule - learning_method='momentum', - learning_rate_decay_a=0.5, - learning_rate_decay_b=1200000 * 10, - learning_rate_schedule="discexp", ) - - -def conv_bn_layer(name, - input, - filter_size, - num_filters, - stride, - padding, - channels=None, - active_type=ReluActivation()): - """ - A wrapper for conv layer with batch normalization layers. - Note: - conv layer has no activation. - """ - - tmp = img_conv_layer( - name=name + "_conv", - input=input, - filter_size=filter_size, - num_channels=channels, - num_filters=num_filters, - stride=stride, - padding=padding, - act=LinearActivation(), - bias_attr=False) - return batch_norm_layer( - name=name + "_bn", input=tmp, act=active_type, use_global_stats=is_test) - - -def bottleneck_block(name, input, num_filters1, num_filters2): - """ - A wrapper for bottlenect building block in ResNet. - Last conv_bn_layer has no activation. - Addto layer has activation of relu. - """ - last_name = conv_bn_layer( - name=name + '_branch2a', - input=input, - filter_size=1, - num_filters=num_filters1, - stride=1, - padding=0) - last_name = conv_bn_layer( - name=name + '_branch2b', - input=last_name, - filter_size=3, - num_filters=num_filters1, - stride=1, - padding=1) - last_name = conv_bn_layer( - name=name + '_branch2c', - input=last_name, - filter_size=1, - num_filters=num_filters2, - stride=1, - padding=0, - active_type=LinearActivation()) - - return addto_layer( - name=name + "_addto", input=[input, last_name], act=ReluActivation()) - - -def mid_projection(name, input, num_filters1, num_filters2, stride=2): - """ - A wrapper for middile projection in ResNet. - projection shortcuts are used for increasing dimensions, - and other shortcuts are identity - branch1: projection shortcuts are used for increasing - dimensions, has no activation. - branch2x: bottleneck building block, shortcuts are identity. 
- """ - # stride = 2 - branch1 = conv_bn_layer( - name=name + '_branch1', - input=input, - filter_size=1, - num_filters=num_filters2, - stride=stride, - padding=0, - active_type=LinearActivation()) - - last_name = conv_bn_layer( - name=name + '_branch2a', - input=input, - filter_size=1, - num_filters=num_filters1, - stride=stride, - padding=0) - last_name = conv_bn_layer( - name=name + '_branch2b', - input=last_name, - filter_size=3, - num_filters=num_filters1, - stride=1, - padding=1) - - last_name = conv_bn_layer( - name=name + '_branch2c', - input=last_name, - filter_size=1, - num_filters=num_filters2, - stride=1, - padding=0, - active_type=LinearActivation()) - - return addto_layer( - name=name + "_addto", input=[branch1, last_name], act=ReluActivation()) - - -def deep_res_net(res2_num=3, res3_num=4, res4_num=6, res5_num=3): - """ - A wrapper for 50,101,152 layers of ResNet. - res2_num: number of blocks stacked in conv2_x - res3_num: number of blocks stacked in conv3_x - res4_num: number of blocks stacked in conv4_x - res5_num: number of blocks stacked in conv5_x - """ - # For ImageNet - # conv1: 112x112 - img = data_layer(name='input', size=224 * 224 * 3) - tmp = conv_bn_layer( - "conv1", - img, - filter_size=7, - channels=3, - num_filters=64, - stride=2, - padding=3) - tmp = img_pool_layer(name="pool1", input=tmp, pool_size=3, stride=2) - - # conv2_x: 56x56 - tmp = mid_projection( - name="res2_1", input=tmp, num_filters1=64, num_filters2=256, stride=1) - for i in xrange(2, res2_num + 1, 1): - tmp = bottleneck_block( - name="res2_" + str(i), input=tmp, num_filters1=64, num_filters2=256) - - # conv3_x: 28x28 - tmp = mid_projection( - name="res3_1", input=tmp, num_filters1=128, num_filters2=512) - for i in xrange(2, res3_num + 1, 1): - tmp = bottleneck_block( - name="res3_" + str(i), - input=tmp, - num_filters1=128, - num_filters2=512) - - # conv4_x: 14x14 - tmp = mid_projection( - name="res4_1", input=tmp, num_filters1=256, num_filters2=1024) - for i in xrange(2, res4_num + 1, 1): - tmp = bottleneck_block( - name="res4_" + str(i), - input=tmp, - num_filters1=256, - num_filters2=1024) - - # conv5_x: 7x7 - tmp = mid_projection( - name="res5_1", input=tmp, num_filters1=512, num_filters2=2048) - for i in xrange(2, res5_num + 1, 1): - tmp = bottleneck_block( - name="res5_" + str(i), - input=tmp, - num_filters1=512, - num_filters2=2048) - - tmp = img_pool_layer( - name='avgpool', - input=tmp, - pool_size=7, - stride=1, - pool_type=AvgPooling()) - - output = fc_layer( - name='output', input=tmp, size=1000, act=SoftmaxActivation()) - - if not is_predict: - classification_cost( - input=output, label=data_layer( - name='label', size=1)) - - -def res_net_50(): - deep_res_net(3, 4, 6, 3) - - -def res_net_101(): - deep_res_net(3, 4, 23, 3) - - -def res_net_152(): - deep_res_net(3, 8, 36, 3) - - -if not is_predict: - Inputs("input", "label") -else: - Inputs("input") -# Outputs("cost-softmax" if not is_predict else "output") -Outputs("res5_3_branch2c_conv", "res5_3_branch2c_bn") - -if layer_num == 50: - res_net_50() -elif layer_num == 101: - res_net_101() -elif layer_num == 152: - res_net_152() -else: - print("Wrong layer number.") diff --git a/v1_api_demo/quick_start/.gitignore b/v1_api_demo/quick_start/.gitignore deleted file mode 100644 index f71662563f..0000000000 --- a/v1_api_demo/quick_start/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -*.pyc -data/dict.txt -data/dict_all.txt -data/labels.list -data/mosesdecoder-master/ -data/reviews_Electronics_5.json.gz -data/test.list -data/test.txt 
-data/train.list -data/train.txt -data/pred.list -data/pred.txt -dataprovider_copy_1.py -train.log -output diff --git a/v1_api_demo/quick_start/api_predict.py b/v1_api_demo/quick_start/api_predict.py deleted file mode 100755 index 9bdffe1006..0000000000 --- a/v1_api_demo/quick_start/api_predict.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os, sys -import numpy as np -from optparse import OptionParser -from py_paddle import swig_paddle, DataProviderConverter -from paddle.trainer.PyDataProvider2 import sparse_binary_vector -from paddle.trainer.config_parser import parse_config -""" -Usage: run following command to show help message. - python api_predict.py -h -""" - - -class QuickStartPrediction(): - def __init__(self, train_conf, dict_file, model_dir=None, label_file=None): - """ - train_conf: trainer configure. - dict_file: word dictionary file name. - model_dir: directory of model. - """ - self.train_conf = train_conf - self.dict_file = dict_file - self.word_dict = {} - self.dict_dim = self.load_dict() - self.model_dir = model_dir - if model_dir is None: - self.model_dir = os.path.dirname(train_conf) - - self.label = None - if label_file is not None: - self.load_label(label_file) - - conf = parse_config(train_conf, "is_predict=1") - self.network = swig_paddle.GradientMachine.createFromConfigProto( - conf.model_config) - self.network.loadParameters(self.model_dir) - input_types = [sparse_binary_vector(self.dict_dim)] - self.converter = DataProviderConverter(input_types) - - def load_dict(self): - """ - Load dictionary from self.dict_file. - """ - for line_count, line in enumerate(open(self.dict_file, 'r')): - self.word_dict[line.strip().split('\t')[0]] = line_count - return len(self.word_dict) - - def load_label(self, label_file): - """ - Load label. - """ - self.label = {} - for v in open(label_file, 'r'): - self.label[int(v.split('\t')[1])] = v.split('\t')[0] - - def get_index(self, data): - """ - transform word into integer index according to the dictionary. 
- """ - words = data.strip().split() - word_slot = [self.word_dict[w] for w in words if w in self.word_dict] - return word_slot - - def batch_predict(self, data_batch): - input = self.converter(data_batch) - output = self.network.forwardTest(input) - prob = output[0]["id"].tolist() - print("predicting labels is:") - print prob - - -def option_parser(): - usage = "python predict.py -n config -w model_dir -d dictionary -i input_file " - parser = OptionParser(usage="usage: %s [options]" % usage) - parser.add_option( - "-n", - "--tconf", - action="store", - dest="train_conf", - help="network config") - parser.add_option( - "-d", - "--dict", - action="store", - dest="dict_file", - help="dictionary file") - parser.add_option( - "-b", - "--label", - action="store", - dest="label", - default=None, - help="dictionary file") - parser.add_option( - "-c", - "--batch_size", - type="int", - action="store", - dest="batch_size", - default=1, - help="the batch size for prediction") - parser.add_option( - "-w", - "--model", - action="store", - dest="model_path", - default=None, - help="model path") - return parser.parse_args() - - -def main(): - options, args = option_parser() - train_conf = options.train_conf - batch_size = options.batch_size - dict_file = options.dict_file - model_path = options.model_path - label = options.label - swig_paddle.initPaddle("--use_gpu=0") - predict = QuickStartPrediction(train_conf, dict_file, model_path, label) - - batch = [] - labels = [] - for line in sys.stdin: - [label, text] = line.split("\t") - labels.append(int(label)) - batch.append([predict.get_index(text)]) - print("labels is:") - print labels - predict.batch_predict(batch) - - -if __name__ == '__main__': - main() diff --git a/v1_api_demo/quick_start/api_predict.sh b/v1_api_demo/quick_start/api_predict.sh deleted file mode 100755 index 4d9aa9e885..0000000000 --- a/v1_api_demo/quick_start/api_predict.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e - -#Note the default model is pass-00002, you shold make sure the model path -#exists or change the mode path. -#only test on trainer_config.lr.py -model=output/model/pass-00001/ -config=trainer_config.lr.py -label=data/labels.list -dict=data/dict.txt -batch_size=20 -head -n$batch_size data/test.txt | python api_predict.py \ - --tconf=$config\ - --model=$model \ - --label=$label \ - --dict=$dict \ - --batch_size=$batch_size diff --git a/v1_api_demo/quick_start/api_train.py b/v1_api_demo/quick_start/api_train.py deleted file mode 100644 index 5699789daa..0000000000 --- a/v1_api_demo/quick_start/api_train.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import argparse -import itertools -import random - -from paddle.trainer.config_parser import parse_config -from py_paddle import swig_paddle as api -from py_paddle import DataProviderConverter -from paddle.trainer.PyDataProvider2 \ - import integer_value, integer_value_sequence, sparse_binary_vector - - -def parse_arguments(): - parser = argparse.ArgumentParser() - parser.add_argument( - "--train_data", type=str, required=False, help="train data file") - parser.add_argument("--test_data", type=str, help="test data file") - parser.add_argument( - "--config", type=str, required=True, help="config file name") - parser.add_argument("--dict_file", required=True, help="dictionary file") - parser.add_argument( - "--seq", default=1, type=int, help="whether use sequence training") - parser.add_argument( - "--use_gpu", default=0, type=int, help="whether use GPU for training") - parser.add_argument( - "--trainer_count", - default=1, - type=int, - help="Number of threads for training") - parser.add_argument( - "--num_passes", default=5, type=int, help="Number of training passes") - return parser.parse_args() - - -UNK_IDX = 0 - - -def load_data(file_name, word_dict): - with open(file_name, 'r') as f: - for line in f: - label, comment = line.strip().split('\t') - words = comment.split() - word_slot = [word_dict.get(w, UNK_IDX) for w in words] - yield word_slot, int(label) - - -def load_dict(dict_file): - word_dict = dict() - with open(dict_file, 'r') as f: - for i, line in enumerate(f): - w = line.strip().split()[0] - word_dict[w] = i - return word_dict - - -def main(): - options = parse_arguments() - api.initPaddle("--use_gpu=%s" % options.use_gpu, - "--trainer_count=%s" % options.trainer_count) - - word_dict = load_dict(options.dict_file) - train_dataset = list(load_data(options.train_data, word_dict)) - if options.test_data: - test_dataset = list(load_data(options.test_data, word_dict)) - else: - test_dataset = None - - trainer_config = parse_config(options.config, - "dict_file=%s" % options.dict_file) - # No need to have data provider for trainer - trainer_config.ClearField('data_config') - trainer_config.ClearField('test_data_config') - - # create a GradientMachine from the model configuratin - model = api.GradientMachine.createFromConfigProto( - trainer_config.model_config) - # create a trainer for the gradient machine - trainer = api.Trainer.create(trainer_config, model) - - # create a data converter which converts data to PaddlePaddle - # internal format - input_types = [ - integer_value_sequence(len(word_dict)) if options.seq else - sparse_binary_vector(len(word_dict)), integer_value(2) - ] - converter = DataProviderConverter(input_types) - - batch_size = trainer_config.opt_config.batch_size - trainer.startTrain() - for train_pass in xrange(options.num_passes): - trainer.startTrainPass() - random.shuffle(train_dataset) - for pos in xrange(0, len(train_dataset), batch_size): - batch = itertools.islice(train_dataset, pos, pos + batch_size) - size = min(batch_size, len(train_dataset) - pos) - trainer.trainOneDataBatch(size, converter(batch)) - trainer.finishTrainPass() - if 
test_dataset: - trainer.startTestPeriod() - for pos in xrange(0, len(test_dataset), batch_size): - batch = itertools.islice(test_dataset, pos, pos + batch_size) - size = min(batch_size, len(test_dataset) - pos) - trainer.testOneDataBatch(size, converter(batch)) - trainer.finishTestPeriod() - trainer.finishTrain() - - -if __name__ == '__main__': - main() diff --git a/v1_api_demo/quick_start/api_train.sh b/v1_api_demo/quick_start/api_train.sh deleted file mode 100755 index 9b2a4e2f22..0000000000 --- a/v1_api_demo/quick_start/api_train.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e - -# Note: if using trainer_config.emb.py, trainer_config.cnn.py -# or trainer_config.lstm.py, you need to change --seq to --seq=1 -# because they are sequence models. -python api_train.py \ - --config=trainer_config.lr.py \ - --trainer_count=2 \ - --num_passes=15 \ - --use_gpu=0 \ - --seq=0 \ - --train_data=data/train.txt \ - --test_data=data/test.txt \ - --dict_file=data/dict.txt \ - 2>&1 | tee 'train.log' diff --git a/v1_api_demo/quick_start/cluster/cluster_train.sh b/v1_api_demo/quick_start/cluster/cluster_train.sh deleted file mode 100755 index a7b1f01064..0000000000 --- a/v1_api_demo/quick_start/cluster/cluster_train.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e - -# Should run pserver.sh before run this script. -bin_dir=$(cd `dirname $0`; pwd) -home_dir=$(cd "${bin_dir}/.."; pwd) -source "$bin_dir/env.sh" - -model_dir="$bin_dir/output" -log_file="$bin_dir/train.log" - -pushd "$home_dir" -cfg=trainer_config.lr.py -paddle train \ - --start_pserver=false \ - --config=$cfg \ - --save_dir=${model_dir} \ - --trainer_count=4 \ - --local=0 \ - --log_period=100 \ - --num_passes=15 \ - --use_gpu=false \ - --show_parameter_stats_period=100 \ - --test_all_data_in_one_period=1 \ - --num_gradient_servers=1 \ - --nics=`get_nics` \ - --port=7164 \ - --ports_num=1 \ - --pservers="127.0.0.1" \ - --comment="paddle_trainer" \ - 2>&1 | tee "$log_file" -popd diff --git a/v1_api_demo/quick_start/cluster/env.sh b/v1_api_demo/quick_start/cluster/env.sh deleted file mode 100644 index a404993835..0000000000 --- a/v1_api_demo/quick_start/cluster/env.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. 
All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e - -function get_nics() { - machine=`uname -s` - local nics="" - if [ "$machine" == "Linux" ]; then - nics="lo" - elif [ "$machine" == "Darwin" ]; then - nics="lo0" - else - nics="unsupport" - fi - echo $nics -} diff --git a/v1_api_demo/quick_start/cluster/pserver.sh b/v1_api_demo/quick_start/cluster/pserver.sh deleted file mode 100755 index b187c1d9b9..0000000000 --- a/v1_api_demo/quick_start/cluster/pserver.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e -bin_dir=$(cd `dirname $0`; pwd) -source "$bin_dir/env.sh" - -paddle pserver \ - --nics=`get_nics` \ - --port=7164 \ - --ports_num=1 \ - --ports_num_for_sparse=1 \ - --num_gradient_servers=1 \ - --comment="paddle_pserver" \ - 2>&1 | tee 'pserver.log' diff --git a/v1_api_demo/quick_start/data/README.md b/v1_api_demo/quick_start/data/README.md deleted file mode 100644 index 63abcf7ebf..0000000000 --- a/v1_api_demo/quick_start/data/README.md +++ /dev/null @@ -1,9 +0,0 @@ -This dataset consists of electronics product reviews associated with -binary labels (positive/negative) for sentiment classification. - -The preprocessed data can be downloaded by script `get_data.sh`. -The data was derived from reviews_Electronics_5.json.gz at - -http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/reviews_Electronics_5.json.gz - -If you want to process the raw data, you can use the script `proc_from_raw_data/get_data.sh`. diff --git a/v1_api_demo/quick_start/data/get_data.sh b/v1_api_demo/quick_start/data/get_data.sh deleted file mode 100755 index a09a18f919..0000000000 --- a/v1_api_demo/quick_start/data/get_data.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-set -e - -DIR="$( cd "$(dirname "$0")" ; pwd -P )" -cd $DIR - -# Download the preprocessed data -wget http://paddlepaddle.bj.bcebos.com/demo/quick_start_preprocessed_data/preprocessed_data.tar.gz - -# Extract package -tar zxvf preprocessed_data.tar.gz - -# Remove compressed package -rm preprocessed_data.tar.gz diff --git a/v1_api_demo/quick_start/data/proc_from_raw_data/get_data.sh b/v1_api_demo/quick_start/data/proc_from_raw_data/get_data.sh deleted file mode 100755 index d976eaebfa..0000000000 --- a/v1_api_demo/quick_start/data/proc_from_raw_data/get_data.sh +++ /dev/null @@ -1,79 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# 1. size of pos : neg = 1:1. -# 2. size of testing set = min(25k, len(all_data) * 0.1), others is traning set. -# 3. distinct train set and test set. - -set -e - -DIR="$( cd "$(dirname "$0")" ; pwd -P )" -cd $DIR - -# Download data -echo "Downloading Amazon Electronics reviews data..." -# http://jmcauley.ucsd.edu/data/amazon/ -wget http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/reviews_Electronics_5.json.gz -echo "Downloading mosesdecoder..." -# https://github.com/moses-smt/mosesdecoder -wget https://github.com/moses-smt/mosesdecoder/archive/master.zip - -unzip master.zip -rm master.zip - -################## -# Preprocess data -echo "Preprocess data..." -export LC_ALL=C -UNAME_STR=`uname` - -if [ ${UNAME_STR} == 'Linux' ]; then - SHUF_PROG='shuf' -else - SHUF_PROG='gshuf' -fi - -mkdir -p tmp -python preprocess.py -i reviews_Electronics_5.json.gz -# uniq and shuffle -cd tmp -echo 'Uniq and shuffle...' -cat pos_*|sort|uniq|${SHUF_PROG}> pos.shuffed -cat neg_*|sort|uniq|${SHUF_PROG}> neg.shuffed - -min_len=`sed -n '$=' neg.shuffed` -test_num=$((min_len/10)) -if [ $test_num -gt 12500 ];then - test_num=12500 -fi -train_num=$((min_len-test_num)) - -head -n$train_num pos.shuffed >train.pos -head -n$train_num neg.shuffed >train.neg -tail -n$test_num pos.shuffed >test.pos -tail -n$test_num neg.shuffed >test.neg - -cat train.pos train.neg | ${SHUF_PROG} >../train.txt -cat test.pos test.neg | ${SHUF_PROG} >../test.txt - -cd - -echo 'train.txt' > train.list -echo 'test.txt' > test.list - -# use 30k dict -rm -rf tmp -mv dict.txt dict_all.txt -cat dict_all.txt | head -n 30001 > dict.txt -echo 'Done.' diff --git a/v1_api_demo/quick_start/data/proc_from_raw_data/preprocess.py b/v1_api_demo/quick_start/data/proc_from_raw_data/preprocess.py deleted file mode 100755 index 5706351a21..0000000000 --- a/v1_api_demo/quick_start/data/proc_from_raw_data/preprocess.py +++ /dev/null @@ -1,236 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. -# -#Licensed under the Apache License, Version 2.0 (the "License"); -#you may not use this file except in compliance with the License. 
-#You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#See the License for the specific language governing permissions and -#limitations under the License. -# -*- coding: UTF-8 -*- - -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -1. Tokenize the words and punctuation -2. pos sample : rating score 5; neg sample: rating score 1-2. - -Usage: - python preprocess.py -i data_file [random seed] -""" - -import sys -import os -import operator -import gzip -from subprocess import Popen, PIPE -from optparse import OptionParser -import json -from multiprocessing import Queue -from multiprocessing import Pool -import multiprocessing - -batch_size = 5000 -word_count = {} -num_tokenize = max(1, - multiprocessing.cpu_count() - 2) # parse + tokenize + save -max_queue_size = 8 -parse_queue = Queue(maxsize=max_queue_size + num_tokenize) -tokenize_queue = Queue(maxsize=max_queue_size + num_tokenize) - - -def create_dict(data): - """ - Create dictionary based on data, and saved in data_dir/dict.txt. - The first line is unk \t -1. - data: list, input data by batch. - """ - for seq in data: - try: - for w in seq.lower().split(): - if w not in word_count: - word_count[w] = 1 - else: - word_count[w] += 1 - except: - sys.stderr.write(seq + "\tERROR\n") - - -def parse(path): - """ - Open .gz file. - """ - sys.stderr.write(path) - g = gzip.open(path, 'r') - for l in g: - yield json.loads(l) - g.close() - - -def tokenize(sentences): - """ - Use tokenizer.perl to tokenize input sentences. - tokenizer.perl is tool of Moses. - sentences : a list of input sentences. - return: a list of processed text. - """ - dir = './mosesdecoder-master/scripts/tokenizer/tokenizer.perl' - if not os.path.exists(dir): - sys.exit( - "The ./mosesdecoder-master/scripts/tokenizer/tokenizer.perl does not exists." 
- ) - tokenizer_cmd = [dir, '-l', 'en', '-q', '-'] - assert isinstance(sentences, list) - text = "\n".join(sentences) - tokenizer = Popen(tokenizer_cmd, stdin=PIPE, stdout=PIPE) - tok_text, _ = tokenizer.communicate(text) - toks = tok_text.split('\n')[:-1] - return toks - - -def save_data(instance, data_dir, pre_fix, batch_num): - """ - save data by batch - """ - label = ['1' if pre_fix == 'pos' else '0' for i in range(len(instance))] - lines = ['%s\t%s' % (label[i], instance[i]) for i in range(len(label))] - file_name = os.path.join(data_dir, "%s_%s.txt" % (pre_fix, batch_num)) - file(file_name, 'w').write('\n'.join(lines) + '\n') - - -def tokenize_batch(id): - """ - tokenize data by batch - """ - while True: - num_batch, instance, pre_fix = parse_queue.get() - if num_batch == -1: ### parse_queue finished - tokenize_queue.put((-1, None, None)) - sys.stderr.write("Thread %s finish\n" % (id)) - break - tokenize_instance = tokenize(instance) - tokenize_queue.put((num_batch, tokenize_instance, pre_fix)) - sys.stderr.write('.') - - -def save_batch(data_dir, num_tokenize, data_dir_dict): - """ - save data by batch - build dict.txt - """ - token_count = 0 - while True: - num_batch, instance, pre_fix = tokenize_queue.get() - if num_batch == -1: - token_count += 1 - if token_count == num_tokenize: #### tokenize finished. - break - else: - continue - save_data(instance, data_dir, pre_fix, num_batch) - create_dict(instance) ## update dict - - sys.stderr.write("save file finish\n") - f = open(data_dir_dict, 'w') - f.write('%s\t%s\n' % ('unk', '-1')) - for k, v in sorted(word_count.items(), key=operator.itemgetter(1), \ - reverse=True): - f.write('%s\t%s\n' % (k, v)) - f.close() - sys.stderr.write("build dict finish\n") - - -def parse_batch(data, num_tokenize): - """ - parse data by batch - parse -> tokenize -> save - """ - raw_txt = parse(data) - neg, pos = [], [] - count = 0 - sys.stderr.write("extract raw data\n") - for l in raw_txt: - rating = l["overall"] - text = l["reviewText"].lower() # # convert words to lower case - if rating == 5.0 and text: - pos.append(text) - if rating < 3.0 and text: - neg.append(text) - if len(pos) == batch_size or len(neg) == batch_size: - if len(pos) == batch_size: - batch = pos - pre_fix = 'pos' - else: - batch = neg - pre_fix = 'neg' - - parse_queue.put((count, batch, pre_fix)) - count += 1 - if pre_fix == 'pos': - pos = [] - else: - neg = [] - - if len(pos) > 0: - parse_queue.put((count, pos, 'pos')) - count += 1 - if len(neg) > 0: - parse_queue.put((count, neg, 'neg')) - count += 1 - for i in range(num_tokenize): - parse_queue.put((-1, None, None)) #### for tokenize's input finished - sys.stderr.write("parsing finish\n") - - -def option_parser(): - parser = OptionParser(usage="usage: python preprcoess.py "\ - "-i data_path [options]") - parser.add_option( - "-i", "--data", action="store", dest="input", help="Input data path.") - parser.add_option( - "-s", - "--seed", - action="store", - dest="seed", - default=1024, - help="Set random seed.") - return parser.parse_args() - - -def main(): - reload(sys) - sys.setdefaultencoding('utf-8') - options, args = option_parser() - data = options.input - seed = options.seed - data_dir_dict = os.path.join(os.path.dirname(data), 'dict.txt') - data_dir = os.path.join(os.path.dirname(data), 'tmp') - pool = Pool(processes=num_tokenize + 2) - pool.apply_async(parse_batch, args=(data, num_tokenize)) - for i in range(num_tokenize): - pool.apply_async(tokenize_batch, args=(str(i), )) - pool.apply_async(save_batch, args=(data_dir, 
num_tokenize, data_dir_dict)) - pool.close() - pool.join() - - file(os.path.join(os.path.dirname(data), 'labels.list'), - 'w').write('neg\t0\npos\t1\n') - - -if __name__ == '__main__': - main() diff --git a/v1_api_demo/quick_start/dataprovider_bow.py b/v1_api_demo/quick_start/dataprovider_bow.py deleted file mode 100644 index 2745495586..0000000000 --- a/v1_api_demo/quick_start/dataprovider_bow.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle.trainer.PyDataProvider2 import * - -# id of the word not in dictionary -UNK_IDX = 0 - - -# initializer is called by the framework during initialization. -# It allows the user to describe the data types and setup the -# necessary data structure for later use. -# `settings` is an object. initializer need to properly fill settings.input_types. -# initializer can also store other data structures needed to be used at process(). -# In this example, dictionary is stored in settings. -# `dictionay` and `kwargs` are arguments passed from trainer_config.lr.py -def initializer(settings, dictionary, **kwargs): - # Put the word dictionary into settings - settings.word_dict = dictionary - - # setting.input_types specifies what the data types the data provider - # generates. - settings.input_types = { - # The first input is a sparse_binary_vector, - # which means each dimension of the vector is either 0 or 1. It is the - # bag-of-words (BOW) representation of the texts. - 'word': sparse_binary_vector(len(dictionary)), - # The second input is an integer. It represents the category id of the - # sample. 2 means there are two labels in the dataset. - # (1 for positive and 0 for negative) - 'label': integer_value(2) - } - - -# Delaring a data provider. It has an initializer 'data_initialzer'. -# It will cache the generated data of the first pass in memory, so that -# during later pass, no on-the-fly data generation will be needed. -# `setting` is the same object used by initializer() -# `file_name` is the name of a file listed train_list or test_list file given -# to define_py_data_sources2(). See trainer_config.lr.py. -@provider(init_hook=initializer, cache=CacheType.CACHE_PASS_IN_MEM) -def process(settings, file_name): - # Open the input data file. - with open(file_name, 'r') as f: - # Read each line. - for line in f: - # Each line contains the label and text of the comment, separated by \t. - label, comment = line.strip().split('\t') - - # Split the words into a list. - words = comment.split() - - # convert the words into a list of ids by looking them up in word_dict. - word_vector = [settings.word_dict.get(w, UNK_IDX) for w in words] - - # Return the features for the current comment. The first is a list - # of ids representing a 0-1 binary sparse vector of the text, - # the second is the integer id of the label. 
- yield {'word': word_vector, 'label': int(label)} - - -def predict_initializer(settings, dictionary, **kwargs): - settings.word_dict = dictionary - settings.input_types = {'word': sparse_binary_vector(len(dictionary))} - - -# Declaring a data provider for prediction. The difference with process -# is that label is not generated. -@provider(init_hook=predict_initializer, should_shuffle=False) -def process_predict(settings, file_name): - with open(file_name, 'r') as f: - for line in f: - comment = line.strip().split() - word_vector = [settings.word_dict.get(w, UNK_IDX) for w in comment] - yield {'word': word_vector} diff --git a/v1_api_demo/quick_start/dataprovider_emb.py b/v1_api_demo/quick_start/dataprovider_emb.py deleted file mode 100755 index ddfa3ce9b7..0000000000 --- a/v1_api_demo/quick_start/dataprovider_emb.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle.trainer.PyDataProvider2 import * - -UNK_IDX = 0 - - -def initializer(settings, dictionary, **kwargs): - settings.word_dict = dictionary - settings.input_types = { - # Define the type of the first input as sequence of integer. - # The value of the integers range from 0 to len(dictrionary)-1 - 'word': integer_value_sequence(len(dictionary)), - # Define the second input for label id - 'label': integer_value(2) - } - - -@provider(init_hook=initializer, cache=CacheType.CACHE_PASS_IN_MEM) -def process(settings, file_name): - with open(file_name, 'r') as f: - for line in f: - label, comment = line.strip().split('\t') - words = comment.split() - word_slot = [settings.word_dict.get(w, UNK_IDX) for w in words] - yield {'word': word_slot, 'label': int(label)} - - -def predict_initializer(settings, dictionary, **kwargs): - settings.word_dict = dictionary - settings.input_types = {'word': integer_value_sequence(len(dictionary))} - - -@provider(init_hook=predict_initializer, should_shuffle=False) -def process_predict(settings, file_name): - with open(file_name, 'r') as f: - for line in f: - comment = line.strip().split() - word_slot = [settings.word_dict.get(w, UNK_IDX) for w in comment] - yield {'word': word_slot} diff --git a/v1_api_demo/quick_start/predict.sh b/v1_api_demo/quick_start/predict.sh deleted file mode 100755 index e47c2dd01f..0000000000 --- a/v1_api_demo/quick_start/predict.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -set -e - -cfg=trainer_config.lr.py -#cfg=trainer_config.emb.py -#cfg=trainer_config.cnn.py -#cfg=trainer_config.lstm.py -model="output/pass-00003" -paddle train \ - --config=$cfg \ - --use_gpu=false \ - --job=test \ - --init_model_path=$model \ - --config_args=is_predict=1 \ - --predict_output_dir=. \ -2>&1 | tee 'predict.log' -paddle usage -l 'predict.log' -e $? -n "quick_start_predict_${cfg}" >/dev/null 2>&1 - -mv rank-00000 result.txt diff --git a/v1_api_demo/quick_start/train.sh b/v1_api_demo/quick_start/train.sh deleted file mode 100755 index 01697fed48..0000000000 --- a/v1_api_demo/quick_start/train.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e - -cfg=trainer_config.lr.py -#cfg=trainer_config.emb.py -#cfg=trainer_config.cnn.py -#cfg=trainer_config.lstm.py -#cfg=trainer_config.bidi-lstm.py -#cfg=trainer_config.db-lstm.py -#cfg=trainer_config.resnet-lstm.py -paddle train \ - --config=$cfg \ - --save_dir=./output \ - --trainer_count=4 \ - --log_period=100 \ - --num_passes=15 \ - --use_gpu=false \ - --show_parameter_stats_period=100 \ - --test_all_data_in_one_period=1 \ - 2>&1 | tee 'train.log' -paddle usage -l "train.log" -e $? -n "quick_start_${cfg}" >/dev/null 2>&1 diff --git a/v1_api_demo/quick_start/trainer_config.bidi-lstm.py b/v1_api_demo/quick_start/trainer_config.bidi-lstm.py deleted file mode 100644 index 3deff4aa00..0000000000 --- a/v1_api_demo/quick_start/trainer_config.bidi-lstm.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. -# -#Licensed under the Apache License, Version 2.0 (the "License"); -#you may not use this file except in compliance with the License. -#You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#See the License for the specific language governing permissions and -#limitations under the License. -# edit-mode: -*- python -*- - -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from paddle.trainer_config_helpers import * - -dict_file = "./data/dict.txt" -word_dict = dict() -with open(dict_file, 'r') as f: - for i, line in enumerate(f): - w = line.strip().split()[0] - word_dict[w] = i - -is_predict = get_config_arg('is_predict', bool, False) -trn = 'data/train.list' if not is_predict else None -tst = 'data/test.list' if not is_predict else 'data/pred.list' -process = 'process' if not is_predict else 'process_predict' -define_py_data_sources2( - train_list=trn, - test_list=tst, - module="dataprovider_emb", - obj=process, - args={"dictionary": word_dict}) - -batch_size = 128 if not is_predict else 1 -settings( - batch_size=batch_size, - learning_rate=2e-3, - learning_method=AdamOptimizer(), - regularization=L2Regularization(8e-4), - gradient_clipping_threshold=25) - -bias_attr = ParamAttr(initial_std=0., l2_rate=0.) -data = data_layer(name="word", size=len(word_dict)) -emb = embedding_layer(input=data, size=128) - -bi_lstm = bidirectional_lstm(input=emb, size=128) -dropout = dropout_layer(input=bi_lstm, dropout_rate=0.5) - -output = fc_layer( - input=dropout, size=2, bias_attr=bias_attr, act=SoftmaxActivation()) - -if is_predict: - maxid = maxid_layer(output) - outputs([maxid, output]) -else: - label = data_layer(name="label", size=2) - cls = classification_cost(input=output, label=label) - outputs(cls) diff --git a/v1_api_demo/quick_start/trainer_config.cnn.py b/v1_api_demo/quick_start/trainer_config.cnn.py deleted file mode 100644 index e09e41484d..0000000000 --- a/v1_api_demo/quick_start/trainer_config.cnn.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. -# -#Licensed under the Apache License, Version 2.0 (the "License"); -#you may not use this file except in compliance with the License. -#You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#See the License for the specific language governing permissions and -#limitations under the License. -# edit-mode: -*- python -*- - -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from paddle.trainer_config_helpers import * - -dict_file = "./data/dict.txt" -word_dict = dict() -with open(dict_file, 'r') as f: - for i, line in enumerate(f): - w = line.strip().split()[0] - word_dict[w] = i - -is_predict = get_config_arg('is_predict', bool, False) -trn = 'data/train.list' if not is_predict else None -tst = 'data/test.list' if not is_predict else 'data/pred.list' -process = 'process' if not is_predict else 'process_predict' -define_py_data_sources2( - train_list=trn, - test_list=tst, - module="dataprovider_emb", - obj=process, - args={"dictionary": word_dict}) - -batch_size = 128 if not is_predict else 1 -settings( - batch_size=batch_size, - learning_rate=2e-3, - learning_method=AdamOptimizer(), - regularization=L2Regularization(8e-4), - gradient_clipping_threshold=25) - -data = data_layer(name="word", size=len(word_dict)) -embedding = embedding_layer(input=data, size=128) -conv = sequence_conv_pool(input=embedding, context_len=3, hidden_size=512) -output = fc_layer(input=conv, size=2, act=SoftmaxActivation()) -if is_predict: - maxid = maxid_layer(output) - outputs([maxid, output]) -else: - label = data_layer(name="label", size=2) - cls = classification_cost(input=output, label=label) - outputs(cls) diff --git a/v1_api_demo/quick_start/trainer_config.db-lstm.py b/v1_api_demo/quick_start/trainer_config.db-lstm.py deleted file mode 100644 index fba802b460..0000000000 --- a/v1_api_demo/quick_start/trainer_config.db-lstm.py +++ /dev/null @@ -1,74 +0,0 @@ -# edit-mode: -*- python -*- - -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle.trainer_config_helpers import * - -dict_file = "./data/dict.txt" -word_dict = dict() -with open(dict_file, 'r') as f: - for i, line in enumerate(f): - w = line.strip().split()[0] - word_dict[w] = i - -is_predict = get_config_arg('is_predict', bool, False) -trn = 'data/train.list' if not is_predict else None -tst = 'data/test.list' if not is_predict else 'data/pred.list' -process = 'process' if not is_predict else 'process_predict' -define_py_data_sources2( - train_list=trn, - test_list=tst, - module="dataprovider_emb", - obj=process, - args={"dictionary": word_dict}) - -batch_size = 128 if not is_predict else 1 -settings( - batch_size=batch_size, - learning_rate=2e-3, - learning_method=AdamOptimizer(), - regularization=L2Regularization(8e-4), - gradient_clipping_threshold=25) - -bias_attr = ParamAttr(initial_std=0., l2_rate=0.) 
- -data = data_layer(name="word", size=len(word_dict)) -emb = embedding_layer(input=data, size=128) - -hidden_0 = mixed_layer(size=128, input=[full_matrix_projection(input=emb)]) -lstm_0 = lstmemory(input=hidden_0, layer_attr=ExtraAttr(drop_rate=0.1)) - -input_layers = [hidden_0, lstm_0] - -for i in range(1, 8): - fc = fc_layer(input=input_layers, size=128) - lstm = lstmemory( - input=fc, - layer_attr=ExtraAttr(drop_rate=0.1), - reverse=(i % 2) == 1, ) - input_layers = [fc, lstm] - -lstm_last = pooling_layer(input=lstm, pooling_type=MaxPooling()) - -output = fc_layer( - input=lstm_last, size=2, bias_attr=bias_attr, act=SoftmaxActivation()) - -if is_predict: - maxid = maxid_layer(output) - outputs([maxid, output]) -else: - label = data_layer(name="label", size=2) - cls = classification_cost(input=output, label=label) - outputs(cls) diff --git a/v1_api_demo/quick_start/trainer_config.emb.py b/v1_api_demo/quick_start/trainer_config.emb.py deleted file mode 100644 index f69f98ff7f..0000000000 --- a/v1_api_demo/quick_start/trainer_config.emb.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. -# -#Licensed under the Apache License, Version 2.0 (the "License"); -#you may not use this file except in compliance with the License. -#You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#See the License for the specific language governing permissions and -#limitations under the License. -# edit-mode: -*- python -*- - -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from paddle.trainer_config_helpers import * - -dict_file = "./data/dict.txt" -word_dict = dict() -with open(dict_file, 'r') as f: - for i, line in enumerate(f): - w = line.strip().split()[0] - word_dict[w] = i - -is_predict = get_config_arg('is_predict', bool, False) -trn = 'data/train.list' if not is_predict else None -tst = 'data/test.list' if not is_predict else 'data/pred.list' -process = 'process' if not is_predict else 'process_predict' -define_py_data_sources2( - train_list=trn, - test_list=tst, - module="dataprovider_emb", - obj=process, - args={"dictionary": word_dict}) - -batch_size = 128 if not is_predict else 1 -settings( - batch_size=batch_size, learning_rate=2e-3, learning_method=AdamOptimizer()) - -data = data_layer(name="word", size=len(word_dict)) -embedding = embedding_layer(input=data, size=128) -avg = pooling_layer(input=embedding, pooling_type=AvgPooling()) -output = fc_layer(input=avg, size=2, act=SoftmaxActivation()) -if is_predict: - maxid = maxid_layer(output) - outputs([maxid, output]) -else: - label = data_layer(name="label", size=2) - cls = classification_cost(input=output, label=label) - outputs(cls) diff --git a/v1_api_demo/quick_start/trainer_config.lr.py b/v1_api_demo/quick_start/trainer_config.lr.py deleted file mode 100644 index b7b694940e..0000000000 --- a/v1_api_demo/quick_start/trainer_config.lr.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. -# -#Licensed under the Apache License, Version 2.0 (the "License"); -#you may not use this file except in compliance with the License. -#You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#See the License for the specific language governing permissions and -#limitations under the License. -# edit-mode: -*- python -*- - -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle.trainer_config_helpers import * - -dict_file = get_config_arg('dict_file', str, "./data/dict.txt") -word_dict = dict() -with open(dict_file, 'r') as f: - for i, line in enumerate(f): - w = line.strip().split()[0] - word_dict[w] = i - -is_predict = get_config_arg('is_predict', bool, False) -trn = 'data/train.list' if not is_predict else None -tst = 'data/test.list' if not is_predict else 'data/pred.list' -process = 'process' if not is_predict else 'process_predict' - -# define the data sources for the model. -# We need to use different process for training and prediction. -# For training, the input data includes both word IDs and labels. -# For prediction, the input data only includs word Ids. 
-define_py_data_sources2( - train_list=trn, - test_list=tst, - module="dataprovider_bow", - obj=process, - args={"dictionary": word_dict}) - -batch_size = 128 if not is_predict else 1 -settings( - batch_size=batch_size, - learning_rate=2e-3, - learning_method=AdamOptimizer(), - regularization=L2Regularization(8e-4), - gradient_clipping_threshold=25) - -# Define the data for text features. The size of the data layer is the number -# of words in the dictionary. -data = data_layer(name="word", size=len(word_dict)) - -# Define a fully connected layer with logistic activation. -# (also called softmax activation). -output = fc_layer(input=data, size=2, act=SoftmaxActivation()) - -if not is_predict: - # For training, we need label and cost - - # define the category id for each example. - # The size of the data layer is the number of labels. - label = data_layer(name="label", size=2) - - # Define cross-entropy classification loss and error. - cls = classification_cost(input=output, label=label) - outputs(cls) -else: - # For prediction, no label is needed. We need to output - # We need to output classification result, and class probabilities. - maxid = maxid_layer(output) - outputs([maxid, output]) diff --git a/v1_api_demo/quick_start/trainer_config.lstm.py b/v1_api_demo/quick_start/trainer_config.lstm.py deleted file mode 100644 index 8967d78807..0000000000 --- a/v1_api_demo/quick_start/trainer_config.lstm.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. -# -#Licensed under the Apache License, Version 2.0 (the "License"); -#you may not use this file except in compliance with the License. -#You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#See the License for the specific language governing permissions and -#limitations under the License. -# edit-mode: -*- python -*- - -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from paddle.trainer_config_helpers import * - -dict_file = "./data/dict.txt" -word_dict = dict() -with open(dict_file, 'r') as f: - for i, line in enumerate(f): - w = line.strip().split()[0] - word_dict[w] = i - -is_predict = get_config_arg('is_predict', bool, False) -trn = 'data/train.list' if not is_predict else None -tst = 'data/test.list' if not is_predict else 'data/pred.list' -process = 'process' if not is_predict else 'process_predict' -define_py_data_sources2( - train_list=trn, - test_list=tst, - module="dataprovider_emb", - obj=process, - args={"dictionary": word_dict}) - -batch_size = 128 if not is_predict else 1 -settings( - batch_size=batch_size, - learning_rate=2e-3, - learning_method=AdamOptimizer(), - regularization=L2Regularization(8e-4), - gradient_clipping_threshold=25) - -data = data_layer(name="word", size=len(word_dict)) -emb = embedding_layer(input=data, size=128) -lstm = simple_lstm( - input=emb, size=128, lstm_cell_attr=ExtraAttr(drop_rate=0.25)) -lstm_max = pooling_layer(input=lstm, pooling_type=MaxPooling()) -output = fc_layer(input=lstm_max, size=2, act=SoftmaxActivation()) -if is_predict: - maxid = maxid_layer(output) - outputs([maxid, output]) -else: - label = data_layer(name="label", size=2) - cls = classification_cost(input=output, label=label) - outputs(cls) diff --git a/v1_api_demo/quick_start/trainer_config.resnet-lstm.py b/v1_api_demo/quick_start/trainer_config.resnet-lstm.py deleted file mode 100644 index 32d0596f25..0000000000 --- a/v1_api_demo/quick_start/trainer_config.resnet-lstm.py +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. -# -#Licensed under the Apache License, Version 2.0 (the "License"); -#you may not use this file except in compliance with the License. -#You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#See the License for the specific language governing permissions and -#limitations under the License. -# edit-mode: -*- python -*- - -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -This configuration is a demonstration of how to implement the stacked LSTM -with residual connections, i.e. an LSTM layer takes the sum of the hidden states -and inputs of the previous LSTM layer instead of only the hidden states. -This architecture is from: -Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V. Le, Mohammad Norouzi, -Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, -Jeff Klingner, Apurva Shah, Melvin Johnson, Xiaobing Liu, Lukasz Kaiser, -Stephan Gouws, Yoshikiyo Kato, Taku Kudo, Hideto Kazawa, Keith Stevens, -George Kurian, Nishant Patil, Wei Wang, Cliff Young, Jason Smith, Jason Riesa, -Alex Rudnick, Oriol Vinyals, Greg Corrado, Macduff Hughes, Jeffrey Dean. 2016. 
-Google's Neural Machine Translation System: Bridging the Gap between Human and -Machine Translation. In arXiv https://arxiv.org/pdf/1609.08144v2.pdf -Different from the architecture described in the paper, we use a stack single -direction LSTM layers as the first layer instead of bi-directional LSTM. Also, -since this is a demo code, to reduce computation time, we stacked 4 layers -instead of 8 layers. -""" - -from paddle.trainer_config_helpers import * - -dict_file = "./data/dict.txt" -word_dict = dict() -with open(dict_file, 'r') as f: - for i, line in enumerate(f): - w = line.strip().split()[0] - word_dict[w] = i - -is_predict = get_config_arg('is_predict', bool, False) -trn = 'data/train.list' if not is_predict else None -tst = 'data/test.list' if not is_predict else 'data/pred.list' -process = 'process' if not is_predict else 'process_predict' -define_py_data_sources2( - train_list=trn, - test_list=tst, - module="dataprovider_emb", - obj=process, - args={"dictionary": word_dict}) - -batch_size = 128 if not is_predict else 1 -settings( - batch_size=batch_size, - learning_rate=2e-3, - learning_method=AdamOptimizer(), - regularization=L2Regularization(8e-4), - gradient_clipping_threshold=25) - -bias_attr = ParamAttr(initial_std=0., l2_rate=0.) - -data = data_layer(name="word", size=len(word_dict)) -emb = embedding_layer(input=data, size=128) -lstm = simple_lstm(input=emb, size=128, lstm_cell_attr=ExtraAttr(drop_rate=0.1)) - -previous_input, previous_hidden_state = emb, lstm - -for i in range(3): - # The input to the current layer is the sum of the hidden state - # and input of the previous layer. - current_input = addto_layer(input=[previous_input, previous_hidden_state]) - hidden_state = simple_lstm( - input=current_input, size=128, lstm_cell_attr=ExtraAttr(drop_rate=0.1)) - previous_input, previous_hidden_state = current_input, hidden_state - -lstm = previous_hidden_state - -lstm_last = pooling_layer(input=lstm, pooling_type=MaxPooling()) -output = fc_layer( - input=lstm_last, size=2, bias_attr=bias_attr, act=SoftmaxActivation()) - -if is_predict: - maxid = maxid_layer(output) - outputs([maxid, output]) -else: - label = data_layer(name="label", size=2) - cls = classification_cost(input=output, label=label) - outputs(cls) diff --git a/v1_api_demo/sequence_tagging/data/get_data.sh b/v1_api_demo/sequence_tagging/data/get_data.sh deleted file mode 100755 index 0cdb394035..0000000000 --- a/v1_api_demo/sequence_tagging/data/get_data.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-set -e - -DIR="$( cd "$(dirname "$0")" ; pwd -P )" -cd $DIR - -wget http://www.cnts.ua.ac.be/conll2000/chunking/train.txt.gz -wget http://www.cnts.ua.ac.be/conll2000/chunking/test.txt.gz diff --git a/v1_api_demo/sequence_tagging/data/test.list b/v1_api_demo/sequence_tagging/data/test.list deleted file mode 100644 index 073c0a0c90..0000000000 --- a/v1_api_demo/sequence_tagging/data/test.list +++ /dev/null @@ -1 +0,0 @@ -data/test.txt.gz diff --git a/v1_api_demo/sequence_tagging/data/train.list b/v1_api_demo/sequence_tagging/data/train.list deleted file mode 100644 index 43c24d5f64..0000000000 --- a/v1_api_demo/sequence_tagging/data/train.list +++ /dev/null @@ -1 +0,0 @@ -data/train.txt.gz diff --git a/v1_api_demo/sequence_tagging/dataprovider.py b/v1_api_demo/sequence_tagging/dataprovider.py deleted file mode 100644 index bb4b4465bc..0000000000 --- a/v1_api_demo/sequence_tagging/dataprovider.py +++ /dev/null @@ -1,260 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle.trainer.PyDataProvider2 import * -import gzip -import logging - -logging.basicConfig( - format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s', ) -logger = logging.getLogger('paddle') -logger.setLevel(logging.INFO) - -OOV_POLICY_IGNORE = 0 -OOV_POLICY_USE = 1 -OOV_POLICY_ERROR = 2 - -num_original_columns = 3 - -# Feature combination patterns. -# [[-1,0], [0,0]] means previous token at column 0 and current token at -# column 0 are combined as one feature. -patterns = [ - [[-2, 0]], - [[-1, 0]], - [[0, 0]], - [[1, 0]], - [[2, 0]], - [[-1, 0], [0, 0]], - [[0, 0], [1, 0]], - [[-2, 1]], - [[-1, 1]], - [[0, 1]], - [[1, 1]], - [[2, 1]], - [[-2, 1], [-1, 1]], - [[-1, 1], [0, 1]], - [[0, 1], [1, 1]], - [[1, 1], [2, 1]], - [[-2, 1], [-1, 1], [0, 1]], - [[-1, 1], [0, 1], [1, 1]], - [[0, 1], [1, 1], [2, 1]], -] - -dict_label = { - 'B-ADJP': 0, - 'I-ADJP': 1, - 'B-ADVP': 2, - 'I-ADVP': 3, - 'B-CONJP': 4, - 'I-CONJP': 5, - 'B-INTJ': 6, - 'I-INTJ': 7, - 'B-LST': 8, - 'I-LST': 9, - 'B-NP': 10, - 'I-NP': 11, - 'B-PP': 12, - 'I-PP': 13, - 'B-PRT': 14, - 'I-PRT': 15, - 'B-SBAR': 16, - 'I-SBAR': 17, - 'B-UCP': 18, - 'I-UCP': 19, - 'B-VP': 20, - 'I-VP': 21, - 'O': 22 -} - - -def make_features(sequence): - length = len(sequence) - num_features = len(sequence[0]) - - def get_features(pos): - if pos < 0: - return ['#B%s' % -pos] * num_features - if pos >= length: - return ['#E%s' % (pos - length + 1)] * num_features - return sequence[pos] - - for i in xrange(length): - for pattern in patterns: - fname = '/'.join([get_features(i + pos)[f] for pos, f in pattern]) - sequence[i].append(fname) - - -''' -Source file format: -Each line is for one timestep. The features are separated by space. -An empty line indicates end of a sequence. - -cutoff: a list of numbers. If count of a feature is smaller than this, - it will be ignored. -if oov_policy[i] is OOV_POLICY_USE, id 0 is reserved for OOV features of -i-th column. 
- -return a list of dict for each column -''' - - -def create_dictionaries(filename, cutoff, oov_policy): - def add_to_dict(sequence, dicts): - num_features = len(dicts) - for features in sequence: - l = len(features) - assert l == num_features, "Wrong number of features " + line - for i in xrange(l): - if features[i] in dicts[i]: - dicts[i][features[i]] += 1 - else: - dicts[i][features[i]] = 1 - - num_features = len(cutoff) - dicts = [] - for i in xrange(num_features): - dicts.append(dict()) - - f = gzip.open(filename, 'rb') - - sequence = [] - - for line in f: - line = line.strip() - if not line: - make_features(sequence) - add_to_dict(sequence, dicts) - sequence = [] - continue - features = line.split(' ') - sequence.append(features) - - for i in xrange(num_features): - dct = dicts[i] - n = 1 if oov_policy[i] == OOV_POLICY_USE else 0 - todo = [] - for k, v in dct.iteritems(): - if v < cutoff[i]: - todo.append(k) - else: - dct[k] = n - n += 1 - - if oov_policy[i] == OOV_POLICY_USE: - # placeholder so that len(dct) will be the number of features - # including OOV - dct['#OOV#'] = 0 - - logger.info('column %d dict size=%d, ignored %d' % (i, n, len(todo))) - for k in todo: - del dct[k] - - f.close() - return dicts - - -def initializer(settings, **xargs): - cutoff = [3, 1, 0] - cutoff += [3] * len(patterns) - oov_policy = [OOV_POLICY_IGNORE, OOV_POLICY_ERROR, OOV_POLICY_ERROR] - oov_policy += [OOV_POLICY_IGNORE] * len(patterns) - dicts = create_dictionaries('data/train.txt.gz', cutoff, oov_policy) - dicts[2] = dict_label - settings.dicts = dicts - settings.oov_policy = oov_policy - input_types = [] - num_features = len(dicts) - for i in xrange(num_original_columns): - input_types.append(integer_sequence(len(dicts[i]))) - logger.info("slot %s size=%s" % (i, len(dicts[i]))) - if patterns: - dim = 0 - for i in xrange(num_original_columns, num_features): - dim += len(dicts[i]) - input_types.append(sparse_binary_vector_sequence(dim)) - logger.info("feature size=%s" % dim) - settings.input_types = input_types - - -''' -if oov_policy[i] == OOV_POLICY_USE, features in i-th column which are not -existed in dicts[i] will be assigned to id 0. -if oov_policy[i] == OOV_POLICY_ERROR, all features in i-th column MUST exist -in dicts[i]. 
-''' - - -@provider(init_hook=initializer, cache=CacheType.CACHE_PASS_IN_MEM) -def process(settings, filename): - input_file = filename - dicts = settings.dicts - oov_policy = settings.oov_policy - - def gen_sample(sequence): - num_features = len(dicts) - sample = [list() for i in xrange(num_original_columns)] - if patterns: - sample.append([]) - for features in sequence: - assert len(features) == num_features, \ - "Wrong number of features: " + line - for i in xrange(num_original_columns): - id = dicts[i].get(features[i], -1) - if id != -1: - sample[i].append(id) - elif oov_policy[i] == OOV_POLICY_IGNORE: - sample[i].append(0xffffffff) - elif oov_policy[i] == OOV_POLICY_ERROR: - logger.fatal("Unknown token: %s" % features[i]) - else: - sample[i].append(0) - - if patterns: - dim = 0 - vec = [] - for i in xrange(num_original_columns, num_features): - id = dicts[i].get(features[i], -1) - if id != -1: - vec.append(dim + id) - elif oov_policy[i] == OOV_POLICY_IGNORE: - pass - elif oov_policy[i] == OOV_POLICY_ERROR: - logger.fatal("Unknown token: %s" % features[i]) - else: - vec.ids.append(dim + 0) - - dim += len(dicts[i]) - sample[-1].append(vec) - return sample - - num_features = len(dicts) - f = gzip.open(input_file, 'rb') - - num_sequences = 0 - sequence = [] - for line in f: - line = line.strip() - if not line: - make_features(sequence) - yield gen_sample(sequence) - sequence = [] - num_sequences += 1 - continue - features = line.split(' ') - sequence.append(features) - - f.close() - - logger.info("num_sequences=%s" % num_sequences) diff --git a/v1_api_demo/sequence_tagging/linear_crf.py b/v1_api_demo/sequence_tagging/linear_crf.py deleted file mode 100644 index ea012ba1ae..0000000000 --- a/v1_api_demo/sequence_tagging/linear_crf.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle.trainer_config_helpers import * - -import math - -define_py_data_sources2( - train_list="data/train.list", - test_list="data/test.list", - module="dataprovider", - obj="process") - -batch_size = 1 -settings( - learning_method=MomentumOptimizer(), - batch_size=batch_size, - regularization=L2Regularization(batch_size * 1e-4), - model_average=ModelAverage(0.5), - learning_rate=1e-1, - learning_rate_decay_a=1e-5, - learning_rate_decay_b=0.25, ) - -num_label_types = 23 - - -def get_simd_size(size): - return int(math.ceil(float(size) / 8)) * 8 - - -# Currently, in order to use sparse_update=True, -# the size has to be aligned. 
-num_label_types = get_simd_size(num_label_types) - -features = data_layer(name="features", size=76328) -word = data_layer(name="word", size=6778) -pos = data_layer(name="pos", size=44) -chunk = data_layer(name="chunk", size=num_label_types) - -crf_input = fc_layer( - input=features, - size=num_label_types, - act=LinearActivation(), - bias_attr=False, - param_attr=ParamAttr( - initial_std=0, sparse_update=True)) - -crf = crf_layer( - input=crf_input, - label=chunk, - param_attr=ParamAttr( - name="crfw", initial_std=0), ) - -crf_decoding = crf_decoding_layer( - size=num_label_types, - input=crf_input, - label=chunk, - param_attr=ParamAttr(name="crfw"), ) - -sum_evaluator( - name="error", - input=crf_decoding, ) - -chunk_evaluator( - name="chunk_f1", - input=crf_decoding, - label=chunk, - chunk_scheme="IOB", - num_chunk_types=11, ) - -inputs(word, pos, chunk, features) -outputs(crf) diff --git a/v1_api_demo/sequence_tagging/readme.md b/v1_api_demo/sequence_tagging/readme.md deleted file mode 100644 index 2e17fffb83..0000000000 --- a/v1_api_demo/sequence_tagging/readme.md +++ /dev/null @@ -1,45 +0,0 @@ -# Sequence Tagging - -This demo is a sequence model for assigning tags to each token in a sentence. The task is described at CONLL2000 Text Chunking task. - -## Download data -```bash -cd demo/sequence_tagging -./data/get_data.sh -``` - -## Train model -```bash -cd demo/sequence_tagging -./train.sh -``` - -## Model description - -We provide two models. One is a linear CRF model (linear_crf.py) with is equivalent to the one at leon.bottou.org/projects/sgd. The second one is a stacked bidirectional RNN and CRF model (rnn_crf.py). -

-| Model name | Number of parameters | F1 score |
-| :--------- | :------------------- | :------- |
-| linear_crf | 1.8M                 | 0.937    |
-| rnn_crf    | 960K                 | 0.941    |
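The parameter counts in the table can be roughly reproduced from the layer sizes in the two deleted configs (`linear_crf.py` and `rnn_crf.py`). The sketch below is a back-of-the-envelope approximation only: it ignores bias vectors and the small CRF transition matrix, and assumes the totals are dominated by the sparse feature-to-label weights and the word embedding, respectively.

```python
# Rough parameter-count check for the two chunking configs (approximate).

# linear_crf.py: a single sparse fc layer maps 76328 hand-crafted features
# to 23 chunk labels, padded to 24 so that sparse_update stays SIMD-aligned.
linear_crf_params = 76328 * 24                        # ~1.83M -> "1.8M"

# rnn_crf.py: dominated by the 6778-word embedding of dimension 128, plus a
# few 128x128 projections (hidden1/hidden2 and the two recurrent layers)
# and the projections into the 23-way CRF input.
embedding = 6778 * 128                                # 867,584
projections = 5 * 128 * 128 + 44 * 128                # dense + pos-table weights
crf_input = 2 * 128 * 23                              # hidden2 and rnn2 -> labels
rnn_crf_params = embedding + projections + crf_input  # ~0.96M -> "960K"

print(linear_crf_params, rnn_crf_params)              # 1831872 961024
```

Both rough totals land close to the figures reported in the table above.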
diff --git a/v1_api_demo/sequence_tagging/rnn_crf.py b/v1_api_demo/sequence_tagging/rnn_crf.py deleted file mode 100644 index 937a34df10..0000000000 --- a/v1_api_demo/sequence_tagging/rnn_crf.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle.trainer_config_helpers import * - -import math - -define_py_data_sources2( - train_list="data/train.list", - test_list="data/test.list", - module="dataprovider", - obj="process") - -batch_size = 16 -settings( - learning_method=MomentumOptimizer(), - batch_size=batch_size, - regularization=L2Regularization(batch_size * 1e-5), - model_average=ModelAverage(0.5), - learning_rate=2e-3, - learning_rate_decay_a=5e-7, - learning_rate_decay_b=0.5, ) - -word_dim = 128 -hidden_dim = 128 -with_rnn = True - -initial_std = 1 / math.sqrt(hidden_dim) -param_attr = ParamAttr(initial_std=initial_std) -cpu_layer_attr = ExtraLayerAttribute(device=-1) - -default_device(0) - -num_label_types = 23 - -features = data_layer(name="features", size=76328) -word = data_layer(name="word", size=6778) -pos = data_layer(name="pos", size=44) -chunk = data_layer( - name="chunk", size=num_label_types, layer_attr=cpu_layer_attr) - -emb = embedding_layer( - input=word, size=word_dim, param_attr=ParamAttr(initial_std=0)) - -hidden1 = mixed_layer( - size=hidden_dim, - act=STanhActivation(), - bias_attr=True, - input=[ - full_matrix_projection(emb), table_projection( - pos, param_attr=param_attr) - ]) - -if with_rnn: - rnn1 = recurrent_layer( - act=ReluActivation(), - bias_attr=True, - input=hidden1, - param_attr=ParamAttr(initial_std=0), ) - -hidden2 = mixed_layer( - size=hidden_dim, - act=STanhActivation(), - bias_attr=True, - input=[full_matrix_projection(hidden1)] + - ([full_matrix_projection( - rnn1, param_attr=ParamAttr(initial_std=0))] if with_rnn else []), ) - -if with_rnn: - rnn2 = recurrent_layer( - reverse=True, - act=ReluActivation(), - bias_attr=True, - input=hidden2, - param_attr=ParamAttr(initial_std=0), ) - -crf_input = mixed_layer( - size=num_label_types, - bias_attr=False, - input=[full_matrix_projection(hidden2), ] + - ([full_matrix_projection( - rnn2, param_attr=ParamAttr(initial_std=0))] if with_rnn else []), ) - -crf = crf_layer( - input=crf_input, - label=chunk, - param_attr=ParamAttr( - name="crfw", initial_std=0), - layer_attr=cpu_layer_attr, ) - -crf_decoding = crf_decoding_layer( - size=num_label_types, - input=crf_input, - label=chunk, - param_attr=ParamAttr(name="crfw"), - layer_attr=cpu_layer_attr, ) - -sum_evaluator( - name="error", - input=crf_decoding, ) - -chunk_evaluator( - name="chunk_f1", - input=crf_decoding, - label=chunk, - chunk_scheme="IOB", - num_chunk_types=11, ) - -inputs(word, pos, chunk, features) -outputs(crf) diff --git a/v1_api_demo/sequence_tagging/train.sh b/v1_api_demo/sequence_tagging/train.sh deleted file mode 100755 index 37e196c842..0000000000 --- a/v1_api_demo/sequence_tagging/train.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash 
- -paddle train \ - --config rnn_crf.py \ - --parallel_nn=1 \ - --use_gpu=1 \ - --dot_period=10 \ - --log_period=1000 \ - --test_period=0 \ - --num_passes=10 \ -2>&1 | tee 'train.log' -paddle usage -l 'train.log' -e $? -n "sequence_tagging_train" >/dev/null 2>&1 diff --git a/v1_api_demo/sequence_tagging/train_linear.sh b/v1_api_demo/sequence_tagging/train_linear.sh deleted file mode 100755 index ad6e2d8ee7..0000000000 --- a/v1_api_demo/sequence_tagging/train_linear.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -paddle train \ - --config linear_crf.py \ - --use_gpu=0 \ - --dot_period=100 \ - --log_period=10000 \ - --test_period=0 \ - --num_passes=10 -2>&1 | tee 'train_linear.log' -paddle usage -l 'train_linear.log' -e $? -n "sequence_tagging_train_linear" >/dev/null 2>&1 diff --git a/v1_api_demo/traffic_prediction/README b/v1_api_demo/traffic_prediction/README deleted file mode 100644 index 4c95188583..0000000000 --- a/v1_api_demo/traffic_prediction/README +++ /dev/null @@ -1,7 +0,0 @@ -run by: -cd ./data -sh get_data.sh -cd .. -sh train.sh -sh predict.sh - diff --git a/v1_api_demo/traffic_prediction/data/get_data.sh b/v1_api_demo/traffic_prediction/data/get_data.sh deleted file mode 100755 index f2fa548d47..0000000000 --- a/v1_api_demo/traffic_prediction/data/get_data.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors, Inc. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -e -set -x - -DIR="$( cd "$(dirname "$0")" ; pwd -P )" -cd $DIR - -#download the dataset -echo "Downloading traffic data..." -wget http://paddlepaddle.cdn.bcebos.com/demo/traffic/traffic_data.tar.gz - -#extract package -echo "Unzipping..." -tar -zxvf traffic_data.tar.gz - -echo "data/speeds.csv" > train.list -echo "data/speeds.csv" > test.list -echo "data/speeds.csv" > pred.list - -echo "Done." diff --git a/v1_api_demo/traffic_prediction/dataprovider.py b/v1_api_demo/traffic_prediction/dataprovider.py deleted file mode 100644 index c7883b6950..0000000000 --- a/v1_api_demo/traffic_prediction/dataprovider.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors, Inc. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle.trainer.PyDataProvider2 import * -import sys -import numpy as np -TERM_NUM = 24 -FORECASTING_NUM = 24 -LABEL_VALUE_NUM = 4 - - -def initHook(settings, file_list, **kwargs): - """ - Init hook is invoked before process data. It will set obj.slots and store data meta. - - :param settings: global object. 
It will passed to process routine. - :type obj: object - :param file_list: the meta file object, which passed from trainer_config.py,but unused in this function. - :param kwargs: unused other arguments. - """ - del kwargs #unused - - settings.pool_size = sys.maxint - #Use a time seires of the past as feature. - #Dense_vector's expression form is [float,float,...,float] - settings.input_types = [dense_vector(TERM_NUM)] - #There are next FORECASTING_NUM fragments you need predict. - #Every predicted condition at time point has four states. - for i in range(FORECASTING_NUM): - settings.input_types.append(integer_value(LABEL_VALUE_NUM)) - - -@provider( - init_hook=initHook, cache=CacheType.CACHE_PASS_IN_MEM, should_shuffle=True) -def process(settings, file_name): - with open(file_name) as f: - #abandon fields name - f.next() - for row_num, line in enumerate(f): - speeds = map(int, line.rstrip('\r\n').split(",")[1:]) - # Get the max index. - end_time = len(speeds) - # Scanning and generating samples - for i in range(TERM_NUM, end_time - FORECASTING_NUM): - # For dense slot - pre_spd = map(float, speeds[i - TERM_NUM:i]) - - # Integer value need predicting, values start from 0, so every one minus 1. - fol_spd = [j - 1 for j in speeds[i:i + FORECASTING_NUM]] - - # Predicting label is missing, abandon the sample. - if -1 in fol_spd: - continue - yield [pre_spd] + fol_spd - - -def predict_initHook(settings, file_list, **kwargs): - settings.pool_size = sys.maxint - settings.input_types = [dense_vector(TERM_NUM)] - - -@provider(init_hook=predict_initHook, should_shuffle=False) -def process_predict(settings, file_name): - with open(file_name) as f: - #abandon fields name - f.next() - for row_num, line in enumerate(f): - speeds = map(int, line.rstrip('\r\n').split(",")) - end_time = len(speeds) - pre_spd = map(float, speeds[end_time - TERM_NUM:end_time]) - yield pre_spd diff --git a/v1_api_demo/traffic_prediction/gen_result.py b/v1_api_demo/traffic_prediction/gen_result.py deleted file mode 100644 index 3da70b3031..0000000000 --- a/v1_api_demo/traffic_prediction/gen_result.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors, Inc. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -res = [] -with open('./rank-00000') as f: - for line in f: - pred = map(int, line.strip('\r\n;').split(";")) - #raw prediction range from 0 to 3 - res.append([i + 1 for i in pred]) - -file_name = open('./data/pred.list').read().strip('\r\n') - -FORECASTING_NUM = 24 -header = [ - 'id', - '201604200805', - '201604200810', - '201604200815', - '201604200820', - '201604200825', - '201604200830', - '201604200835', - '201604200840', - '201604200845', - '201604200850', - '201604200855', - '201604200900', - '201604200905', - '201604200910', - '201604200915', - '201604200920', - '201604200925', - '201604200930', - '201604200935', - '201604200940', - '201604200945', - '201604200950', - '201604200955', - '201604201000', -] -################### -## To CSV format ## -################### -with open(file_name) as f: - f.next() - print ','.join(header) - for row_num, line in enumerate(f): - fields = line.rstrip('\r\n').split(',') - linkid = fields[0] - print linkid + ',' + ','.join(map(str, res[row_num])) diff --git a/v1_api_demo/traffic_prediction/predict.sh b/v1_api_demo/traffic_prediction/predict.sh deleted file mode 100755 index 2dbd5e8805..0000000000 --- a/v1_api_demo/traffic_prediction/predict.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors, Inc. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e - -cfg=trainer_config.py -# pass choice -model="output/pass-00000" -paddle train \ - --config=$cfg \ - --use_gpu=false \ - --job=test \ - --init_model_path=$model \ - --config_args=is_predict=1 \ - --predict_output_dir=. - -python gen_result.py > result.csv - -rm -rf rank-00000 diff --git a/v1_api_demo/traffic_prediction/train.sh b/v1_api_demo/traffic_prediction/train.sh deleted file mode 100755 index 48dfc5604f..0000000000 --- a/v1_api_demo/traffic_prediction/train.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 PaddlePaddle Authors, Inc. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-set -e - -cfg=trainer_config.py -paddle train \ - --config=$cfg \ - --save_dir=./output \ - --trainer_count=4 \ - --log_period=1000 \ - --dot_period=10 \ - --num_passes=10 \ - --use_gpu=false \ - --show_parameter_stats_period=3000 \ - 2>&1 | tee 'train.log' diff --git a/v1_api_demo/traffic_prediction/trainer_config.py b/v1_api_demo/traffic_prediction/trainer_config.py deleted file mode 100755 index 52d678624a..0000000000 --- a/v1_api_demo/traffic_prediction/trainer_config.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors, Inc. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from paddle.trainer_config_helpers import * - -################################### DATA Configuration ############################################# -is_predict = get_config_arg('is_predict', bool, False) -trn = './data/train.list' if not is_predict else None -tst = './data/test.list' if not is_predict else './data/pred.list' -process = 'process' if not is_predict else 'process_predict' -define_py_data_sources2( - train_list=trn, test_list=tst, module="dataprovider", obj=process) -################################### Parameter Configuaration ####################################### -TERM_NUM = 24 -FORECASTING_NUM = 24 -emb_size = 16 -batch_size = 128 if not is_predict else 1 -settings( - batch_size=batch_size, - learning_rate=1e-3, - learning_method=RMSPropOptimizer()) -################################### Algorithm Configuration ######################################## - -output_label = [] - -link_encode = data_layer(name='link_encode', size=TERM_NUM) -for i in xrange(FORECASTING_NUM): - # Each task share same weight. - link_param = ParamAttr( - name='_link_vec.w', initial_max=1.0, initial_min=-1.0) - link_vec = fc_layer(input=link_encode, size=emb_size, param_attr=link_param) - score = fc_layer(input=link_vec, size=4, act=SoftmaxActivation()) - if is_predict: - maxid = maxid_layer(score) - output_label.append(maxid) - else: - # Multi-task training. - label = data_layer(name='label_%dmin' % ((i + 1) * 5), size=4) - cls = classification_cost( - input=score, name="cost_%dmin" % ((i + 1) * 5), label=label) - output_label.append(cls) -outputs(output_label) diff --git a/v1_api_demo/vae/README.md b/v1_api_demo/vae/README.md deleted file mode 100644 index e55d483b02..0000000000 --- a/v1_api_demo/vae/README.md +++ /dev/null @@ -1,13 +0,0 @@ -#Variational Autoencoder (VAE) - -This demo implements VAE training described in the original paper (https://arxiv.org/abs/1312.6114). - - -In order to run the model, first download the MNIST dataset by running the shell script in ./data. - -Then you can run the command below. The flag --useGpu specifies whether to use gpu for training (0 is cpu, 1 is gpu). 
- -$python vae_train.py [--use_gpu 1] - -The generated images will be stored in ./samples/ -The corresponding models will be stored in ./params/ diff --git a/v1_api_demo/vae/data/get_mnist_data.sh b/v1_api_demo/vae/data/get_mnist_data.sh deleted file mode 100755 index a77c81bf5a..0000000000 --- a/v1_api_demo/vae/data/get_mnist_data.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env sh -# This script downloads the mnist data and unzips it. -set -e -DIR="$( cd "$(dirname "$0")" ; pwd -P )" -rm -rf "$DIR/mnist_data" -mkdir "$DIR/mnist_data" -cd "$DIR/mnist_data" - -echo "Downloading..." - -for fname in train-images-idx3-ubyte train-labels-idx1-ubyte t10k-images-idx3-ubyte t10k-labels-idx1-ubyte -do - if [ ! -e $fname ]; then - wget --no-check-certificate http://yann.lecun.com/exdb/mnist/${fname}.gz - gunzip ${fname}.gz - fi -done diff --git a/v1_api_demo/vae/dataloader.py b/v1_api_demo/vae/dataloader.py deleted file mode 100644 index e9ff95d44f..0000000000 --- a/v1_api_demo/vae/dataloader.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import numpy as np - - -class MNISTloader(): - def __init__(self, - data_path="./data/mnist_data/", - batch_size=60, - process='train'): - self.batch_size = batch_size - self.data_path = data_path - self._pointer = 0 - self.image_batches = np.array([]) - self.process = process - - def _extract_images(self, filename, n): - f = open(filename, 'rb') - f.read(16) - data = np.fromfile(f, 'ubyte', count=n * 28 * 28).reshape((n, 28 * 28)) - #Mapping data into [-1, 1] - data = data / 255. * 2. - 1 - data_batches = np.split(data, 60000 / self.batch_size, 0) - - f.close() - - return data_batches - - @property - def pointer(self): - return self._pointer - - def load_data(self): - TRAIN_IMAGES = '%s/train-images-idx3-ubyte' % self.data_path - TEST_IMAGES = '%s/t10k-images-idx3-ubyte' % self.data_path - - if self.process == 'train': - self.image_batches = self._extract_images(TRAIN_IMAGES, 60000) - else: - self.image_batches = self._extract_images(TEST_IMAGES, 10000) - - def next_batch(self): - batch = self.image_batches[self._pointer] - self._pointer = (self._pointer + 1) % (60000 / self.batch_size) - return np.array(batch) - - def reset_pointer(self): - self._pointer = 0 diff --git a/v1_api_demo/vae/vae_conf.py b/v1_api_demo/vae/vae_conf.py deleted file mode 100644 index 301dd23793..0000000000 --- a/v1_api_demo/vae/vae_conf.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle.trainer_config_helpers import * -import numpy as np - -is_generating = get_config_arg("is_generating", bool, False) - -settings(batch_size=32, learning_rate=1e-3, learning_method=AdamOptimizer()) - -X_dim = 28 * 28 -h_dim = 128 -z_dim = 100 - - -def reparameterization(mu, logvar): - eps = ParamAttr(initial_mean=0., initial_std=1) - with mixed_layer() as sigma: - sigma += dotmul_projection(layer_math.exp(logvar) * 0.5, param_attr=eps) - return mu + sigma - - -def q_func(X): - """ - xavier initialization - """ - param_attr = ParamAttr( - name='share.w', initial_mean=0., initial_std=1. / np.sqrt(X_dim / 2.)) - mu_param = ParamAttr( - name='mu.w', initial_mean=0., initial_std=1. / np.sqrt(h_dim / 2.)) - logvar_param = ParamAttr( - name='logvar.w', initial_mean=0., initial_std=1. / np.sqrt(h_dim / 2.)) - - bias_attr = ParamAttr(name='share.bias', initial_mean=0., initial_std=0.) - mu_bias = ParamAttr(name='mu.bias', initial_mean=0., initial_std=0.) - logvar_bias = ParamAttr(name='logvar.bias', initial_mean=0., initial_std=0.) - - share_layer = fc_layer( - X, - size=h_dim, - param_attr=param_attr, - bias_attr=bias_attr, - act=ReluActivation()) - - return (fc_layer( - share_layer, - size=z_dim, - param_attr=mu_param, - bias_attr=mu_bias, - act=LinearActivation()), fc_layer( - share_layer, - size=z_dim, - param_attr=logvar_param, - bias_attr=logvar_bias, - act=LinearActivation())) - - -def generator(z): - - hidden_param = ParamAttr( - name='hidden.w', initial_mean=0., initial_std=1. / np.sqrt(z_dim / 2.)) - hidden_bias = ParamAttr(name='hidden.bias', initial_mean=0., initial_std=0.) - prob_param = ParamAttr( - name='prob.w', initial_mean=0., initial_std=1. / np.sqrt(h_dim / 2.)) - prob_bias = ParamAttr(name='prob.bias', initial_mean=0., initial_std=0.) - - hidden_layer = fc_layer( - z, - size=h_dim, - act=ReluActivation(), - param_attr=hidden_param, - bias_attr=hidden_bias) - prob = fc_layer( - hidden_layer, - size=X_dim, - act=SigmoidActivation(), - param_attr=prob_param, - bias_attr=prob_bias) - - return prob - - -def reconstruct_error(prob, X): - cost = multi_binary_label_cross_entropy(input=prob, label=X) - return cost - - -def KL_loss(mu, logvar): - with mixed_layer() as mu_square: - mu_square += dotmul_operator(mu, mu, scale=1.) - - cost = 0.5 * sum_cost(layer_math.exp(logvar) + mu_square - 1. - logvar) - - return cost - - -if not is_generating: - x_batch = data_layer(name='x_batch', size=X_dim) - mu, logvar = q_func(x_batch) - z_samples = reparameterization(mu, logvar) - prob = generator(z_samples) - outputs(reconstruct_error(prob, x_batch) + KL_loss(mu, logvar)) -else: - z_samples = data_layer(name='noise', size=z_dim) - outputs(generator(z_samples)) diff --git a/v1_api_demo/vae/vae_train.py b/v1_api_demo/vae/vae_train.py deleted file mode 100644 index 1babb011c7..0000000000 --- a/v1_api_demo/vae/vae_train.py +++ /dev/null @@ -1,175 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import argparse -import random -import numpy as np -import cPickle -import sys, os -from PIL import Image - -from paddle.trainer.config_parser import parse_config -from paddle.trainer.config_parser import logger -import py_paddle.swig_paddle as api -import dataloader -import matplotlib.pyplot as plt - - -def plot_samples(samples): - fig = plt.figure(figsize=(4, 4)) - gs = gridspec.GridSpec(4, 4) - gs.update(wspace=0.05, hspace=0.05) - for i, sample in enumerate(samples): - plt.subplot(gs[i]) - plt.axis('off') - plt.imshow(sample.reshape(28, 28), cmap='Greys_r') - - return fig - - -def CHECK_EQ(a, b): - assert a == b, "a=%s, b=%s" % (a, b) - - -def get_fake_samples(generator_machine, batch_size, noise): - gen_inputs = api.Arguments.createArguments(1) - gen_inputs.setSlotValue(0, api.Matrix.createDenseFromNumpy(noise)) - gen_outputs = api.Arguments.createArguments(0) - generator_machine.forward(gen_inputs, gen_outputs, api.PASS_TEST) - fake_samples = gen_outputs.getSlotValue(0).copyToNumpyMat() - return fake_samples - - -def copy_shared_parameters(src, dst): - ''' - copy the parameters from src to dst - :param src: the source of the parameters - :type src: GradientMachine - :param dst: the destination of the parameters - :type dst: GradientMachine - ''' - src_params = [src.getParameter(i) for i in xrange(src.getParameterSize())] - src_params = dict([(p.getName(), p) for p in src_params]) - - for i in xrange(dst.getParameterSize()): - dst_param = dst.getParameter(i) - src_param = src_params.get(dst_param.getName(), None) - if src_param is None: - continue - src_value = src_param.getBuf(api.PARAMETER_VALUE) - dst_value = dst_param.getBuf(api.PARAMETER_VALUE) - CHECK_EQ(len(src_value), len(dst_value)) - dst_value.copyFrom(src_value) - dst_param.setValueUpdated() - - -def find(iterable, cond): - for item in iterable: - if cond(item): - return item - return None - - -def get_layer_size(model_conf, layer_name): - layer_conf = find(model_conf.layers, lambda x: x.name == layer_name) - assert layer_conf is not None, "Cannot find '%s' layer" % layer_name - return layer_conf.size - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument( - "--use_gpu", default="1", help="1 means use gpu for training") - parser.add_argument("--gpu_id", default="0", help="the gpu_id parameter") - args = parser.parse_args() - use_gpu = args.use_gpu - assert use_gpu in ["0", "1"] - - if not os.path.exists("./samples/"): - os.makedirs("./samples/") - - if not os.path.exists("./params/"): - os.makedirs("./params/") - - api.initPaddle('--use_gpu=' + use_gpu, '--dot_period=10', - '--log_period=1000', '--gpu_id=' + args.gpu_id, - '--save_dir=' + "./params/") - - conf = "vae_conf.py" - - trainer_conf = parse_config(conf, "is_generating=False") - gener_conf = parse_config(conf, "is_generating=True") - - batch_size = trainer_conf.opt_config.batch_size - - noise_dim = get_layer_size(gener_conf.model_config, "noise") - - mnist = dataloader.MNISTloader(batch_size=batch_size) - mnist.load_data() - - training_machine = api.GradientMachine.createFromConfigProto( - trainer_conf.model_config) - - generator_machine = api.GradientMachine.createFromConfigProto( - gener_conf.model_config) - - trainer = api.Trainer.create(trainer_conf, training_machine) - - trainer.startTrain() - - for train_pass in xrange(100): - trainer.startTrainPass() - mnist.reset_pointer() - i = 0 - it = 0 - while mnist.pointer != 0 or i == 0: - X 
= mnist.next_batch().astype('float32') - - inputs = api.Arguments.createArguments(1) - inputs.setSlotValue(0, api.Matrix.createDenseFromNumpy(X)) - - trainer.trainOneDataBatch(batch_size, inputs) - - if it % 1000 == 0: - - outputs = api.Arguments.createArguments(0) - training_machine.forward(inputs, outputs, api.PASS_TEST) - loss = np.mean(outputs.getSlotValue(0).copyToNumpyMat()) - print "\niter: {}".format(str(it).zfill(3)) - print "VAE loss: {}".format(str(loss).zfill(3)) - - #Sync parameters between networks (GradientMachine) at the beginning - copy_shared_parameters(training_machine, generator_machine) - - z_samples = np.random.randn(batch_size, - noise_dim).astype('float32') - samples = get_fake_samples(generator_machine, batch_size, - z_samples) - - #Generating the first 16 images for a picture. - figure = plot_samples(samples[:16]) - plt.savefig( - "./samples/{}_{}.png".format( - str(train_pass).zfill(3), str(i).zfill(3)), - bbox_inches='tight') - plt.close(figure) - i += 1 - it += 1 - - trainer.finishTrainPass() - trainer.finishTrain() - - -if __name__ == '__main__': - main()
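
For reference, the vae_conf.py removed above builds the VAE objective out of v1 layers. Below is a minimal NumPy sketch of the same two pieces, the standard reparameterization trick and the KL term 0.5 * sum(exp(logvar) + mu^2 - 1 - logvar) computed by KL_loss(); it assumes only NumPy, the names and shapes are illustrative, and the config's dotmul_projection-based reparameterization differs in detail from this textbook form.

import numpy as np

def reparameterize(mu, logvar, rng=np.random):
    # Draw z ~ N(mu, diag(exp(logvar))) via the reparameterization trick,
    # so gradients can flow through mu and logvar.
    eps = rng.standard_normal(mu.shape)
    return mu + np.exp(0.5 * logvar) * eps

def kl_loss(mu, logvar):
    # KL(q(z|x) || N(0, I)); mirrors the sum_cost expression in KL_loss().
    return 0.5 * np.sum(np.exp(logvar) + mu ** 2 - 1.0 - logvar)

mu = np.zeros((4, 100), dtype='float32')      # batch of 4, z_dim = 100 as in the config
logvar = np.zeros((4, 100), dtype='float32')
z = reparameterize(mu, logvar)
print(kl_loss(mu, logvar))  # 0.0 for a standard-normal posterior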