commit 144854d2e8
@@ -1,5 +0,0 @@
The examples in v1_api_demo currently use the v1 API; they will be upgraded to the v2 API later.
Thus, v1_api_demo is a temporary directory. We have decided not to maintain it and will delete it in the future.

Please go to [PaddlePaddle/book](https://github.com/PaddlePaddle/book) and
[PaddlePaddle/models](https://github.com/PaddlePaddle/models) to learn PaddlePaddle.
@@ -1,11 +0,0 @@
output/
uniform_params/
cifar_params/
mnist_params/
*.png
.pydevproject
.project
*.log
*.pyc
data/mnist_data/
data/cifar-10-batches-py/
@@ -1,13 +0,0 @@
# Generative Adversarial Networks (GAN)

This demo implements GAN training described in the original GAN paper (https://arxiv.org/abs/1406.2661) and DCGAN (https://arxiv.org/abs/1511.06434).

The general training procedure is implemented in gan_trainer.py. The neural network configurations are specified in gan_conf.py (for synthetic data) and gan_conf_image.py (for image data).

To run the model, first download the corresponding data by running the shell script in ./data.
Then you can run the command below. The flag -d specifies the training data (cifar, mnist, or uniform) and the flag --use_gpu specifies whether to train on a GPU (0 for CPU, 1 for GPU).

$ python gan_trainer.py -d cifar --use_gpu 1

The generated images will be stored in ./cifar_samples/
The corresponding models will be stored in ./cifar_params/
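gan_trainer.py itself is not shown in this diff; as a rough illustration only, the alternating optimization it performs looks like the sketch below (plain numpy; the two step functions are hypothetical placeholders for one gradient update on each network):

import numpy as np

def sample_noise(batch_size, noise_dim=10):
    # Uniform noise serves as the generator input z (cf. the "uniform" data option above).
    return np.random.uniform(-1.0, 1.0, (batch_size, noise_dim)).astype('float32')

def gan_training_loop(real_batches, train_discriminator_step, train_generator_step):
    for real_batch in real_batches:
        # 1) Update the discriminator on real samples vs. generated samples;
        #    the generator's weights stay fixed during this step.
        train_discriminator_step(real_batch, sample_noise(len(real_batch)))
        # 2) Update the generator so the discriminator scores its output as
        #    real; the discriminator's weights stay fixed during this step.
        train_generator_step(sample_noise(len(real_batch)))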
@@ -1,18 +0,0 @@
#!/bin/bash
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
wget https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
tar zxf cifar-10-python.tar.gz
rm cifar-10-python.tar.gz
@@ -1,17 +0,0 @@
#!/usr/bin/env sh
# This script downloads the mnist data and unzips it.
set -e
DIR="$( cd "$(dirname "$0")" ; pwd -P )"
rm -rf "$DIR/mnist_data"
mkdir "$DIR/mnist_data"
cd "$DIR/mnist_data"

echo "Downloading..."

for fname in train-images-idx3-ubyte train-labels-idx1-ubyte t10k-images-idx3-ubyte t10k-labels-idx1-ubyte
do
    if [ ! -e $fname ]; then
        wget --no-check-certificate http://yann.lecun.com/exdb/mnist/${fname}.gz
        gunzip ${fname}.gz
    fi
done
@@ -1,151 +0,0 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer_config_helpers import *

mode = get_config_arg("mode", str, "generator")
assert mode in set([
    "generator", "discriminator", "generator_training", "discriminator_training"
])

is_generator_training = mode == "generator_training"
is_discriminator_training = mode == "discriminator_training"
is_generator = mode == "generator"
is_discriminator = mode == "discriminator"

# The network structure below follows the ref https://arxiv.org/abs/1406.2661
# Here we use two hidden layers and batch_norm.

print('mode=%s' % mode)
# the dim of the noise (z) as the input of the generator network
noise_dim = 10
# the dim of the hidden layer
hidden_dim = 10
# the dim of the generated sample
sample_dim = 2

settings(
    batch_size=128,
    learning_rate=1e-4,
    learning_method=AdamOptimizer(beta1=0.5))


def discriminator(sample):
    """
    The discriminator outputs the probability that a sample came from the
    generator or from real data.
    The output has two dimensions: dimension 0 is the probability that the
    sample came from the generator, and dimension 1 is the probability that
    the sample came from real data.
    """
    param_attr = ParamAttr(is_static=is_generator_training)
    bias_attr = ParamAttr(
        is_static=is_generator_training, initial_mean=1.0, initial_std=0)

    hidden = fc_layer(
        input=sample,
        name="dis_hidden",
        size=hidden_dim,
        bias_attr=bias_attr,
        param_attr=param_attr,
        act=ReluActivation())

    hidden2 = fc_layer(
        input=hidden,
        name="dis_hidden2",
        size=hidden_dim,
        bias_attr=bias_attr,
        param_attr=param_attr,
        act=LinearActivation())

    hidden_bn = batch_norm_layer(
        hidden2,
        act=ReluActivation(),
        name="dis_hidden_bn",
        bias_attr=bias_attr,
        param_attr=ParamAttr(
            is_static=is_generator_training, initial_mean=1.0,
            initial_std=0.02),
        use_global_stats=False)

    return fc_layer(
        input=hidden_bn,
        name="dis_prob",
        size=2,
        bias_attr=bias_attr,
        param_attr=param_attr,
        act=SoftmaxActivation())


def generator(noise):
    """
    The generator generates a sample given noise.
    """
    param_attr = ParamAttr(is_static=is_discriminator_training)
    bias_attr = ParamAttr(
        is_static=is_discriminator_training, initial_mean=1.0, initial_std=0)

    hidden = fc_layer(
        input=noise,
        name="gen_layer_hidden",
        size=hidden_dim,
        bias_attr=bias_attr,
        param_attr=param_attr,
        act=ReluActivation())

    hidden2 = fc_layer(
        input=hidden,
        name="gen_hidden2",
        size=hidden_dim,
        bias_attr=bias_attr,
        param_attr=param_attr,
        act=LinearActivation())

    hidden_bn = batch_norm_layer(
        hidden2,
        act=ReluActivation(),
        name="gen_layer_hidden_bn",
        bias_attr=bias_attr,
        param_attr=ParamAttr(
            is_static=is_discriminator_training,
            initial_mean=1.0,
            initial_std=0.02),
        use_global_stats=False)

    return fc_layer(
        input=hidden_bn,
        name="gen_layer1",
        size=sample_dim,
        bias_attr=bias_attr,
        param_attr=param_attr,
        act=LinearActivation())


if is_generator_training:
    noise = data_layer(name="noise", size=noise_dim)
    sample = generator(noise)

if is_discriminator_training:
    sample = data_layer(name="sample", size=sample_dim)

if is_generator_training or is_discriminator_training:
    label = data_layer(name="label", size=1)
    prob = discriminator(sample)
    cost = cross_entropy(input=prob, label=label)
    classification_error_evaluator(
        input=prob, label=label, name=mode + '_error')
    outputs(cost)

if is_generator:
    noise = data_layer(name="noise", size=noise_dim)
    outputs(generator(noise))
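The is_static flags above implement both training phases with a single config: in "generator_training" mode the discriminator's parameters are frozen, and in "discriminator_training" mode the generator's are. For the discriminator phase, a data batch must follow the label convention documented in discriminator(); a minimal sketch of assembling such a batch (hypothetical helper, plain numpy):

import numpy as np

def make_discriminator_batch(real_samples, fake_samples):
    # Label convention from discriminator(): class 0 = sample came from the
    # generator, class 1 = sample came from real data.
    samples = np.concatenate([fake_samples, real_samples]).astype('float32')
    labels = np.concatenate([
        np.zeros(len(fake_samples), dtype='int32'),  # generated -> class 0
        np.ones(len(real_samples), dtype='int32'),   # real -> class 1
    ])
    return samples, labels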
(Two file diffs suppressed because they are too large.)
@@ -1,10 +0,0 @@
data/raw_data
data/*.list
mnist_vgg_model
plot.png
train.log
*pyc
.ipynb_checkpoints
params.pkl
params.tar
params.tar.gz
@@ -1,209 +0,0 @@
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A very basic example of how to use the current raw SWIG API to train an
mnist network.

The current implementation uses raw SWIG, which means the API calls are \
passed directly to the C++ side of Paddle.

The user API could be simpler and more carefully designed.
"""
import random

import numpy as np
import paddle.v2 as paddle_v2
import py_paddle.swig_paddle as api
from paddle.trainer_config_helpers import *
from py_paddle import DataProviderConverter

from mnist_util import read_from_mnist


def init_parameter(network):
    assert isinstance(network, api.GradientMachine)
    for each_param in network.getParameters():
        assert isinstance(each_param, api.Parameter)
        array_size = len(each_param)
        array = np.random.uniform(-1.0, 1.0, array_size).astype('float32')
        each_param.getBuf(api.PARAMETER_VALUE).copyFromNumpyArray(array)


def generator_to_batch(generator, batch_size):
    ret_val = list()
    for each_item in generator:
        ret_val.append(each_item)
        if len(ret_val) == batch_size:
            yield ret_val
            ret_val = list()
    if len(ret_val) != 0:
        yield ret_val


class BatchPool(object):
    def __init__(self, generator, batch_size):
        self.data = list(generator)
        self.batch_size = batch_size

    def __call__(self):
        random.shuffle(self.data)
        for offset in xrange(0, len(self.data), self.batch_size):
            limit = min(offset + self.batch_size, len(self.data))
            yield self.data[offset:limit]


def input_order_converter(generator):
    for each_item in generator:
        yield each_item['pixel'], each_item['label']

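
# Usage sketch (an illustration; not part of the original demo): BatchPool
# materializes the generator once and yields freshly shuffled batches each
# time it is called:
#
#   pool = BatchPool(input_order_converter(read_from_mnist(train_file)), 512)
#   for data_batch in pool():  # each batch is a list of (pixel, label) tuples
#       ...
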
def main():
    api.initPaddle("-use_gpu=false", "-trainer_count=4")  # use 4 cpu cores

    optimizer = paddle_v2.optimizer.Adam(
        learning_rate=1e-4,
        batch_size=1000,
        model_average=ModelAverage(average_window=0.5),
        regularization=L2Regularization(rate=0.5))

    # Create a local updater. Local means it does not run on a cluster.
    # For cluster training, this could be changed to createRemoteUpdater
    # in the future.
    updater = optimizer.create_local_updater()
    assert isinstance(updater, api.ParameterUpdater)

    # Define the network.
    images = paddle_v2.layer.data(
        name='pixel', type=paddle_v2.data_type.dense_vector(784))
    label = paddle_v2.layer.data(
        name='label', type=paddle_v2.data_type.integer_value(10))
    hidden1 = paddle_v2.layer.fc(input=images, size=200)
    hidden2 = paddle_v2.layer.fc(input=hidden1, size=200)
    inference = paddle_v2.layer.fc(input=hidden2,
                                   size=10,
                                   act=paddle_v2.activation.Softmax())
    cost = paddle_v2.layer.classification_cost(input=inference, label=label)

    # Create a simple gradient machine.
    model_config = paddle_v2.layer.parse_network(cost)
    m = api.GradientMachine.createFromConfigProto(model_config,
                                                  api.CREATE_MODE_NORMAL,
                                                  optimizer.enable_types())

    # This type check is not useful at runtime; it only enables type hints
    # in IDEs such as PyCharm.
    assert isinstance(m, api.GradientMachine)

    # Initialize parameters with numpy.
    init_parameter(network=m)

    # Initialize the ParameterUpdater.
    updater.init(m)

    # DataProviderConverter is a utility that converts Python objects into
    # Paddle C++ input. The input format is the same as Paddle's DataProvider.
    converter = DataProviderConverter(input_types=[images.type, label.type])

    train_file = './data/raw_data/train'
    test_file = './data/raw_data/t10k'

    # Start the gradient machine.
    # The gradient machine must be started before invoking forward/backward,
    # not just for training but also for inference.
    m.start()

    # An evaluator can print the error rate, etc. It is a C++ class.
    batch_evaluator = m.makeEvaluator()
    test_evaluator = m.makeEvaluator()

    # Get the train data.
    # The train data will be stored in a data pool. The current implementation
    # does not care about memory or speed; it is just a very naive
    # implementation.
    train_data_generator = input_order_converter(read_from_mnist(train_file))
    train_data = BatchPool(train_data_generator, 512)

    # outArgs holds the neural network's forward result. It is not used here,
    # just passed to gradient_machine.forward.
    outArgs = api.Arguments.createArguments(0)

    for pass_id in xrange(2):  # we train 2 passes.
        updater.startPass()

        for batch_id, data_batch in enumerate(train_data()):
            # data_batch is a batch of input images.
            # For online learning, we could instead get data_batch from the
            # network here.

            # Start updating one batch.
            pass_type = updater.startBatch(len(data_batch))

            # Start the batch evaluator.
            # batch_evaluator can be used between start/finish.
            batch_evaluator.start()

            # forwardBackward is a shortcut for forward and backward.
            # It is sometimes faster than invoking forward/backward separately,
            # because in a GradientMachine it may be asynchronous.
            m.forwardBackward(converter(data_batch), outArgs, pass_type)

            for each_param in m.getParameters():
                updater.update(each_param)

            # Get the cost. We use numpy to calculate the total cost for this
            # batch.
            cost_vec = outArgs.getSlotValue(0)
            cost_vec = cost_vec.copyToNumpyMat()
            cost = cost_vec.sum() / len(data_batch)

            # Make the evaluator work.
            m.eval(batch_evaluator)

            # Print logs.
            print 'Pass id', pass_id, 'Batch id', batch_id, 'with cost=', \
                cost, batch_evaluator

            batch_evaluator.finish()
            # Finish the batch.
            # * It will clear the gradients.
            # * It ensures all values are updated.
            updater.finishBatch(cost)

        # Testing stage: use the test data set to test the current network.
        updater.apply()
        test_evaluator.start()
        test_data_generator = input_order_converter(read_from_mnist(test_file))
        for data_batch in generator_to_batch(test_data_generator, 512):
            # In the testing stage, only forward is needed.
            m.forward(converter(data_batch), outArgs, api.PASS_TEST)
            m.eval(test_evaluator)

        # Print the error rate for the test data set.
        print 'Pass', pass_id, ' test evaluator: ', test_evaluator
        test_evaluator.finish()
        updater.restore()

        updater.catchUpWith()
        params = m.getParameters()
        for each_param in params:
            assert isinstance(each_param, api.Parameter)
            value = each_param.getBuf(api.PARAMETER_VALUE)
            value = value.copyToNumpyArray()

            # Here we could save the parameters anywhere we want.
            print each_param.getName(), value

        updater.finishPass()

    m.finish()


if __name__ == '__main__':
    main()
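For orientation, the updater and evaluator calls in main() above follow this per-pass protocol (a summary of the code, not a formal API description):

# updater.startPass()
#   per batch:  pass_type = updater.startBatch(len(batch))
#               m.forwardBackward(...); updater.update(p) for every parameter
#               updater.finishBatch(cost)
#   testing:    updater.apply()        # switch to the (e.g. model-averaged) weights
#               m.forward(...) + m.eval(...) over the test set
#               updater.restore()      # switch back to the training weights
#               updater.catchUpWith()  # flush any delayed/lazy updates
# updater.finishPass()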
@@ -1,21 +0,0 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

o = open("./train.list", "w")
o.write("./data/raw_data/train" + "\n")
o.close()

o = open("./test.list", "w")
o.write("./data/raw_data/t10k" + "\n")
o.close()
@@ -1,21 +0,0 @@
#!/usr/bin/env sh
# This script downloads the mnist data and unzips it.
set -e
DIR="$( cd "$(dirname "$0")" ; pwd -P )"
rm -rf "$DIR/raw_data"
mkdir "$DIR/raw_data"
cd "$DIR/raw_data"

echo "Downloading..."

for fname in train-images-idx3-ubyte train-labels-idx1-ubyte t10k-images-idx3-ubyte t10k-labels-idx1-ubyte
do
    if [ ! -e $fname ]; then
        wget --no-check-certificate http://yann.lecun.com/exdb/mnist/${fname}.gz
        gunzip ${fname}.gz
    fi
done

cd "$DIR"
rm -f *.list
python generate_list.py
@@ -1,79 +0,0 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle.trainer_config_helpers import *

is_predict = get_config_arg("is_predict", bool, False)

#################### Data Configuration ##################

if not is_predict:
    data_dir = './data/'
    define_py_data_sources2(
        train_list=data_dir + 'train.list',
        test_list=data_dir + 'test.list',
        module='mnist_provider',
        obj='process')

###################### Algorithm Configuration #############
settings(batch_size=50, learning_rate=0.001, learning_method=AdamOptimizer())

####################### Network Configuration #############

data_size = 1 * 28 * 28
label_size = 10
img = data_layer(name='pixel', size=data_size)


# light cnn
# A shallower CNN model: [CNN, BN, ReLU, Max-Pooling] x4 + FC x1.
# Easier to train on the mnist dataset and quite efficient.
# Its final performance is close to deeper models on tasks such as digit and
# character classification.
def light_cnn(input_image, num_channels, num_classes):
    def __light__(ipt,
                  num_filter=128,
                  times=1,
                  conv_filter_size=3,
                  dropouts=0,
                  num_channels_=None):
        return img_conv_group(
            input=ipt,
            num_channels=num_channels_,
            pool_size=2,
            pool_stride=2,
            conv_padding=0,
            conv_num_filter=[num_filter] * times,
            conv_filter_size=conv_filter_size,
            conv_act=ReluActivation(),
            conv_with_batchnorm=True,
            conv_batchnorm_drop_rate=dropouts,
            pool_type=MaxPooling())

    tmp = __light__(input_image, num_filter=128, num_channels_=num_channels)
    tmp = __light__(tmp, num_filter=128)
    tmp = __light__(tmp, num_filter=128)
    tmp = __light__(tmp, num_filter=128, conv_filter_size=1)

    tmp = fc_layer(input=tmp, size=num_classes, act=SoftmaxActivation())
    return tmp


predict = light_cnn(input_image=img, num_channels=1, num_classes=label_size)

if not is_predict:
    lbl = data_layer(name="label", size=label_size)
    inputs(img, lbl)
    outputs(classification_cost(input=predict, label=lbl))
else:
    outputs(predict)
@@ -1,25 +0,0 @@
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer.PyDataProvider2 import *
from mnist_util import read_from_mnist


# Define a py data provider.
@provider(
    input_types={'pixel': dense_vector(28 * 28),
                 'label': integer_value(10)},
    cache=CacheType.CACHE_PASS_IN_MEM)
def process(settings, filename):  # settings is not used currently.
    for each in read_from_mnist(filename):
        yield each
@@ -1,30 +0,0 @@
import numpy

__all__ = ['read_from_mnist']


def read_from_mnist(filename):
    imgf = filename + "-images-idx3-ubyte"
    labelf = filename + "-labels-idx1-ubyte"
    f = open(imgf, "rb")
    l = open(labelf, "rb")

    # Skip the IDX file headers: 16 bytes for images (magic number, item
    # count, rows, cols) and 8 bytes for labels (magic number, item count).
    f.read(16)
    l.read(8)

    # Define the number of samples for train/test.
    if "train" in filename:
        n = 60000
    else:
        n = 10000

    images = numpy.fromfile(
        f, 'ubyte', count=n * 28 * 28).reshape((n, 28 * 28)).astype('float32')
    # Scale pixel values from [0, 255] to [-1, 1].
    images = images / 255.0 * 2.0 - 1.0
    labels = numpy.fromfile(l, 'ubyte', count=n).astype("int")

    for i in xrange(n):
        yield {"pixel": images[i, :], 'label': labels[i]}

    f.close()
    l.close()
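For reference, the two header reads skipped above could be parsed explicitly; a sketch, assuming the standard big-endian IDX layout of the MNIST files:

import struct

def read_idx_headers(image_file, label_file):
    # Image header: magic, image count, rows, cols (four big-endian ints).
    magic, n_images, n_rows, n_cols = struct.unpack(">iiii", image_file.read(16))
    # Label header: magic, label count (two big-endian ints).
    label_magic, n_labels = struct.unpack(">ii", label_file.read(8))
    assert n_images == n_labels
    return n_images, n_rows, n_cols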
@@ -1,32 +0,0 @@
#!/bin/bash
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
config=vgg_16_mnist.py
output=./mnist_vgg_model
log=train.log

paddle train \
    --config=$config \
    --dot_period=10 \
    --log_period=100 \
    --test_all_data_in_one_period=1 \
    --use_gpu=0 \
    --trainer_count=1 \
    --num_passes=100 \
    --save_dir=$output \
    2>&1 | tee $log
paddle usage -l $log -e $? -n "mnist_train" >/dev/null 2>&1

python -m paddle.utils.plotcurve -i $log > plot.png
@@ -1,50 +0,0 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle.trainer_config_helpers import *

is_predict = get_config_arg("is_predict", bool, False)

#################### Data Configuration ##################

if not is_predict:
    data_dir = './data/'
    define_py_data_sources2(
        train_list=data_dir + 'train.list',
        test_list=data_dir + 'test.list',
        module='mnist_provider',
        obj='process')

###################### Algorithm Configuration #############
settings(
    batch_size=128,
    learning_rate=0.1 / 128.0,
    learning_method=MomentumOptimizer(0.9),
    regularization=L2Regularization(0.0005 * 128))

####################### Network Configuration #############

data_size = 1 * 28 * 28
label_size = 10
img = data_layer(name='pixel', size=data_size)

# small_vgg is predefined in trainer_config_helpers.network
predict = small_vgg(input_image=img, num_channels=1, num_classes=label_size)

if not is_predict:
    lbl = data_layer(name="label", size=label_size)
    inputs(img, lbl)
    outputs(classification_cost(input=predict, label=lbl))
else:
    outputs(predict)
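Note the batch-size scaling in settings() above: the learning rate is divided by the batch size and the L2 rate is multiplied by it, a common convention when per-batch gradients are summed rather than averaged. A quick check of the resulting values:

batch_size = 128
learning_rate = 0.1 / batch_size   # 0.00078125
l2_rate = 0.0005 * batch_size      # 0.064
print(learning_rate, l2_rate)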
@@ -1,2 +0,0 @@
baidu.dict
model_*.emb
@@ -1,113 +0,0 @@
#!/bin/env python
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example:
    python extract_para.py --preModel PREMODEL --preDict PREDICT \
        --usrModel USRMODEL --usrDict USRDICT -d DIM

Options:
    -h, --help           show this help message and exit
    --preModel PREMODEL  the name of the pretrained embedding model
    --preDict PREDICT    the name of the pretrained dictionary
    --usrModel USRMODEL  the name of the output user embedding model
    --usrDict USRDICT    the name of the user-specified dictionary
    -d DIM               dimension of the parameters
"""
from optparse import OptionParser
import struct


def get_row_index(preDict, usrDict):
    """
    Get the row positions of all words in the user dictionary within the
    pretrained dictionary.
    return: a list of row positions
    Example: preDict='a\nb\nc\n', usrDict='a\nc\n', then return [0,2]
    """
    pos = []
    index = dict()
    with open(preDict, "r") as f:
        for line_index, line in enumerate(f):
            word = line.strip().split()[0]
            index[word] = line_index
    with open(usrDict, "r") as f:
        for line in f:
            word = line.strip().split()[0]
            pos.append(index[word])
    return pos


def extract_parameters_by_usrDict(preModel, preDict, usrModel, usrDict,
                                  paraDim):
    """
    Extract the desired parameters from a pretrained embedding model based
    on the user dictionary.
    """
    if paraDim not in [32, 64, 128, 256]:
        raise RuntimeError("We only support 32, 64, 128, 256 dimensions now")

    fi = open(preModel, "rb")
    fo = open(usrModel, "wb")

    # Write the file head: version, float size, parameter count.
    # Note: "iil" uses native sizes, i.e. 4 + 4 + 8 = 16 bytes on LP64
    # platforms.
    rowIndex = get_row_index(preDict, usrDict)
    newHead = struct.pack("iil", 0, 4, len(rowIndex) * paraDim)
    fo.write(newHead)
    bytes = 4 * paraDim
    for i in range(0, len(rowIndex)):
        # Absolute offset of the row in the input file: the 16-byte file
        # head plus rowIndex[i] rows of 4 * paraDim bytes each.
        fi.seek(rowIndex[i] * bytes + 16, 0)
        fo.write(fi.read(bytes))

    print "extract parameters finish, total", len(rowIndex), "lines"
    fi.close()
    fo.close()


def main():
    """
    Main entry for running extract_para.py
    """
    usage = "usage: \n" \
            "python %prog --preModel PREMODEL --preDict PREDICT" \
            " --usrModel USRMODEL --usrDict USRDICT -d DIM"
    parser = OptionParser(usage)
    parser.add_option(
        "--preModel",
        action="store",
        dest="preModel",
        help="the name of pretrained embedding model")
    parser.add_option(
        "--preDict",
        action="store",
        dest="preDict",
        help="the name of pretrained dictionary")
    parser.add_option(
        "--usrModel",
        action="store",
        dest="usrModel",
        help="the name of output usr embedding model")
    parser.add_option(
        "--usrDict",
        action="store",
        dest="usrDict",
        help="the name of user specified dictionary")
    parser.add_option(
        "-d", action="store", dest="dim", help="dimension of parameter")
    (options, args) = parser.parse_args()
    extract_parameters_by_usrDict(options.preModel, options.preDict,
                                  options.usrModel, options.usrDict,
                                  int(options.dim))


if __name__ == '__main__':
    main()
@@ -1,159 +0,0 @@
#!/bin/env python
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example:
    python paraconvert.py --b2t -i INPUT -o OUTPUT -d DIM
    python paraconvert.py --t2b -i INPUT -o OUTPUT

Options:
    -h, --help  show this help message and exit
    --b2t       convert parameter file of embedding model from binary to text
    --t2b       convert parameter file of embedding model from text to binary
    -i INPUT    input parameter file name
    -o OUTPUT   output parameter file name
    -d DIM      dimension of parameter
"""
from optparse import OptionParser
import struct


def binary2text(input, output, paraDim):
    """
    Convert a binary parameter file of an embedding model into a text file.
    input: the name of the input binary parameter file, whose format is:
        1) the first 16 bytes are the file head:
            version (4 bytes): version of paddle, default = 0
            floatSize (4 bytes): sizeof(float) = 4
            paraCount (8 bytes): total number of parameters
        2) the next (paraCount * 4) bytes are the parameters, 4 bytes each
    output: the name of the output text parameter file, for example:
        0,4,32156096
        -0.7845433,1.1937413,-0.1704215,...
        0.0000909,0.0009465,-0.0008813,...
        ...
        the format is:
        1) the first line is the file head:
            version=0, floatSize=4, paraCount=32156096
        2) the other lines print the parameters:
            a) each line prints paraDim parameters separated by ','
            b) there are paraCount/paraDim lines (embedding words)
    paraDim: dimension of the parameters
    """
    fi = open(input, "rb")
    fo = open(output, "w")
    # Note: "iil" uses native sizes, i.e. 4 + 4 + 8 = 16 bytes on LP64
    # platforms.
    version, floatSize, paraCount = struct.unpack("iil", fi.read(16))
    newHead = ','.join([str(version), str(floatSize), str(paraCount)])
    print >> fo, newHead

    bytes = 4 * int(paraDim)
    format = "%df" % int(paraDim)
    context = fi.read(bytes)
    line = 0

    while context:
        numbers = struct.unpack(format, context)
        lst = []
        for i in numbers:
            lst.append('%8.7f' % i)
        print >> fo, ','.join(lst)
        context = fi.read(bytes)
        line += 1
    fi.close()
    fo.close()
    print "binary2text finish, total", line, "lines"


def get_para_count(input):
    """
    Compute the total number of embedding parameters in the input text file.
    input: the name of the input text file
    """
    numRows = 1
    paraDim = 0
    with open(input) as f:
        line = f.readline()
        paraDim = len(line.split(","))
        for line in f:
            numRows += 1
    return numRows * paraDim


def text2binary(input, output, paddle_head=True):
    """
    Convert a text parameter file of an embedding model into a binary file.
    input: the name of the input text parameter file, for example:
        -0.7845433,1.1937413,-0.1704215,...
        0.0000909,0.0009465,-0.0008813,...
        ...
        the format is:
        1) it does not have a file head
        2) each line stores the same number of parameters,
           separated by commas ','
    output: the name of the output binary parameter file, whose format is:
        1) the first 16 bytes are the file head:
            version (4 bytes), floatSize (4 bytes), paraCount (8 bytes)
        2) the next (paraCount * 4) bytes are the parameters, 4 bytes each
    """
    fi = open(input, "r")
    fo = open(output, "wb")

    newHead = struct.pack("iil", 0, 4, get_para_count(input))
    fo.write(newHead)

    count = 0
    for line in fi:
        line = line.strip().split(",")
        for i in range(0, len(line)):
            binary_data = struct.pack("f", float(line[i]))
            fo.write(binary_data)
        count += 1
    fi.close()
    fo.close()
    print "text2binary finish, total", count, "lines"


def main():
    """
    Main entry for running paraconvert.py
    """
    usage = "usage: \n" \
            "python %prog --b2t -i INPUT -o OUTPUT -d DIM \n" \
            "python %prog --t2b -i INPUT -o OUTPUT"
    parser = OptionParser(usage)
    parser.add_option(
        "--b2t",
        action="store_true",
        help="convert parameter file of embedding model from binary to text")
    parser.add_option(
        "--t2b",
        action="store_true",
        help="convert parameter file of embedding model from text to binary")
    parser.add_option(
        "-i", action="store", dest="input", help="input parameter file name")
    parser.add_option(
        "-o", action="store", dest="output", help="output parameter file name")
    parser.add_option(
        "-d", action="store", dest="dim", help="dimension of parameter")
    (options, args) = parser.parse_args()
    if options.b2t:
        binary2text(options.input, options.output, options.dim)
    if options.t2b:
        text2binary(options.input, options.output)


if __name__ == '__main__':
    main()
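A hypothetical round trip with the two converters (file names are placeholders): converting binary to text and back should reproduce the parameter payload, up to the 7-decimal-place formatting used when printing each float.

binary2text("model_32.emb", "model_32.txt", 32)
text2binary("model_32.txt", "model_32_roundtrip.emb")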
@@ -1,32 +0,0 @@
#!/bin/bash
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
set -x
BASE_URL='http://paddlepaddle.cdn.bcebos.com/model_zoo/embedding'

DOWNLOAD_ITEMS=(baidu.dict model_32.emb model_64.emb model_128.emb model_256.emb)
ITEM_MD5=(fa03a12321eaab6c30a8fcc9442eaea3
          f88c8325ee6da6187f1080e8fe66c1cd
          927cf70f27f860aff1a5703ebf7f1584
          a52e43655cd25d279777ed509a1ae27b
          b92c67fe9ff70fea53596080e351ac80)

for ((i=0; i<${#ITEM_MD5[@]}; i++))
do
    FILENAME=${DOWNLOAD_ITEMS[${i}]}
    REAL_MD5=`wget ${BASE_URL}/${FILENAME} -O - | tee ${FILENAME} | md5sum | cut -d ' ' -f 1`
    EXPECTED_MD5=${ITEM_MD5[${i}]}
    [ "${EXPECTED_MD5}" = "${REAL_MD5}" ]
done
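The script streams each download through md5sum and, because of set -e, aborts on the first checksum mismatch. A rough Python equivalent of the verification step, for reference:

import hashlib

def md5_of_file(path, chunk_size=1 << 20):
    # Stream in chunks so the larger .emb files need not fit in memory.
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()

assert md5_of_file('baidu.dict') == 'fa03a12321eaab6c30a8fcc9442eaea3'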
@@ -1,5 +0,0 @@
fea_output/
features/
model.list
ResNet_50.dot
ResNet_50.png
(File diff suppressed because it is too large.)
@@ -1 +0,0 @@
*image_list_provider_copy_1.py
@@ -1,13 +0,0 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Some files were not shown because too many files have changed in this diff.