commit
7830893a83
@@ -0,0 +1,24 @@
# Get the latest git tag.
set(PADDLE_VERSION $ENV{PADDLE_VERSION})
set(tmp_version "HEAD")
while ("${PADDLE_VERSION}" STREQUAL "")
  execute_process(
    COMMAND ${GIT_EXECUTABLE} describe --tags --abbrev=0 ${tmp_version}
    WORKING_DIRECTORY ${PROJ_ROOT}
    OUTPUT_VARIABLE GIT_TAG_NAME
    RESULT_VARIABLE GIT_RESULT
    ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
  if (NOT ${GIT_RESULT})
    # Check that the tag is a valid version string.
    if (${GIT_TAG_NAME} MATCHES "v[0-9]+\\.[0-9]+\\.[0-9]+(\\.(a|b|rc)\\.[0-9]+)?")
      string(REPLACE "v" "" PADDLE_VERSION ${GIT_TAG_NAME})
    else() # otherwise, try the previous git tag.
      set(tmp_version "${GIT_TAG_NAME}~1")
    endif()
  else()
    set(PADDLE_VERSION "0.0.0")
    message(WARNING "Cannot determine paddle version from git tag")
  endif()
endwhile()

message(STATUS "Paddle version is ${PADDLE_VERSION}")
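For readers less familiar with CMake, here is a rough Python rendering of the tag walk-back above (an illustration only, not part of the commit; it assumes git is on PATH and the working directory is the project root):

import re
import subprocess

def paddle_version(start="HEAD"):
    ref = start
    while True:
        try:
            tag = subprocess.check_output(
                ["git", "describe", "--tags", "--abbrev=0", ref],
                stderr=subprocess.DEVNULL).decode().strip()
        except subprocess.CalledProcessError:
            return "0.0.0"  # no reachable tag: same fallback as the WARNING branch
        if re.match(r"v[0-9]+\.[0-9]+\.[0-9]+(\.(a|b|rc)\.[0-9]+)?", tag):
            return tag[1:]  # strip the leading "v"
        ref = tag + "~1"  # not a version tag: walk back to the previous tag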
@@ -0,0 +1,11 @@
output/
uniform_params/
cifar_params/
mnist_params/
*.png
.pydevproject
.project
*.log
*.pyc
data/mnist_data/
data/cifar-10-batches-py/
@@ -0,0 +1,13 @@
# Generative Adversarial Networks (GAN)

This demo implements GAN training described in the original GAN paper (https://arxiv.org/abs/1406.2661) and DCGAN (https://arxiv.org/abs/1511.06434).

The general training procedures are implemented in gan_trainer.py. The neural network configurations are specified in gan_conf.py (for synthetic data) and gan_conf_image.py (for image data).

In order to run the model, first download the corresponding data by running the shell script in ./data.
Then you can run the command below. The flag -d specifies the training data (cifar, mnist or uniform) and the flag --use_gpu specifies whether to use a GPU for training (0 for CPU, 1 for GPU).

$python gan_trainer.py -d cifar --use_gpu 1

The generated images will be stored in ./cifar_samples/
The corresponding models will be stored in ./cifar_params/
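For example, to train on MNIST with the CPU instead:

$python gan_trainer.py -d mnist --use_gpu 0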
@@ -0,0 +1,18 @@
# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
wget https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
tar zxf cifar-10-python.tar.gz
rm cifar-10-python.tar.gz
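The tarball unpacks to cifar-10-batches-py/, whose batch files are Python pickles. A minimal loader for illustration only (the demo's own loading code lives in gan_trainer.py, whose diff is suppressed below):

import pickle
import numpy as np

def load_cifar_batch(path):
    with open(path, "rb") as f:
        batch = pickle.load(f, encoding="bytes")  # on Python 2, omit encoding
    data = np.asarray(batch[b"data"], dtype=np.uint8)  # shape (10000, 3072)
    # Each row is 3072 bytes: the R, G, B planes of one 32x32 image.
    return data.reshape(-1, 3, 32, 32), batch[b"labels"]

images, labels = load_cifar_batch("cifar-10-batches-py/data_batch_1")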
@@ -0,0 +1,19 @@
#!/usr/bin/env sh
# This script downloads the mnist data and unzips it.
set -e
DIR="$( cd "$(dirname "$0")" ; pwd -P )"
rm -rf "$DIR/mnist_data"
mkdir "$DIR/mnist_data"
cd "$DIR/mnist_data"

echo "Downloading..."

for fname in train-images-idx3-ubyte train-labels-idx1-ubyte t10k-images-idx3-ubyte t10k-labels-idx1-ubyte
do
    if [ ! -e "$fname" ]; then
        wget --no-check-certificate http://yann.lecun.com/exdb/mnist/${fname}.gz
        gunzip "${fname}.gz"
    fi
done
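The downloaded files are in the IDX format: a big-endian header followed by raw bytes. A minimal image reader for illustration only (the demo's own loader is in the suppressed gan_trainer.py):

import struct
import numpy as np

def load_idx_images(path):
    with open(path, "rb") as f:
        # Header: magic number, image count, rows, cols (big-endian uint32).
        magic, n, rows, cols = struct.unpack(">IIII", f.read(16))
        assert magic == 2051, "not an IDX image file"
        return np.frombuffer(f.read(), dtype=np.uint8).reshape(n, rows, cols)

train_images = load_idx_images("mnist_data/train-images-idx3-ubyte")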
@@ -0,0 +1,134 @@
# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer_config_helpers import *

mode = get_config_arg("mode", str, "generator")
assert mode in set(["generator",
                    "discriminator",
                    "generator_training",
                    "discriminator_training"])

is_generator_training = mode == "generator_training"
is_discriminator_training = mode == "discriminator_training"
is_generator = mode == "generator"
is_discriminator = mode == "discriminator"

# The network structure below follows the ref https://arxiv.org/abs/1406.2661.
# Here we use two hidden layers and batch_norm.

print('mode=%s' % mode)
# the dim of the noise (z) as the input of the generator network
noise_dim = 10
# the dim of the hidden layer
hidden_dim = 10
# the dim of the generated sample
sample_dim = 2

settings(
    batch_size=128,
    learning_rate=1e-4,
    learning_method=AdamOptimizer(beta1=0.5)
)


def discriminator(sample):
    """
    discriminator outputs the probability that a sample comes from the
    generator or from real data.
    The output has two dimensions: dimension 0 is the probability that
    the sample is from the generator, and dimension 1 is the probability
    that the sample is from real data.
    """
    # Freeze the discriminator's parameters while the generator is trained.
    param_attr = ParamAttr(is_static=is_generator_training)
    bias_attr = ParamAttr(is_static=is_generator_training,
                          initial_mean=1.0,
                          initial_std=0)

    hidden = fc_layer(input=sample, name="dis_hidden", size=hidden_dim,
                      bias_attr=bias_attr,
                      param_attr=param_attr,
                      act=ReluActivation())

    hidden2 = fc_layer(input=hidden, name="dis_hidden2", size=hidden_dim,
                       bias_attr=bias_attr,
                       param_attr=param_attr,
                       act=LinearActivation())

    hidden_bn = batch_norm_layer(hidden2,
                                 act=ReluActivation(),
                                 name="dis_hidden_bn",
                                 bias_attr=bias_attr,
                                 param_attr=ParamAttr(is_static=is_generator_training,
                                                      initial_mean=1.0,
                                                      initial_std=0.02),
                                 use_global_stats=False)

    return fc_layer(input=hidden_bn, name="dis_prob", size=2,
                    bias_attr=bias_attr,
                    param_attr=param_attr,
                    act=SoftmaxActivation())


def generator(noise):
    """
    generator generates a sample given noise.
    """
    # Freeze the generator's parameters while the discriminator is trained.
    param_attr = ParamAttr(is_static=is_discriminator_training)
    bias_attr = ParamAttr(is_static=is_discriminator_training,
                          initial_mean=1.0,
                          initial_std=0)

    hidden = fc_layer(input=noise,
                      name="gen_layer_hidden",
                      size=hidden_dim,
                      bias_attr=bias_attr,
                      param_attr=param_attr,
                      act=ReluActivation())

    hidden2 = fc_layer(input=hidden, name="gen_hidden2", size=hidden_dim,
                       bias_attr=bias_attr,
                       param_attr=param_attr,
                       act=LinearActivation())

    hidden_bn = batch_norm_layer(hidden2,
                                 act=ReluActivation(),
                                 name="gen_layer_hidden_bn",
                                 bias_attr=bias_attr,
                                 param_attr=ParamAttr(is_static=is_discriminator_training,
                                                      initial_mean=1.0,
                                                      initial_std=0.02),
                                 use_global_stats=False)

    return fc_layer(input=hidden_bn,
                    name="gen_layer1",
                    size=sample_dim,
                    bias_attr=bias_attr,
                    param_attr=param_attr,
                    act=LinearActivation())


if is_generator_training:
    noise = data_layer(name="noise", size=noise_dim)
    sample = generator(noise)

if is_discriminator_training:
    sample = data_layer(name="sample", size=sample_dim)

if is_generator_training or is_discriminator_training:
    label = data_layer(name="label", size=1)
    prob = discriminator(sample)
    cost = cross_entropy(input=prob, label=label)
    classification_error_evaluator(input=prob, label=label, name=mode + '_error')
    outputs(cost)

if is_generator:
    noise = data_layer(name="noise", size=noise_dim)
    outputs(generator(noise))
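Both *_training modes share the cross_entropy cost above; only the labels differ between them. A plain NumPy illustration of the label convention implied by the discriminator docstring (not PaddlePaddle code; the probabilities are made up):

import numpy as np

def cross_entropy(prob, label):
    # mean negative log-probability of the labeled class
    return -np.mean(np.log(prob[np.arange(len(label)), label]))

prob = np.array([[0.9, 0.1],   # discriminator says: generated
                 [0.2, 0.8]])  # discriminator says: real
# discriminator_training: generated samples are labeled 0, real samples 1.
print(cross_entropy(prob, np.array([0, 1])))
# generator_training: generated samples are labeled 1, so the cost only
# drops when the (frozen, is_static) discriminator is fooled into
# calling them real.
print(cross_entropy(prob, np.array([1, 1])))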
(Three more file diffs suppressed because they are too large; some files were not shown because too many files have changed in this diff.)