commit
7830893a83
@ -0,0 +1,24 @@
|
||||
# Get the latest git tag.
#
# Resolution order:
#   1. If the PADDLE_VERSION environment variable is set, use it as-is.
#   2. Otherwise walk backwards through the git tags (starting at HEAD)
#      until one matching the release-version pattern vX.Y.Z[.(a|b|rc).N]
#      is found; strip the leading "v" to obtain PADDLE_VERSION.
#   3. If git fails entirely (e.g. not a git checkout), fall back to "0.0.0".
set(PADDLE_VERSION $ENV{PADDLE_VERSION})
set(tmp_version "HEAD")
while("${PADDLE_VERSION}" STREQUAL "")
    execute_process(
        COMMAND ${GIT_EXECUTABLE} describe --tags --abbrev=0 ${tmp_version}
        WORKING_DIRECTORY ${PROJ_ROOT}
        OUTPUT_VARIABLE GIT_TAG_NAME
        RESULT_VARIABLE GIT_RESULT
        ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
    # GIT_RESULT is 0 on success; test the variable itself instead of
    # dereferencing it, so an empty/unset result cannot break the if().
    if(NOT GIT_RESULT)
        # Check the tag is a correct version. GIT_TAG_NAME is quoted so an
        # empty tag name compares as "" rather than producing a malformed if().
        if("${GIT_TAG_NAME}" MATCHES "v[0-9]+\\.[0-9]+\\.[0-9]+(\\.(a|b|rc)\\.[0-9]+)?")
            string(REPLACE "v" "" PADDLE_VERSION ${GIT_TAG_NAME})
        else()  # otherwise, get the previous git tag name.
            set(tmp_version "${GIT_TAG_NAME}~1")
        endif()
    else()
        set(PADDLE_VERSION "0.0.0")
        message(WARNING "Cannot add paddle version from git tag")
    endif()
endwhile()

message(STATUS "Paddle version is ${PADDLE_VERSION}")
|
@ -0,0 +1,11 @@
|
||||
output/
|
||||
uniform_params/
|
||||
cifar_params/
|
||||
mnist_params/
|
||||
*.png
|
||||
.pydevproject
|
||||
.project
|
||||
*.log
|
||||
*.pyc
|
||||
data/mnist_data/
|
||||
data/cifar-10-batches-py/
|
@ -0,0 +1,13 @@
|
||||
# Generative Adversarial Networks (GAN)
|
||||
|
||||
This demo implements GAN training described in the original GAN paper (https://arxiv.org/abs/1406.2661) and DCGAN (https://arxiv.org/abs/1511.06434).
|
||||
|
||||
The general training procedures are implemented in gan_trainer.py. The neural network configurations are specified in gan_conf.py (for synthetic data) and gan_conf_image.py (for image data).
|
||||
|
||||
In order to run the model, first download the corresponding data by running the shell script in ./data.
|
||||
Then you can run the command below. The flag -d specifies the training data (cifar, mnist or uniform) and the flag --use_gpu specifies whether to use the GPU for training (0 for CPU, 1 for GPU).
|
||||
|
||||
$python gan_trainer.py -d cifar --use_gpu 1
|
||||
|
||||
The generated images will be stored in ./cifar_samples/
|
||||
The corresponding models will be stored in ./cifar_params/
|
@ -0,0 +1,18 @@
|
||||
# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# Abort immediately if any command fails.
set -e
# Fetch the CIFAR-10 python-format dataset archive from the official site.
wget https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
# Unpack the archive (creates cifar-10-batches-py/ in the current directory).
tar zxf cifar-10-python.tar.gz
# Remove the archive once extracted.
rm cifar-10-python.tar.gz
|
||||
|
@ -0,0 +1,19 @@
|
||||
#!/usr/bin/env sh
# This script downloads the mnist data and unzips it.
# Abort immediately if any command fails.
set -e
# Resolve the directory containing this script so it can be run from anywhere.
DIR="$( cd "$(dirname "$0")" ; pwd -P )"
# Start from a clean data directory.
rm -rf "$DIR/mnist_data"
mkdir "$DIR/mnist_data"
cd "$DIR/mnist_data"

echo "Downloading..."

# Fetch and decompress each of the four MNIST files, skipping any that
# already exist (e.g. from a previous partial run). Variable expansions
# are quoted to be safe against word splitting.
for fname in train-images-idx3-ubyte train-labels-idx1-ubyte t10k-images-idx3-ubyte t10k-labels-idx1-ubyte
do
    if [ ! -e "$fname" ]; then
        wget --no-check-certificate "http://yann.lecun.com/exdb/mnist/${fname}.gz"
        gunzip "${fname}.gz"
    fi
done
|
||||
|
||||
|
@ -0,0 +1,134 @@
|
||||
# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from paddle.trainer_config_helpers import *
|
||||
|
||||
# Which network(s) this config instantiates; supplied by the trainer via
# config_args. "generator"/"discriminator" build inference-only networks;
# the "*_training" modes build the corresponding training graph.
mode = get_config_arg("mode", str, "generator")
assert mode in set(["generator",
                    "discriminator",
                    "generator_training",
                    "discriminator_training"])

is_generator_training = mode == "generator_training"
is_discriminator_training = mode == "discriminator_training"
is_generator = mode == "generator"
is_discriminator = mode == "discriminator"

# The network structure below follows the ref https://arxiv.org/abs/1406.2661
# Here we used two hidden layers and batch_norm

print('mode=%s' % mode)
# the dim of the noise (z) as the input of the generator network
noise_dim = 10
# the dim of the hidden layer
hidden_dim = 10
# the dim of the generated sample
sample_dim = 2

# Global trainer settings; beta1=0.5 follows common practice for GAN
# training with Adam (see the DCGAN paper).
settings(
    batch_size=128,
    learning_rate=1e-4,
    learning_method=AdamOptimizer(beta1=0.5)
)
|
||||
|
||||
def discriminator(sample):
    """Score ``sample`` as generator-produced versus real data.

    The output is two-dimensional: dimension 0 is the probability that
    the sample came from the generator, and dimension 1 the probability
    that it came from the real data.
    """
    # Discriminator parameters are frozen (is_static) while the
    # generator is the network being trained.
    weight_attr = ParamAttr(is_static=is_generator_training)
    bias_cfg = ParamAttr(is_static=is_generator_training,
                         initial_mean=1.0,
                         initial_std=0)

    layer1 = fc_layer(input=sample, name="dis_hidden", size=hidden_dim,
                      bias_attr=bias_cfg,
                      param_attr=weight_attr,
                      act=ReluActivation())

    layer2 = fc_layer(input=layer1, name="dis_hidden2", size=hidden_dim,
                      bias_attr=bias_cfg,
                      param_attr=weight_attr,
                      act=LinearActivation())

    # Batch norm after the second (linear) hidden layer, with ReLU applied
    # to the normalized output.
    normalized = batch_norm_layer(layer2,
                                  act=ReluActivation(),
                                  name="dis_hidden_bn",
                                  bias_attr=bias_cfg,
                                  param_attr=ParamAttr(is_static=is_generator_training,
                                                       initial_mean=1.0,
                                                       initial_std=0.02),
                                  use_global_stats=False)

    # Two-way softmax over (fake, real).
    return fc_layer(input=normalized, name="dis_prob", size=2,
                    bias_attr=bias_cfg,
                    param_attr=weight_attr,
                    act=SoftmaxActivation())
|
||||
|
||||
def generator(noise):
    """Map a noise vector to a generated sample."""
    # Generator parameters are frozen (is_static) while the
    # discriminator is the network being trained.
    weight_attr = ParamAttr(is_static=is_discriminator_training)
    bias_cfg = ParamAttr(is_static=is_discriminator_training,
                         initial_mean=1.0,
                         initial_std=0)

    layer1 = fc_layer(input=noise,
                      name="gen_layer_hidden",
                      size=hidden_dim,
                      bias_attr=bias_cfg,
                      param_attr=weight_attr,
                      act=ReluActivation())

    layer2 = fc_layer(input=layer1, name="gen_hidden2", size=hidden_dim,
                      bias_attr=bias_cfg,
                      param_attr=weight_attr,
                      act=LinearActivation())

    # Batch norm after the second (linear) hidden layer, with ReLU applied
    # to the normalized output.
    normalized = batch_norm_layer(layer2,
                                  act=ReluActivation(),
                                  name="gen_layer_hidden_bn",
                                  bias_attr=bias_cfg,
                                  param_attr=ParamAttr(is_static=is_discriminator_training,
                                                       initial_mean=1.0,
                                                       initial_std=0.02),
                                  use_global_stats=False)

    # Linear output layer producing the sample itself.
    return fc_layer(input=normalized,
                    name="gen_layer1",
                    size=sample_dim,
                    bias_attr=bias_cfg,
                    param_attr=weight_attr,
                    act=LinearActivation())
|
||||
|
||||
# Generator-training pass: the generator maps noise to a sample, which is
# then scored by the discriminator built below (its parameters are frozen
# via is_static in discriminator()).
if is_generator_training:
    noise = data_layer(name="noise", size=noise_dim)
    sample = generator(noise)

# Discriminator-training pass: samples come in directly as training data.
if is_discriminator_training:
    sample = data_layer(name="sample", size=sample_dim)

# Both training modes share the same discriminator head and loss.
if is_generator_training or is_discriminator_training:
    label = data_layer(name="label", size=1)
    prob = discriminator(sample)
    cost = cross_entropy(input=prob, label=label)
    classification_error_evaluator(input=prob, label=label, name=mode+'_error')
    outputs(cost)

# Inference-only generator network: noise in, generated samples out.
if is_generator:
    noise = data_layer(name="noise", size=noise_dim)
    outputs(generator(noise))
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -1,122 +1,81 @@
|
||||
Docker installation guide
|
||||
==========================
|
||||
Using and Building Docker Images
|
||||
================================
|
||||
|
||||
PaddlePaddle provides `Docker <https://www.docker.com/>`_ images. `Docker`_ is a lightweight container utility. The performance of PaddlePaddle in a `Docker`_ container is essentially the same as running it natively on Linux. `Docker`_ is a very convenient way to deliver binary releases of Linux programs.
|
||||
We release PaddlePaddle in the form of `Docker <https://www.docker.com/>`_ images on `dockerhub.com <https://hub.docker.com/r/paddledev/paddle/>`_. Running as Docker containers is currently the only officially-supported way to running PaddlePaddle.
|
||||
|
||||
.. note::
|
||||
Run Docker images
|
||||
-----------------
|
||||
|
||||
The `Docker`_ image is the recommended way to run PaddlePaddle
|
||||
For each version of PaddlePaddle, we release 4 variants of Docker images:
|
||||
|
||||
PaddlePaddle Docker images
|
||||
--------------------------
|
||||
+-----------------+-------------+-------+
|
||||
| | CPU AVX | GPU |
|
||||
+=================+=============+=======+
|
||||
| cpu | yes | no |
|
||||
+-----------------+-------------+-------+
|
||||
| cpu-noavx | no | no |
|
||||
+-----------------+-------------+-------+
|
||||
| gpu | yes | yes |
|
||||
+-----------------+-------------+-------+
|
||||
| gpu-noavx | no | yes |
|
||||
+-----------------+-------------+-------+
|
||||
|
||||
There are 12 `images <https://hub.docker.com/r/paddledev/paddle/tags/>`_ for PaddlePaddle, and the name is :code:`paddle-dev/paddle`, tags are\:
|
||||
We run the following command on Linux to check if the CPU supports :code:`AVX`.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
+-----------------+------------------+------------------------+-----------------------+
|
||||
| | normal | devel | demo |
|
||||
+=================+==================+========================+=======================+
|
||||
| CPU | cpu-latest | cpu-devel-latest | cpu-demo-latest |
|
||||
+-----------------+------------------+------------------------+-----------------------+
|
||||
| GPU | gpu-latest | gpu-devel-latest | gpu-demo-latest |
|
||||
+-----------------+------------------+------------------------+-----------------------+
|
||||
| CPU WITHOUT AVX | cpu-noavx-latest | cpu-devel-noavx-latest | cpu-demo-noavx-latest |
|
||||
+-----------------+------------------+------------------------+-----------------------+
|
||||
| GPU WITHOUT AVX | gpu-noavx-latest | gpu-devel-noavx-latest | gpu-demo-noavx-latest |
|
||||
+-----------------+------------------+------------------------+-----------------------+
|
||||
if cat /proc/cpuinfo | grep -i avx; then echo Yes; else echo No; fi
|
||||
|
||||
And the three columns are:
|
||||
On Mac OS X, we need to run
|
||||
|
||||
* normal\: The docker image only contains binary of PaddlePaddle.
|
||||
* devel\: The docker image contains PaddlePaddle binary, source code and essential build environment.
|
||||
* demo\: The docker image contains the dependencies to run PaddlePaddle demo.
|
||||
.. code-block:: bash
|
||||
|
||||
And the four rows are:
|
||||
sysctl -a | grep machdep.cpu.leaf7_features
|
||||
|
||||
* CPU\: CPU Version. Support CPU which has :code:`AVX` instructions.
|
||||
* GPU\: GPU Version. Support GPU, and cpu has :code:`AVX` instructions.
|
||||
* CPU WITHOUT AVX\: CPU Version, which support most CPU even doesn't have :code:`AVX` instructions.
|
||||
* GPU WITHOUT AVX\: GPU Version, which support most CPU even doesn't have :code:`AVX` instructions.
|
||||
|
||||
Users can choose a version depending on their machine. The following script can help you detect whether your CPU supports :code:`AVX`.
|
||||
Once we determine the proper variant, we can cope with the Docker image tag name by appending the version number. For example, the following command runs the AVX-enabled image of the most recent version:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
if cat /proc/cpuinfo | grep -q avx ; then echo "Support AVX"; else echo "Not support AVX"; fi
|
||||
.. code-block:: bash
|
||||
|
||||
If the output is :code:`Support AVX`, then you can choose the AVX version of PaddlePaddle; otherwise, you need to select the :code:`noavx` version of PaddlePaddle. For example, the CPU develop version of PaddlePaddle is :code:`paddle-dev/paddle:cpu-devel-latest`.
|
||||
docker run -it --rm paddledev/paddle:cpu-latest /bin/bash
|
||||
|
||||
The PaddlePaddle images don't contain any entry command. You need to write your entry command to use this image. See :code:`Remote Access` part or just use following command to run a :code:`bash`
|
||||
To run a GPU-enabled image, you need to install CUDA and let Docker knows about it:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker run -it paddledev/paddle:cpu-latest /bin/bash
|
||||
|
||||
|
||||
Download and Run Docker images
|
||||
------------------------------
|
||||
|
||||
You have to install Docker in your machine which has linux kernel version 3.10+ first. You can refer to the official guide https://docs.docker.com/engine/installation/ for further information.
|
||||
|
||||
You can use :code:`docker pull ` to download images first, or just launch a container with :code:`docker run` \:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker run -it paddledev/paddle:cpu-latest
|
||||
|
||||
|
||||
If you want to launch container with GPU support, you need to set some environment variables at the same time:
|
||||
|
||||
.. code-block:: bash
|
||||
.. code-block:: bash
|
||||
|
||||
export CUDA_SO="$(\ls /usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}')"
|
||||
export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}')
|
||||
docker run ${CUDA_SO} ${DEVICES} -it paddledev/paddle:gpu-latest
|
||||
|
||||
The default entry point of all our Docker images starts the OpenSSH server. To run PaddlePaddle and to expose OpenSSH port to 2202 on the host computer:
|
||||
|
||||
Some notes for docker
|
||||
---------------------
|
||||
|
||||
Performance
|
||||
+++++++++++
|
||||
|
||||
Since Docker is based on lightweight virtual containers, CPU computing performance is largely unaffected. The GPU driver and devices are mapped into the container, so GPU computing performance is not seriously affected either.
|
||||
|
||||
If you use a high-performance NIC, such as RDMA (RoCE 40GbE or IB 56GbE) or Ethernet (10GbE), it is recommended to use the Docker option "--net=host".
|
||||
|
||||
|
||||
|
||||
|
||||
Remote access
|
||||
+++++++++++++
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
If you want to enable ssh access background, you need to build an image by yourself. Please refer to official guide https://docs.docker.com/engine/reference/builder/ for further information.
|
||||
docker run -d -p 2202:22 paddledev/paddle:cpu-latest
|
||||
|
||||
Following is a simple Dockerfile with ssh:
|
||||
Then we can login to the container using username :code:`root` and password :code:`root`:
|
||||
|
||||
.. literalinclude:: ../../doc_cn/build_and_install/install/paddle_ssh.Dockerfile
|
||||
.. code-block:: bash
|
||||
|
||||
Then you can build an image with Dockerfile and launch a container:
|
||||
ssh -p 2202 root@localhost
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# cd into Dockerfile directory
|
||||
docker build . -t paddle_ssh
|
||||
# run container, and map host machine port 8022 to container port 22
|
||||
docker run -d -p 8022:22 --name paddle_ssh_machine paddle_ssh
|
||||
Build Docker images
|
||||
-------------------
|
||||
|
||||
Now, you can ssh on port 8022 to access the container, username is root, password is also root:
|
||||
Developers might want to build Docker images from their local commit or from a tagged version. Suppose that your local repo is at :code:`~/work/Paddle`, the following steps builds a cpu variant from your current work:
|
||||
|
||||
.. code-block:: bash
|
||||
.. code-block:: bash
|
||||
|
||||
ssh -p 8022 root@YOUR_HOST_MACHINE
|
||||
cd ~/Paddle
|
||||
./paddle/scripts/docker/generates.sh # Use m4 to generate Dockerfiles for each variant.
|
||||
docker build -t paddle:latest -f ./paddle/scripts/docker/Dockerfile.cpu
|
||||
|
||||
You can stop and delete the container as following:
|
||||
As a release engineer, you might want to build Docker images for a certain version and publish them to dockerhub.com. You can do this by switching to the right Git tag, or create a new tag, before running `docker build`. For example, the following commands build Docker images for v0.9.0:
|
||||
|
||||
.. code-block:: bash
|
||||
.. code-block:: bash
|
||||
|
||||
# stop
|
||||
docker stop paddle_ssh_machine
|
||||
# delete
|
||||
docker rm paddle_ssh_machine
|
||||
cd ~/Paddle
|
||||
git checkout tags/v0.9.0
|
||||
./paddle/scripts/docker/generates.sh # Use m4 to generate Dockerfiles for each variant.
|
||||
docker build -t paddle:cpu-v0.9.0 -f ./paddle/scripts/docker/Dockerfile.cpu
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue