merge develop

avx_docs
qiaolongfei 9 years ago
commit 8ff42cd37d

@@ -1,31 +0,0 @@
# External dependency on Google protobuf.
http_archive(
name="protobuf",
url="http://github.com/google/protobuf/archive/v3.1.0.tar.gz",
sha256="0a0ae63cbffc274efb573bdde9a253e3f32e458c41261df51c5dbc5ad541e8f7",
strip_prefix="protobuf-3.1.0")
# External dependency on gtest 1.7.0. This method comes from
# https://www.bazel.io/versions/master/docs/tutorial/cpp.html.
new_http_archive(
name="gtest",
url="https://github.com/google/googletest/archive/release-1.7.0.zip",
sha256="b58cb7547a28b2c718d1e38aee18a3659c9e3ff52440297e965f5edffe34b6d0",
build_file="third_party/gtest.BUILD",
strip_prefix="googletest-release-1.7.0")
# External dependency on gflags. This method comes from
# https://github.com/gflags/example/blob/master/WORKSPACE.
new_git_repository(
name="gflags",
tag="v2.2.0",
remote="https://github.com/gflags/gflags.git",
build_file="third_party/gflags.BUILD")
# External dependency on glog. This method comes from
# https://github.com/reyoung/bazel_playground/blob/master/WORKSPACE
new_git_repository(
name="glog",
remote="https://github.com/google/glog.git",
commit="b6a5e0524c28178985f0d228e9eaa43808dbec3c",
build_file="third_party/glog.BUILD")

@@ -17,7 +17,7 @@ set -e
#Note: the default model is pass-00002; you should make sure the model path
#exists or change the model path.
#Only tested with trainer_config.lr.py
model=output/pass-00001/
model=output/model/pass-00001/
config=trainer_config.lr.py
label=data/labels.list
dict=data/dict.txt

@@ -0,0 +1,44 @@
#!/bin/bash
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
# Run pserver.sh before running this script.
bin_dir=$(cd `dirname $0`; pwd)
home_dir=$(cd "${bin_dir}/.."; pwd)
source "$bin_dir/env.sh"
model_dir="$bin_dir/output"
log_file="$bin_dir/train.log"
pushd "$home_dir"
cfg=trainer_config.lr.py
paddle train \
--config=$cfg \
--save_dir=${model_dir} \
--trainer_count=4 \
--local=0 \
--log_period=100 \
--num_passes=15 \
--use_gpu=false \
--show_parameter_stats_period=100 \
--test_all_data_in_one_period=1 \
--num_gradient_servers=1 \
--nics=`get_nics` \
--port=7164 \
--ports_num=1 \
--pservers="127.0.0.1" \
--comment="paddle_trainer" \
2>&1 | tee "$log_file"
popd

@@ -0,0 +1,28 @@
#!/bin/bash
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
function get_nics() {
machine=`uname -s`
local nics=""
if [ "$machine" == "Linux" ]; then
nics="lo"
elif [ "$machine" == "Darwin" ]; then
nics="lo0"
else
nics="unsupport"
fi
echo $nics
}

@@ -0,0 +1,26 @@
#!/bin/bash
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
bin_dir=$(cd `dirname $0`; pwd)
source "$bin_dir/env.sh"
paddle pserver \
--nics=`get_nics` \
--port=7164 \
--ports_num=1 \
--ports_num_for_sparse=1 \
--num_gradient_servers=1 \
--comment="paddle_pserver" \
2>&1 | tee 'pserver.log'

@@ -65,16 +65,13 @@ The general development workflow with Docker and Bazel is as follows:
--name paddle \
-p 2022:22 \
-v $PWD:/paddle \
-v $HOME/.cache/bazel:/root/.cache/bazel \
paddle:dev
where :code:`-d` makes the container run in the background,
:code:`--name paddle` allows us to run an nginx container to serve
documents in this container, :code:`-p 2022:22` allows us to SSH
into this container, :code:`-v $PWD:/paddle` shares the source code
on the host with the container, and :code:`-v
$HOME/.cache/bazel:/root/.cache/bazel` shares the Bazel cache on the
host with the container.
on the host with the container.
4. SSH into the container:
@@ -94,13 +91,6 @@ The general development workflow with Docker and Bazel is as follows:
make -j `nproc`
CTEST_OUTPUT_ON_FAILURE=1 ctest
or Bazel in the container:
.. code-block:: bash
cd /paddle
bazel test ...
CPU-only and GPU Images
-----------------------

Binary file not shown.


@@ -0,0 +1,143 @@
# Generative Adversarial Networks (GAN)
This demo implements the GAN training described in the original [GAN paper](https://arxiv.org/abs/1406.2661) and in the deep convolutional generative adversarial networks [DCGAN paper](https://arxiv.org/abs/1511.06434).
The high-level structure of a GAN is shown in Figure 1 below. It is composed of two major parts, a generator and a discriminator, both of which are neural networks. The generator takes noise with a known distribution and transforms it into an image. The discriminator takes an image and determines whether it was artificially generated by the generator or is a real image. The generator and the discriminator are thus in a competitive game: the generator tries to generate images that look as real as possible in order to fool the discriminator, while the discriminator tries to distinguish between real and fake images.
<p align="center">
<img src="./gan.png" width="500" height="300">
</p>
<p align="center">
Figure 1. GAN Model Structure
<a href="https://ishmaelbelghazi.github.io/ALI/">figure credit</a>
</p>
The generator and the discriminator take turns being trained with SGD. The generator's objective is to have its generated images classified as real by the discriminator, and the discriminator's objective is to correctly classify real and fake images. When the GAN model converges to the equilibrium state, the generator transforms the given noise distribution into the distribution of real images, and the discriminator can no longer distinguish between real and fake images at all.
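These two objectives are summarized by the minimax game from the original GAN paper cited above:
```latex
\min_G \max_D V(D, G) =
  \mathbb{E}_{x \sim p_{\mathrm{data}}(x)}\left[\log D(x)\right] +
  \mathbb{E}_{z \sim p_z(z)}\left[\log\left(1 - D(G(z))\right)\right]
```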
## Implementation of GAN Model Structure
Since the GAN model involves multiple neural networks, it requires the use of the Paddle Python API. The code walk-through below can therefore also serve as a partial introduction to the Paddle Python API.
There are three networks defined in gan_conf.py, namely **generator_training**, **discriminator_training** and **generator**. They relate to the model structure defined above as follows: **discriminator_training** is the discriminator, **generator** is the generator, and **generator_training** combines the generator and the discriminator, since training the generator requires the discriminator to provide the loss function. This relationship is expressed in the following code:
```python
if is_generator_training:
    noise = data_layer(name="noise", size=noise_dim)
    sample = generator(noise)

if is_discriminator_training:
    sample = data_layer(name="sample", size=sample_dim)

if is_generator_training or is_discriminator_training:
    label = data_layer(name="label", size=1)
    prob = discriminator(sample)
    cost = cross_entropy(input=prob, label=label)
    classification_error_evaluator(
        input=prob, label=label, name=mode + '_error')
    outputs(cost)

if is_generator:
    noise = data_layer(name="noise", size=noise_dim)
    outputs(generator(noise))
```
In order to train the networks defined in gan_conf.py, one first needs to initialize a Paddle environment, parse the config, create a GradientMachine from the config, and create a trainer from the GradientMachine, as done in the code chunk below:
```python
import py_paddle.swig_paddle as api

# init paddle environment
api.initPaddle('--use_gpu=' + use_gpu, '--dot_period=10',
               '--log_period=100', '--gpu_id=' + args.gpu_id,
               '--save_dir=' + "./%s_params/" % data_source)

# Parse config
gen_conf = parse_config(conf, "mode=generator_training,data=" + data_source)
dis_conf = parse_config(conf, "mode=discriminator_training,data=" + data_source)
generator_conf = parse_config(conf, "mode=generator,data=" + data_source)

# Create GradientMachine
dis_training_machine = api.GradientMachine.createFromConfigProto(
    dis_conf.model_config)
gen_training_machine = api.GradientMachine.createFromConfigProto(
    gen_conf.model_config)
generator_machine = api.GradientMachine.createFromConfigProto(
    generator_conf.model_config)

# Create trainer
dis_trainer = api.Trainer.create(dis_conf, dis_training_machine)
gen_trainer = api.Trainer.create(gen_conf, gen_training_machine)
```
In order to balance the strength of the generator and the discriminator, we train whichever one is currently performing worse, as judged by comparing their loss values. The loss value can be computed with a forward pass through the GradientMachine.
```python
def get_training_loss(training_machine, inputs):
    outputs = api.Arguments.createArguments(0)
    training_machine.forward(inputs, outputs, api.PASS_TEST)
    loss = outputs.getSlotValue(0).copyToNumpyMat()
    return numpy.mean(loss)
```
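The scheduling decision itself is just a comparison of these losses. A minimal sketch, assuming `gen_inputs`/`dis_inputs` and `data_batch_gen`/`data_batch_dis` are prepared input and training batches (illustrative names, not necessarily those used in gan_trainer.py):
```python
# Train whichever network currently has the higher loss,
# i.e. the one that is performing worse.
g_loss = get_training_loss(gen_training_machine, gen_inputs)
d_loss = get_training_loss(dis_training_machine, dis_inputs)
if g_loss > d_loss:
    gen_trainer.trainOneDataBatch(batch_size, data_batch_gen)
else:
    dis_trainer.trainOneDataBatch(batch_size, data_batch_dis)
```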
After training one network, one needs to sync the new parameters to the other networks. The code below demonstrates one example of such a use case:
```python
# Train the gen_training
gen_trainer.trainOneDataBatch(batch_size, data_batch_gen)
# Copy the parameters from gen_training to dis_training and generator
copy_shared_parameters(gen_training_machine,
                       dis_training_machine)
copy_shared_parameters(gen_training_machine, generator_machine)
```
## A Toy Example
With the infrastructure explained above, we can now walk you through a toy example that generates a two-dimensional uniform distribution from 10-dimensional Gaussian noise.
The Gaussian noise is generated using the code below:
```python
def get_noise(batch_size, noise_dim):
    return numpy.random.normal(size=(batch_size, noise_dim)).astype('float32')
```
The real samples (2-D uniform) are generated using the code below:
```python
# synthesize 2-D uniform data in gan_trainer.py:114
def load_uniform_data():
    data = numpy.random.rand(1000000, 2).astype('float32')
    return data
```
The generator and discriminator networks are built from fully-connected layers and batch_norm layers, and are defined in gan_conf.py.
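As a rough illustration (a sketch with made-up layer sizes, not the exact definition in gan_conf.py), such a generator could look like:
```python
from paddle.trainer_config_helpers import *

# Hypothetical generator: noise -> FC -> batch_norm -> 2-D sample.
def generator(noise):
    hidden = fc_layer(input=noise, name="gen_hidden", size=64,
                      act=ReluActivation())
    hidden_bn = batch_norm_layer(input=hidden, act=ReluActivation())
    return fc_layer(input=hidden_bn, name="gen_out", size=2,
                    act=LinearActivation())
```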
To train the GAN model, one can use the command below. The flag -d specifies the training data (cifar, mnist or uniform), and the flag --useGpu specifies whether to train on the GPU (0 for CPU, 1 for GPU).
```bash
$ python gan_trainer.py -d uniform --useGpu 1
```
The generated samples can be found in ./uniform_samples/ and one example is shown below as Figure 2. One can see that it roughly recovers the 2D uniform distribution.
<p align="center">
<img src="./uniform_sample.png" width="300" height="300">
</p>
<p align="center">
Figure 2. Uniform Sample
</p>
## MNIST Example
### Data preparation
To download the MNIST data, one can use the following commands:
```bash
$ cd data/
$ ./get_mnist_data.sh
```
### Model description
Following the DCGAN paper (https://arxiv.org/abs/1511.06434), we use convolution/convolution-transpose layers in the discriminator/generator networks to better handle images. The details of the network structures are defined in gan_conf_image.py.
### Training the model
To train the GAN model on MNIST data, one can use the following command:
```bash
$ python gan_trainer.py -d mnist --useGpu 1
```
The generated sample images can be found at ./mnist_samples/ and one example is shown below as Figure 3.
<p align="center">
<img src="./mnist_sample.png" width="300" height="300">
</p>
<p align="center">
Figure 3. MNIST Sample
</p>

Binary file not shown.


Binary file not shown.


@@ -0,0 +1,205 @@
Image Classification Tutorial
=============================
In this tutorial, we will train a convolutional neural network on the CIFAR-10 dataset and use it to classify images. As shown in the figure below, a convolutional neural network can recognize the main object in an image and output the corresponding class.
<center>![Image Classification](./image_classification.png)</center>
## Data Preparation
First, download the CIFAR-10 dataset. The official website of CIFAR-10 is:
<https://www.cs.toronto.edu/~kriz/cifar.html>
We provide a script that downloads the CIFAR-10 dataset from the official website, converts the images to JPEG files, and stores them in a specific directory. Before using this script, please make sure that pillow and its dependencies are installed. You can install them with the following commands:
1. Install pillow
```bash
sudo apt-get install libjpeg-dev
pip install pillow
```
2. Download the dataset
```bash
cd demo/image_classification/data/
sh download_cifar.sh
```
The CIFAR-10 dataset contains 60,000 32x32 color images in 10 classes, with 6,000 images per class. There are 50,000 training images and 10,000 test images.
The figure below shows all ten classes, with 10 images randomly sampled from each class.
<center>![Image Classification](./cifar.png)</center>
After the script finishes, we should get a folder named cifar-out with the following subfolder structure:
```
train
---airplane
---automobile
---bird
---cat
---deer
---dog
---frog
---horse
---ship
---truck
test
---airplane
---automobile
---bird
---cat
---deer
---dog
---frog
---horse
---ship
---truck
```
The cifar-out folder contains two subfolders, `train` and `test`, holding the training and test sets of CIFAR-10, respectively. Each of them contains 10 subfolders, one per class, storing the images of that class. Once the images are organized as above, we can start training the classification model.
## Preprocessing
After downloading, the data still needs to be preprocessed into Paddle's format. We can do this with the following commands:
```
cd demo/image_classification/
sh preprocess.sh
```
`preprocess.sh` calls `./demo/image_classification/preprocess.py` to preprocess the images:
```sh
export PYTHONPATH=$PYTHONPATH:../../
data_dir=./data/cifar-out
python preprocess.py -i $data_dir -s 32 -c 1
```
`./demo/image_classification/preprocess.py` takes the following arguments:
- `-i` or `--input`: the path of the input data;
- `-s` or `--size`: the image size;
- `-c` or `--color`: whether the images are color or grayscale.
## Model Training
Before training, we first need to create a model configuration file. An example configuration is given below. **Note** that it differs slightly from the `vgg_16_cifar.py` file, because that file is also usable for prediction.
```python
from paddle.trainer_config_helpers import *

data_dir = 'data/cifar-out/batches/'
meta_path = data_dir + 'batches.meta'
args = {'meta': meta_path, 'mean_img_size': 32,
        'img_size': 32, 'num_classes': 10,
        'use_jpeg': 1, 'color': "color"}
define_py_data_sources2(train_list=data_dir + "train.list",
                        test_list=data_dir + 'test.list',
                        module='image_provider',
                        obj='processData',
                        args=args)
settings(
    batch_size = 128,
    learning_rate = 0.1 / 128.0,
    learning_method = MomentumOptimizer(0.9),
    regularization = L2Regularization(0.0005 * 128))

img = data_layer(name='image', size=3*32*32)
lbl = data_layer(name="label", size=10)
# small_vgg is predefined in trainer_config_helpers.network
predict = small_vgg(input_image=img, num_channels=3)
outputs(classification_cost(input=predict, label=lbl))
```
The first line imports the functions used to define the network.
```python
from paddle.trainer_config_helpers import *
```
`define_py_data_sources2` uses a Python data provider. Its `args` are consumed by `image_provider.py`, which generates the image data and passes it to the Paddle system:
- `meta`: the mean value of the training set.
- `mean_img_size`: the height and width of the mean feature map.
- `img_size`: the height and width of the input images.
- `num_classes`: the number of classes.
- `use_jpeg`: the data storage format during processing.
- `color`: whether the images are color or grayscale.
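A rough sketch of how these `args` might surface inside `image_provider.py` (hypothetical code; the real provider also decodes the JPEG batches and normalizes each image with the mean image):
```python
from paddle.trainer.PyDataProvider2 import *

def hook(settings, meta, mean_img_size, img_size, num_classes,
         use_jpeg, color, **kwargs):
    # Each key of the config's `args` dict arrives here as a keyword argument.
    settings.img_size = img_size
    settings.input_types = [
        dense_vector(3 * img_size * img_size),  # image pixels
        integer_value(num_classes)              # class label
    ]

@provider(init_hook=hook)
def processData(settings, file_list):
    # The real provider reads the batch files listed in file_list and
    # yields (image_vector, label) pairs; a dummy pair is shown here.
    yield [0.0] * (3 * settings.img_size * settings.img_size), 0
```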
`settings` specifies the training algorithm. In the example below, the learning rate is set to 0.1 divided by the batch size, and the weight decay to 0.0005 multiplied by the batch size.
```python
settings(
    batch_size = 128,
    learning_rate = 0.1 / 128.0,
    learning_method = MomentumOptimizer(0.9),
    regularization = L2Regularization(0.0005 * 128)
)
```
`small_vgg` defines the network structure. Here we use a small VGG network. A description of the VGG convolutional neural network can be found at [http://www.robots.ox.ac.uk/~vgg/research/very_deep/](http://www.robots.ox.ac.uk/~vgg/research/very_deep/).
```python
# small_vgg is predefined in trainer_config_helpers.network
predict = small_vgg(input_image=img, num_channels=3)
```
After the configuration is created, we can run the train.sh script to train the model.
```bash
config=vgg_16_cifar.py
output=./cifar_vgg_model
log=train.log
paddle train \
    --config=$config \
    --dot_period=10 \
    --log_period=100 \
    --test_all_data_in_one_period=1 \
    --use_gpu=1 \
    --save_dir=$output \
    2>&1 | tee $log
python -m paddle.utils.plotcurve -i $log > plot.png
```
- Here we train in GPU mode. If you have no GPU environment, set `use_gpu=0`.
- `./demo/image_classification/vgg_16_cifar.py` is the network and data configuration file. Detailed descriptions of each argument can be found in the command-line arguments documentation.
- The `plotcurve.py` script depends on Python's `matplotlib` module, so if it fails, the reason may be that `matplotlib` needs to be installed.
After training finishes, the training and test error curves are saved to `plot.png` by the `plotcurve.py` script. Below is an example of such an error curve:
<center>![Training and testing curves.](./plot.png)</center>
## Prediction
After training, the model and parameters are saved under `./cifar_vgg_model/pass-%05d`. For example, the model of the 300th pass is saved at `./cifar_vgg_model/pass-00299`.
To predict the class of an image, we can use `predict.sh`, which prints the predicted label:
```
sh predict.sh
```
predict.sh:
```
model=cifar_vgg_model/pass-00299/
image=data/cifar-out/test/airplane/seaplane_s_000978.png
use_gpu=1
python prediction.py $model $image $use_gpu
```
## Exercise
Train a bird image classification model with the VGG model on the CUB-200 dataset. The dataset, which contains photos of 200 bird species (mostly from North America), can be downloaded from:
<http://www.vision.caltech.edu/visipedia/CUB-200.html>
## Delving into Details
### Convolutional Neural Networks
A convolutional neural network is a feedforward neural network built with convolutional layers, and it is well suited for modeling image content. A typical network is shown below:
![Convolutional Neural Network](./lenet.png)
A convolutional neural network contains the following layers:
- Convolutional layer: extracts features from images or feature maps via convolution.
- Pooling layer: downsamples feature maps with max-pooling.
- Fully-connected layer: connects every neuron of the input to every neuron of the hidden layer.
Convolutional neural networks achieve remarkable performance on image classification because they exploit two important kinds of information in images: local correlation and spatial invariance. By alternating convolution and pooling, a convolutional neural network can represent both kinds of information well.
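As a minimal sketch of these layers (illustrative sizes only, written with layers from Paddle's config API rather than the tutorial's predefined small_vgg):
```python
from paddle.trainer_config_helpers import *

# conv -> max-pool -> fully-connected, the basic building blocks of a CNN
img = data_layer(name='image', size=3 * 32 * 32)
conv = img_conv_layer(input=img, filter_size=3, num_filters=64,
                      num_channels=3, padding=1, act=ReluActivation())
pool = img_pool_layer(input=conv, pool_size=2, stride=2,
                      pool_type=MaxPooling())
predict = fc_layer(input=pool, size=10, act=SoftmaxActivation())
```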
For details on how to define the layers of a network and how to connect them, please refer to the Layer documentation.

@@ -147,7 +147,7 @@ for classification. A description of VGG network can be found here [http://www.r
# small_vgg is predefined in trainer_config_helpers.network
predict = small_vgg(input_image=img, num_channels=3)
```
After writing the config, we can train the model by running the script train.sh. Notice that the following script assumes that you run the script in the `./demo/image_classification` folder. If you run the script in a different folder, you need to change the paths of the scripts and the configuration files accordingly.
After writing the config, we can train the model by running the script train.sh.
```bash
config=vgg_16_cifar.py

@@ -68,6 +68,14 @@ void GradientMachine::start() { m->machine->start(); }
void GradientMachine::finish() { m->machine->finish(); }
void GradientMachine::onPassEnd() { m->machine->onPassEnd(); }
void GradientMachine::prefetch(const Arguments& inArgs) {
auto& in =
m->cast<std::vector<paddle::Argument>>(inArgs.getInternalArgumentsPtr());
m->machine->prefetch(in);
}
void GradientMachine::forward(const Arguments& inArgs,
Arguments* outArgs,
PassType passType) {

@@ -20,15 +20,11 @@ limitations under the License. */
#include <string>
#include <vector>
#include "paddle/utils/GlobalConstants.h"
#include "paddle/utils/TypeDefs.h"
#include "paddle/utils/common.h"
/// Import PaddlePaddle's enumeration into global namespace.
using namespace paddle::enumeration_wrapper; // NOLINT
#define DISABLE_COPY_AND_ASSIGN(classname) \
classname(const classname& other); \
classname& operator=(const classname& other)
/**
* @brief Initialize paddle.
*
@@ -102,7 +98,7 @@ const size_t NO_SPARSE_ID = -1UL;
struct MatrixPrivate;
class Matrix {
Matrix(); // User Cannot Create Matrix.
DISABLE_COPY_AND_ASSIGN(Matrix);
DISABLE_COPY(Matrix);
static Matrix* createByPaddleMatrixPtr(void* sharedPtr);
public:
@@ -242,7 +238,7 @@ private:
struct VectorPrivate;
class Vector {
DISABLE_COPY_AND_ASSIGN(Vector);
DISABLE_COPY(Vector);
Vector();
static Vector* createByPaddleVectorPtr(void* ptr);
@@ -322,7 +318,7 @@ private:
struct IVectorPrivate;
class IVector {
IVector();
DISABLE_COPY_AND_ASSIGN(IVector);
DISABLE_COPY(IVector);
static IVector* createByPaddleVectorPtr(void* ptr);
public:
@@ -402,7 +398,7 @@ struct ArgumentsPrivate;
class Arguments {
private:
Arguments(); // Internal Create.
DISABLE_COPY_AND_ASSIGN(Arguments);
DISABLE_COPY(Arguments);
public:
/**
@@ -472,7 +468,7 @@ enum GradientMatchineCreateMode {
struct ParameterConfigPrivate;
class ParameterConfig {
DISABLE_COPY_AND_ASSIGN(ParameterConfig);
DISABLE_COPY(ParameterConfig);
ParameterConfig();
/**
@@ -502,7 +498,7 @@ private:
struct OptimizationConfigPrivate;
class OptimizationConfig {
DISABLE_COPY_AND_ASSIGN(OptimizationConfig);
DISABLE_COPY(OptimizationConfig);
OptimizationConfig();
public:
@@ -527,7 +523,7 @@ struct ParameterPrivate;
class Parameter {
private:
Parameter();
DISABLE_COPY_AND_ASSIGN(Parameter);
DISABLE_COPY(Parameter);
public:
virtual ~Parameter();
@@ -572,7 +568,7 @@ struct ModelConfigPrivate;
class ModelConfig {
private:
ModelConfig();
DISABLE_COPY_AND_ASSIGN(ModelConfig);
DISABLE_COPY(ModelConfig);
public:
virtual ~ModelConfig();
@@ -593,7 +589,7 @@ struct TrainerConfigPrivate;
class TrainerConfig {
private:
TrainerConfig();
DISABLE_COPY_AND_ASSIGN(TrainerConfig);
DISABLE_COPY(TrainerConfig);
public:
virtual ~TrainerConfig();
@@ -633,7 +629,7 @@ public:
struct ParameterTraverseCallbackPrivate;
class ParameterTraverseCallback {
DISABLE_COPY_AND_ASSIGN(ParameterTraverseCallback);
DISABLE_COPY(ParameterTraverseCallback);
ParameterTraverseCallback();
public:
@@ -655,7 +651,7 @@ private:
*/
struct ParameterOptimizerPrivate;
class ParameterOptimizer {
DISABLE_COPY_AND_ASSIGN(ParameterOptimizer);
DISABLE_COPY(ParameterOptimizer);
ParameterOptimizer();
public:
@@ -692,7 +688,7 @@ struct GradientMachinePrivate;
class GradientMachine {
private:
GradientMachine();
DISABLE_COPY_AND_ASSIGN(GradientMachine);
DISABLE_COPY(GradientMachine);
public:
virtual ~GradientMachine();
@@ -725,6 +721,16 @@ public:
void start();
/**
* Prefetch row IDs of sparse parameters.
*/
void prefetch(const Arguments& inArgs);
/**
* Do something when a training pass ends.
*/
void onPassEnd();
/**
* The forward stage of GradientMachine.
*
@@ -866,7 +872,7 @@ struct EvaluatorPrivate;
class Evaluator {
private:
Evaluator();
DISABLE_COPY_AND_ASSIGN(Evaluator);
DISABLE_COPY(Evaluator);
public:
~Evaluator();
@@ -900,7 +906,7 @@ private:
TrainerPrivate* m;
Trainer();
Trainer(TrainerConfig* optConfig, GradientMachine* gm);
DISABLE_COPY_AND_ASSIGN(Trainer);
DISABLE_COPY(Trainer);
public:
virtual ~Trainer();
@@ -966,7 +972,7 @@ public:
struct SequenceGeneratorPrivate;
class SequenceGenerator {
DISABLE_COPY_AND_ASSIGN(SequenceGenerator);
DISABLE_COPY(SequenceGenerator);
SequenceGenerator();
public:

@@ -141,9 +141,12 @@ try:
def c_flag(self):
if self.with_coverage:
return ["-fprofile-arcs", "-ftest-coverage", "-O0", "-g"]
return [
"-fprofile-arcs", "-ftest-coverage", "-O0", "-g",
"-std=c++11"
]
else:
return None
return ["-std=c++11"]
except ImportError:
class PaddleLDFlag(object):

@@ -16,7 +16,31 @@ limitations under the License. */
#define HL_BASE_H_
#include <cstddef>
#include "paddle/utils/TypeDefs.h"
#ifdef PADDLE_TYPE_DOUBLE
#define HL_FLOAT_MAX 3.40282347e+38F
#define HL_FLOAT_MIN 1.17549435e-38F
using real = double;
#else
#define HL_FLOAT_MAX 1.7976931348623157e+308
#define HL_FLOAT_MIN 2.2250738585072014e-308
using real = float;
#endif
/**
* The maximum input value for exp, used to avoid overflow problems.
* Currently only used for the tanh function.
*/
#define EXP_MAX_INPUT 40.0
/**
* @brief DIVUP(x, y) is similar to ceil(x / y).
* @note For CUDA, DIVUP will be used to specify
* the size of blockDim.
*/
#ifndef DIVUP
#define DIVUP(x, y) (((x) + (y)-1) / (y))
#endif
/**
* HPPL is an internal high performance parallel computing library
@@ -181,46 +205,6 @@ typedef struct {
size_t nnz;
} _hl_sparse_matrix_s, *hl_sparse_matrix_s;
#ifndef PADDLE_TYPE_DOUBLE
/**
* HPPL data type: real (float or double)
*
* if real == float
*
* HL_FLOAT_MAX: 3.40282347e+38F
*
* HL_FLOAT_MIN: 1.17549435e-38F
*/
#define HL_FLOAT_MAX 3.40282347e+38F
/**
* if real == double
*
* HL_FLOAT_MAX: 1.7976931348623157e+308
*
* HL_FLOAT_MIN: 2.2250738585072014e-308
*/
#define HL_FLOAT_MIN 1.17549435e-38F
#else
#define HL_FLOAT_MAX 1.7976931348623157e+308
#define HL_FLOAT_MIN 2.2250738585072014e-308
#endif
/**
* The maximum input value for exp, used to avoid overflow problem.
*
* Currently only used for tanh function.
*/
#define EXP_MAX_INPUT 40.0
/**
* @brief DIVUP(x, y) is similar to ceil(x / y).
* @note For CUDA, DIVUP will be used to specify
* the size of blockDim.
*/
#ifndef DIVUP
#define DIVUP(x, y) (((x) + (y)-1) / (y))
#endif
#ifdef __NVCC__
#include "cuda_runtime.h"

@@ -14,11 +14,11 @@ limitations under the License. */
#include "hl_cuda_cudnn.h"
#include <cudnn.h>
#include <gflags/gflags.h>
#include <mutex>
#include "hl_cuda_cudnn.ph"
#include "hl_dso_loader.h"
#include "hl_thread.ph"
#include "paddle/utils/CommandLineParser.h"
#include "paddle/utils/Logging.h"
DEFINE_int32(cudnn_conv_workspace_limit_in_mb,

@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "hl_dso_loader.h"
#include "paddle/utils/CommandLineParser.h"
#include <gflags/gflags.h>
#include "paddle/utils/Logging.h"
DEFINE_string(cudnn_dir,

@@ -34,8 +34,8 @@ limitations under the License. */
#include "paddle/utils/Logging.h"
#include "paddle/utils/Queue.h"
#include "paddle/utils/ThreadLocal.h"
#include "paddle/utils/TypeDefs.h"
#include "paddle/utils/Util.h"
#include "paddle/utils/common.h"
namespace paddle {
/**

@@ -58,6 +58,8 @@ protected:
/// to batch, channels* imagePixels.
void shrinkMat(const MatrixPtr& in, MatrixPtr& out);
void onPassEnd() { firstTest_ = true; }
MatrixPtr tmpMat_, tmpGrad_;
MatrixPtr expandedIn_, expandedOut_;
MatrixPtr expandedInGrad_, expandedOutGrad_, inGrad_;

@@ -16,7 +16,7 @@ limitations under the License. */
#include "ModelConfig.pb.h"
#include "hl_gpu.h"
#include "paddle/utils/TypeDefs.h"
#include "paddle/utils/common.h"
namespace paddle {

@@ -16,7 +16,7 @@ limitations under the License. */
#include "ModelConfig.pb.h"
#include "hl_gpu.h"
#include "paddle/utils/TypeDefs.h"
#include "paddle/utils/common.h"
namespace paddle {

@@ -16,7 +16,7 @@ limitations under the License. */
#include <memory>
#include <random>
#include "paddle/utils/TypeDefs.h"
#include "paddle/utils/common.h"
namespace paddle {

@@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gflags/gflags.h>
#include "Layer.h"
#include "SequenceToBatch.h"
#include "paddle/utils/CommandLineParser.h"
#include "paddle/utils/Stat.h"
DEFINE_bool(rnn_use_batch, false, "Using the batch method for calculation.");

@@ -13,9 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "TestUtil.h"
#include <gflags/gflags.h>
#include "paddle/math/SparseMatrix.h"
#include "paddle/utils/CommandLineParser.h"
DEFINE_int32(fixed_seq_length, 0, "Produce some sequence of fixed length");

Some files were not shown because too many files have changed in this diff.