diff --git a/.travis.yml b/.travis.yml index 6c69b1121e..7de4ec7fc5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,10 +8,13 @@ os: env: - JOB=DOCS - JOB=BUILD_AND_TEST + - JOB=PRE_COMMIT matrix: exclude: - os: osx - env: JOB=DOCS # Only generate documentation in linux + env: JOB=DOCS # Only generate documentation in linux. + - os: osx + env: JOB=PRE_COMMIT # Only check pre-commit hook in linux addons: apt: @@ -39,6 +42,7 @@ addons: - lcov - graphviz - swig + - clang-format-3.8 before_install: - | if [ ${JOB} == "BUILD_AND_TEST" ]; then @@ -53,7 +57,8 @@ before_install: fi - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then sudo paddle/scripts/travis/before_install.linux.sh; fi - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then paddle/scripts/travis/before_install.osx.sh; fi - - pip install wheel protobuf sphinx recommonmark virtualenv numpy sphinx_rtd_theme + - if [[ "$JOB" == "PRE_COMMIT" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi + - pip install wheel protobuf sphinx recommonmark virtualenv numpy sphinx_rtd_theme pre-commit script: - paddle/scripts/travis/main.sh notifications: diff --git a/WORKSPACE b/WORKSPACE index d6ae2af8eb..0b8299905a 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -1,17 +1,15 @@ # External dependency to Google protobuf. http_archive( - name = "protobuf", - url = "http://github.com/google/protobuf/archive/v3.1.0.tar.gz", - sha256 = "0a0ae63cbffc274efb573bdde9a253e3f32e458c41261df51c5dbc5ad541e8f7", - strip_prefix = "protobuf-3.1.0", -) + name="protobuf", + url="http://github.com/google/protobuf/archive/v3.1.0.tar.gz", + sha256="0a0ae63cbffc274efb573bdde9a253e3f32e458c41261df51c5dbc5ad541e8f7", + strip_prefix="protobuf-3.1.0", ) # External dependency to gtest 1.7.0. This method comes from # https://www.bazel.io/versions/master/docs/tutorial/cpp.html. new_http_archive( - name = "gtest", - url = "https://github.com/google/googletest/archive/release-1.7.0.zip", - sha256 = "b58cb7547a28b2c718d1e38aee18a3659c9e3ff52440297e965f5edffe34b6d0", - build_file = "third_party/gtest.BUILD", - strip_prefix = "googletest-release-1.7.0", -) + name="gtest", + url="https://github.com/google/googletest/archive/release-1.7.0.zip", + sha256="b58cb7547a28b2c718d1e38aee18a3659c9e3ff52440297e965f5edffe34b6d0", + build_file="third_party/gtest.BUILD", + strip_prefix="googletest-release-1.7.0", ) diff --git a/benchmark/tensorflow/rnn/run_multi.sh b/benchmark/tensorflow/rnn/run_multi.sh index f7f52e01e3..c2d7dd597e 100755 --- a/benchmark/tensorflow/rnn/run_multi.sh +++ b/benchmark/tensorflow/rnn/run_multi.sh @@ -25,4 +25,3 @@ test 4 2 256 512 test 4 2 512 128 test 4 2 512 256 test 4 2 512 512 - diff --git a/demo/gan/README.md b/demo/gan/README.md index fdc970a07b..1908b534b0 100644 --- a/demo/gan/README.md +++ b/demo/gan/README.md @@ -10,4 +10,4 @@ Then you can run the command below. 
The flag -d specifies the training data (cifar, mnist or uniform) to use. $python gan_trainer.py -d cifar --use_gpu 1 The generated images will be stored in ./cifar_samples/ -The corresponding models will be stored in ./cifar_params/ \ No newline at end of file +The corresponding models will be stored in ./cifar_params/ diff --git a/demo/gan/data/download_cifar.sh b/demo/gan/data/download_cifar.sh index 32e73b3d8e..ae24ef2b7f 100755 --- a/demo/gan/data/download_cifar.sh +++ b/demo/gan/data/download_cifar.sh @@ -15,4 +15,3 @@ set -e wget https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz tar zxf cifar-10-python.tar.gz rm cifar-10-python.tar.gz - diff --git a/demo/gan/data/get_mnist_data.sh b/demo/gan/data/get_mnist_data.sh index d21bf70671..a77c81bf5a 100644 --- a/demo/gan/data/get_mnist_data.sh +++ b/demo/gan/data/get_mnist_data.sh @@ -15,5 +15,3 @@ do gunzip ${fname}.gz fi done - - diff --git a/demo/gan/gan_conf.py b/demo/gan/gan_conf.py index 58ba9dde58..86ac2dffe5 100644 --- a/demo/gan/gan_conf.py +++ b/demo/gan/gan_conf.py @@ -14,10 +14,9 @@ from paddle.trainer_config_helpers import * mode = get_config_arg("mode", str, "generator") -assert mode in set(["generator", - "discriminator", - "generator_training", - "discriminator_training"]) +assert mode in set([ + "generator", "discriminator", "generator_training", "discriminator_training" +]) is_generator_training = mode == "generator_training" is_discriminator_training = mode == "discriminator_training" @@ -38,8 +37,8 @@ sample_dim = 2 settings( batch_size=128, learning_rate=1e-4, - learning_method=AdamOptimizer(beta1=0.5) -) + learning_method=AdamOptimizer(beta1=0.5)) + def discriminator(sample): """ @@ -50,70 +49,87 @@ def discriminator(sample): of the sample is from real data. """ param_attr = ParamAttr(is_static=is_generator_training) - bias_attr = ParamAttr(is_static=is_generator_training, - initial_mean=1.0, - initial_std=0) - - hidden = fc_layer(input=sample, name="dis_hidden", size=hidden_dim, - bias_attr=bias_attr, - param_attr=param_attr, - act=ReluActivation()) - - hidden2 = fc_layer(input=hidden, name="dis_hidden2", size=hidden_dim, - bias_attr=bias_attr, - param_attr=param_attr, - act=LinearActivation()) - - hidden_bn = batch_norm_layer(hidden2, - act=ReluActivation(), - name="dis_hidden_bn", - bias_attr=bias_attr, - param_attr=ParamAttr(is_static=is_generator_training, - initial_mean=1.0, - initial_std=0.02), - use_global_stats=False) - - return fc_layer(input=hidden_bn, name="dis_prob", size=2, - bias_attr=bias_attr, - param_attr=param_attr, - act=SoftmaxActivation()) + bias_attr = ParamAttr( + is_static=is_generator_training, initial_mean=1.0, initial_std=0) + + hidden = fc_layer( + input=sample, + name="dis_hidden", + size=hidden_dim, + bias_attr=bias_attr, + param_attr=param_attr, + act=ReluActivation()) + + hidden2 = fc_layer( + input=hidden, + name="dis_hidden2", + size=hidden_dim, + bias_attr=bias_attr, + param_attr=param_attr, + act=LinearActivation()) + + hidden_bn = batch_norm_layer( + hidden2, + act=ReluActivation(), + name="dis_hidden_bn", + bias_attr=bias_attr, + param_attr=ParamAttr( + is_static=is_generator_training, initial_mean=1.0, + initial_std=0.02), + use_global_stats=False) + + return fc_layer( + input=hidden_bn, + name="dis_prob", + size=2, + bias_attr=bias_attr, + param_attr=param_attr, + act=SoftmaxActivation()) + def generator(noise): """ generator generates a sample given noise """ param_attr = ParamAttr(is_static=is_discriminator_training) - bias_attr = ParamAttr(is_static=is_discriminator_training, - initial_mean=1.0, - 
initial_std=0) - - hidden = fc_layer(input=noise, - name="gen_layer_hidden", - size=hidden_dim, - bias_attr=bias_attr, - param_attr=param_attr, - act=ReluActivation()) - - hidden2 = fc_layer(input=hidden, name="gen_hidden2", size=hidden_dim, - bias_attr=bias_attr, - param_attr=param_attr, - act=LinearActivation()) - - hidden_bn = batch_norm_layer(hidden2, - act=ReluActivation(), - name="gen_layer_hidden_bn", - bias_attr=bias_attr, - param_attr=ParamAttr(is_static=is_discriminator_training, - initial_mean=1.0, - initial_std=0.02), - use_global_stats=False) - - return fc_layer(input=hidden_bn, - name="gen_layer1", - size=sample_dim, - bias_attr=bias_attr, - param_attr=param_attr, - act=LinearActivation()) + bias_attr = ParamAttr( + is_static=is_discriminator_training, initial_mean=1.0, initial_std=0) + + hidden = fc_layer( + input=noise, + name="gen_layer_hidden", + size=hidden_dim, + bias_attr=bias_attr, + param_attr=param_attr, + act=ReluActivation()) + + hidden2 = fc_layer( + input=hidden, + name="gen_hidden2", + size=hidden_dim, + bias_attr=bias_attr, + param_attr=param_attr, + act=LinearActivation()) + + hidden_bn = batch_norm_layer( + hidden2, + act=ReluActivation(), + name="gen_layer_hidden_bn", + bias_attr=bias_attr, + param_attr=ParamAttr( + is_static=is_discriminator_training, + initial_mean=1.0, + initial_std=0.02), + use_global_stats=False) + + return fc_layer( + input=hidden_bn, + name="gen_layer1", + size=sample_dim, + bias_attr=bias_attr, + param_attr=param_attr, + act=LinearActivation()) + if is_generator_training: noise = data_layer(name="noise", size=noise_dim) @@ -126,7 +142,8 @@ if is_generator_training or is_discriminator_training: label = data_layer(name="label", size=1) prob = discriminator(sample) cost = cross_entropy(input=prob, label=label) - classification_error_evaluator(input=prob, label=label, name=mode+'_error') + classification_error_evaluator( + input=prob, label=label, name=mode + '_error') outputs(cost) if is_generator: diff --git a/demo/gan/gan_conf_image.py b/demo/gan/gan_conf_image.py index 5c2b140537..f89a4e706c 100644 --- a/demo/gan/gan_conf_image.py +++ b/demo/gan/gan_conf_image.py @@ -15,10 +15,9 @@ from paddle.trainer_config_helpers import * mode = get_config_arg("mode", str, "generator") dataSource = get_config_arg("data", str, "mnist") -assert mode in set(["generator", - "discriminator", - "generator_training", - "discriminator_training"]) +assert mode in set([ + "generator", "discriminator", "generator_training", "discriminator_training" +]) is_generator_training = mode == "generator_training" is_discriminator_training = mode == "discriminator_training" @@ -36,24 +35,33 @@ noise_dim = 100 gf_dim = 64 df_dim = 64 if dataSource == "mnist": - sample_dim = 28 # image dim - c_dim = 1 # image color + sample_dim = 28 # image dim + c_dim = 1 # image color else: sample_dim = 32 c_dim = 3 -s2, s4 = int(sample_dim/2), int(sample_dim/4), -s8, s16 = int(sample_dim/8), int(sample_dim/16) +s2, s4 = int(sample_dim / 2), int(sample_dim / 4), +s8, s16 = int(sample_dim / 8), int(sample_dim / 16) settings( batch_size=128, learning_rate=2e-4, - learning_method=AdamOptimizer(beta1=0.5) -) + learning_method=AdamOptimizer(beta1=0.5)) -def conv_bn(input, channels, imgSize, num_filters, output_x, stride, name, - param_attr, bias_attr, param_attr_bn, bn, trans=False, - act=ReluActivation()): - + +def conv_bn(input, + channels, + imgSize, + num_filters, + output_x, + stride, + name, + param_attr, + bias_attr, + param_attr_bn, + bn, + trans=False, + act=ReluActivation()): 
""" conv_bn is a utility function that constructs a convolution/deconv layer with an optional batch_norm layer @@ -63,10 +71,10 @@ def conv_bn(input, channels, imgSize, num_filters, output_x, stride, name, :param trans: whether to use conv (False) or deconv (True) :type trans: bool """ - + # calculate the filter_size and padding size based on the given # imgSize and ouput size - tmp = imgSize - (output_x - 1) * stride + tmp = imgSize - (output_x - 1) * stride if tmp <= 1 or tmp > 5: raise ValueError("conv input-output dimension does not fit") elif tmp <= 3: @@ -76,111 +84,134 @@ def conv_bn(input, channels, imgSize, num_filters, output_x, stride, name, filter_size = tmp padding = 0 - print (imgSize, output_x, stride, filter_size, padding) - + print(imgSize, output_x, stride, filter_size, padding) + if trans: nameApx = "_conv" else: nameApx = "_convt" - + if bn: - conv = img_conv_layer(input, filter_size=filter_size, - num_filters=num_filters, - name=name + nameApx, num_channels=channels, - act=LinearActivation(), groups=1, stride=stride, - padding=padding, bias_attr=bias_attr, - param_attr=param_attr, shared_biases=True, layer_attr=None, - filter_size_y=None, stride_y=None, padding_y=None, - trans=trans) - - conv_bn = batch_norm_layer(conv, - act=act, - name=name + nameApx + "_bn", - bias_attr=bias_attr, - param_attr=param_attr_bn, - use_global_stats=False) - + conv = img_conv_layer( + input, + filter_size=filter_size, + num_filters=num_filters, + name=name + nameApx, + num_channels=channels, + act=LinearActivation(), + groups=1, + stride=stride, + padding=padding, + bias_attr=bias_attr, + param_attr=param_attr, + shared_biases=True, + layer_attr=None, + filter_size_y=None, + stride_y=None, + padding_y=None, + trans=trans) + + conv_bn = batch_norm_layer( + conv, + act=act, + name=name + nameApx + "_bn", + bias_attr=bias_attr, + param_attr=param_attr_bn, + use_global_stats=False) + return conv_bn else: - conv = img_conv_layer(input, filter_size=filter_size, - num_filters=num_filters, - name=name + nameApx, num_channels=channels, - act=act, groups=1, stride=stride, - padding=padding, bias_attr=bias_attr, - param_attr=param_attr, shared_biases=True, layer_attr=None, - filter_size_y=None, stride_y=None, padding_y=None, - trans=trans) + conv = img_conv_layer( + input, + filter_size=filter_size, + num_filters=num_filters, + name=name + nameApx, + num_channels=channels, + act=act, + groups=1, + stride=stride, + padding=padding, + bias_attr=bias_attr, + param_attr=param_attr, + shared_biases=True, + layer_attr=None, + filter_size_y=None, + stride_y=None, + padding_y=None, + trans=trans) return conv - + + def generator(noise): """ generator generates a sample given noise """ - param_attr = ParamAttr(is_static=is_discriminator_training, - initial_mean=0.0, - initial_std=0.02) - bias_attr = ParamAttr(is_static=is_discriminator_training, - initial_mean=0.0, - initial_std=0.0) - - param_attr_bn=ParamAttr(is_static=is_discriminator_training, - initial_mean=1.0, - initial_std=0.02) - - h1 = fc_layer(input=noise, - name="gen_layer_h1", - size=s8 * s8 * gf_dim * 4, - bias_attr=bias_attr, - param_attr=param_attr, - act=LinearActivation()) - - h1_bn = batch_norm_layer(h1, - act=ReluActivation(), - name="gen_layer_h1_bn", - bias_attr=bias_attr, - param_attr=param_attr_bn, - use_global_stats=False) - - h2_bn = conv_bn(h1_bn, - channels=gf_dim*4, - output_x=s8, - num_filters=gf_dim*2, - imgSize=s4, - stride=2, - name="gen_layer_h2", - param_attr=param_attr, - bias_attr=bias_attr, - param_attr_bn=param_attr_bn, 
- bn=True, - trans=True) - - h3_bn = conv_bn(h2_bn, - channels=gf_dim*2, - output_x=s4, - num_filters=gf_dim, - imgSize=s2, - stride=2, - name="gen_layer_h3", - param_attr=param_attr, - bias_attr=bias_attr, - param_attr_bn=param_attr_bn, - bn=True, - trans=True) - - - return conv_bn(h3_bn, - channels=gf_dim, - output_x=s2, - num_filters=c_dim, - imgSize=sample_dim, - stride=2, - name="gen_layer_h4", - param_attr=param_attr, - bias_attr=bias_attr, - param_attr_bn=param_attr_bn, - bn=False, - trans=True, - act=TanhActivation()) + param_attr = ParamAttr( + is_static=is_discriminator_training, initial_mean=0.0, initial_std=0.02) + bias_attr = ParamAttr( + is_static=is_discriminator_training, initial_mean=0.0, initial_std=0.0) + + param_attr_bn = ParamAttr( + is_static=is_discriminator_training, initial_mean=1.0, initial_std=0.02) + + h1 = fc_layer( + input=noise, + name="gen_layer_h1", + size=s8 * s8 * gf_dim * 4, + bias_attr=bias_attr, + param_attr=param_attr, + act=LinearActivation()) + + h1_bn = batch_norm_layer( + h1, + act=ReluActivation(), + name="gen_layer_h1_bn", + bias_attr=bias_attr, + param_attr=param_attr_bn, + use_global_stats=False) + + h2_bn = conv_bn( + h1_bn, + channels=gf_dim * 4, + output_x=s8, + num_filters=gf_dim * 2, + imgSize=s4, + stride=2, + name="gen_layer_h2", + param_attr=param_attr, + bias_attr=bias_attr, + param_attr_bn=param_attr_bn, + bn=True, + trans=True) + + h3_bn = conv_bn( + h2_bn, + channels=gf_dim * 2, + output_x=s4, + num_filters=gf_dim, + imgSize=s2, + stride=2, + name="gen_layer_h3", + param_attr=param_attr, + bias_attr=bias_attr, + param_attr_bn=param_attr_bn, + bn=True, + trans=True) + + return conv_bn( + h3_bn, + channels=gf_dim, + output_x=s2, + num_filters=c_dim, + imgSize=sample_dim, + stride=2, + name="gen_layer_h4", + param_attr=param_attr, + bias_attr=bias_attr, + param_attr_bn=param_attr_bn, + bn=False, + trans=True, + act=TanhActivation()) def discriminator(sample): @@ -191,58 +222,60 @@ def discriminator(sample): of the sample is from generator and dimension 1 is the probabblity of the sample is from real data. 
""" - param_attr = ParamAttr(is_static=is_generator_training, - initial_mean=0.0, - initial_std=0.02) - bias_attr = ParamAttr(is_static=is_generator_training, - initial_mean=0.0, - initial_std=0.0) - - param_attr_bn=ParamAttr(is_static=is_generator_training, - initial_mean=1.0, - initial_std=0.02) - - h0 = conv_bn(sample, - channels=c_dim, - imgSize=sample_dim, - num_filters=df_dim, - output_x=s2, - stride=2, - name="dis_h0", - param_attr=param_attr, - bias_attr=bias_attr, - param_attr_bn=param_attr_bn, - bn=False) - - h1_bn = conv_bn(h0, - channels=df_dim, - imgSize=s2, - num_filters=df_dim*2, - output_x=s4, - stride=2, - name="dis_h1", - param_attr=param_attr, - bias_attr=bias_attr, - param_attr_bn=param_attr_bn, - bn=True) - - h2_bn = conv_bn(h1_bn, - channels=df_dim*2, - imgSize=s4, - num_filters=df_dim*4, - output_x=s8, - stride=2, - name="dis_h2", - param_attr=param_attr, - bias_attr=bias_attr, - param_attr_bn=param_attr_bn, - bn=True) - - return fc_layer(input=h2_bn, name="dis_prob", size=2, - bias_attr=bias_attr, - param_attr=param_attr, - act=SoftmaxActivation()) + param_attr = ParamAttr( + is_static=is_generator_training, initial_mean=0.0, initial_std=0.02) + bias_attr = ParamAttr( + is_static=is_generator_training, initial_mean=0.0, initial_std=0.0) + + param_attr_bn = ParamAttr( + is_static=is_generator_training, initial_mean=1.0, initial_std=0.02) + + h0 = conv_bn( + sample, + channels=c_dim, + imgSize=sample_dim, + num_filters=df_dim, + output_x=s2, + stride=2, + name="dis_h0", + param_attr=param_attr, + bias_attr=bias_attr, + param_attr_bn=param_attr_bn, + bn=False) + + h1_bn = conv_bn( + h0, + channels=df_dim, + imgSize=s2, + num_filters=df_dim * 2, + output_x=s4, + stride=2, + name="dis_h1", + param_attr=param_attr, + bias_attr=bias_attr, + param_attr_bn=param_attr_bn, + bn=True) + + h2_bn = conv_bn( + h1_bn, + channels=df_dim * 2, + imgSize=s4, + num_filters=df_dim * 4, + output_x=s8, + stride=2, + name="dis_h2", + param_attr=param_attr, + bias_attr=bias_attr, + param_attr_bn=param_attr_bn, + bn=True) + return fc_layer( + input=h2_bn, + name="dis_prob", + size=2, + bias_attr=bias_attr, + param_attr=param_attr, + act=SoftmaxActivation()) if is_generator_training: @@ -250,13 +283,14 @@ if is_generator_training: sample = generator(noise) if is_discriminator_training: - sample = data_layer(name="sample", size=sample_dim * sample_dim*c_dim) + sample = data_layer(name="sample", size=sample_dim * sample_dim * c_dim) if is_generator_training or is_discriminator_training: label = data_layer(name="label", size=1) prob = discriminator(sample) cost = cross_entropy(input=prob, label=label) - classification_error_evaluator(input=prob, label=label, name=mode+'_error') + classification_error_evaluator( + input=prob, label=label, name=mode + '_error') outputs(cost) if is_generator: diff --git a/demo/gan/gan_trainer.py b/demo/gan/gan_trainer.py index a8c1bd0414..4a26c230f7 100644 --- a/demo/gan/gan_trainer.py +++ b/demo/gan/gan_trainer.py @@ -16,7 +16,7 @@ import argparse import random import numpy import cPickle -import sys,os +import sys, os from PIL import Image from paddle.trainer.config_parser import parse_config @@ -24,6 +24,7 @@ from paddle.trainer.config_parser import logger import py_paddle.swig_paddle as api import matplotlib.pyplot as plt + def plot2DScatter(data, outputfile): ''' Plot the data as a 2D scatter plot and save to outputfile @@ -41,9 +42,11 @@ def plot2DScatter(data, outputfile): plt.scatter(x, y) plt.savefig(outputfile, bbox_inches='tight') + def CHECK_EQ(a, b): 
assert a == b, "a=%s, b=%s" % (a, b) + def copy_shared_parameters(src, dst): ''' copy the parameters from src to dst @@ -52,11 +55,9 @@ def copy_shared_parameters(src, dst): :param dst: the destination of the parameters :type dst: GradientMachine ''' - src_params = [src.getParameter(i) - for i in xrange(src.getParameterSize())] + src_params = [src.getParameter(i) for i in xrange(src.getParameterSize())] src_params = dict([(p.getName(), p) for p in src_params]) - for i in xrange(dst.getParameterSize()): dst_param = dst.getParameter(i) src_param = src_params.get(dst_param.getName(), None) @@ -67,15 +68,17 @@ def copy_shared_parameters(src, dst): CHECK_EQ(len(src_value), len(dst_value)) dst_value.copyFrom(src_value) dst_param.setValueUpdated() - + + def print_parameters(src): - src_params = [src.getParameter(i) - for i in xrange(src.getParameterSize())] + src_params = [src.getParameter(i) for i in xrange(src.getParameterSize())] print "***************" for p in src_params: print "Name is %s" % p.getName() - print "value is %s \n" % p.getBuf(api.PARAMETER_VALUE).copyToNumpyArray() + print "value is %s \n" % p.getBuf(api.PARAMETER_VALUE).copyToNumpyArray( + ) + def load_mnist_data(imageFile): f = open(imageFile, "rb") @@ -86,33 +89,36 @@ def load_mnist_data(imageFile): n = 60000 else: n = 10000 - - data = numpy.fromfile(f, 'ubyte', count=n*28*28).reshape((n, 28*28)) + + data = numpy.fromfile(f, 'ubyte', count=n * 28 * 28).reshape((n, 28 * 28)) data = data / 255.0 * 2.0 - 1.0 f.close() return data.astype('float32') + def load_cifar_data(cifar_path): batch_size = 10000 - data = numpy.zeros((5*batch_size, 32*32*3), dtype = "float32") + data = numpy.zeros((5 * batch_size, 32 * 32 * 3), dtype="float32") for i in range(1, 6): file = cifar_path + "/data_batch_" + str(i) fo = open(file, 'rb') dict = cPickle.load(fo) fo.close() - data[(i - 1)*batch_size:(i*batch_size), :] = dict["data"] - + data[(i - 1) * batch_size:(i * batch_size), :] = dict["data"] + data = data / 255.0 * 2.0 - 1.0 return data + # synthesize 2-D uniform data def load_uniform_data(): data = numpy.random.rand(1000000, 2).astype('float32') return data + def merge(images, size): - if images.shape[1] == 28*28: + if images.shape[1] == 28 * 28: h, w, c = 28, 28, 1 else: h, w, c = 32, 32, 3 @@ -124,6 +130,7 @@ def merge(images, size): ((images[idx, :].reshape((h, w, c), order="F").transpose(1, 0, 2) + 1.0) / 2.0 * 255.0) return img.astype('uint8') + def save_images(images, path): merged_img = merge(images, [8, 8]) if merged_img.shape[2] == 1: @@ -131,14 +138,17 @@ def save_images(images, path): else: im = Image.fromarray(merged_img, mode="RGB") im.save(path) - + + def get_real_samples(batch_size, data_np): - return data_np[numpy.random.choice(data_np.shape[0], batch_size, - replace=False),:] - + return data_np[numpy.random.choice( + data_np.shape[0], batch_size, replace=False), :] + + def get_noise(batch_size, noise_dim): return numpy.random.normal(size=(batch_size, noise_dim)).astype('float32') + def get_fake_samples(generator_machine, batch_size, noise): gen_inputs = api.Arguments.createArguments(1) gen_inputs.setSlotValue(0, api.Matrix.createDenseFromNumpy(noise)) @@ -147,12 +157,14 @@ def get_fake_samples(generator_machine, batch_size, noise): fake_samples = gen_outputs.getSlotValue(0).copyToNumpyMat() return fake_samples + def get_training_loss(training_machine, inputs): outputs = api.Arguments.createArguments(0) training_machine.forward(inputs, outputs, api.PASS_TEST) loss = outputs.getSlotValue(0).copyToNumpyMat() return 
numpy.mean(loss) + def prepare_discriminator_data_batch_pos(batch_size, data_np): real_samples = get_real_samples(batch_size, data_np) labels = numpy.ones(batch_size, dtype='int32') @@ -161,6 +173,7 @@ def prepare_discriminator_data_batch_pos(batch_size, data_np): inputs.setSlotIds(1, api.IVector.createVectorFromNumpy(labels)) return inputs + def prepare_discriminator_data_batch_neg(generator_machine, batch_size, noise): fake_samples = get_fake_samples(generator_machine, batch_size, noise) labels = numpy.zeros(batch_size, dtype='int32') @@ -169,6 +182,7 @@ def prepare_discriminator_data_batch_neg(generator_machine, batch_size, noise): inputs.setSlotIds(1, api.IVector.createVectorFromNumpy(labels)) return inputs + def prepare_generator_data_batch(batch_size, noise): label = numpy.ones(batch_size, dtype='int32') inputs = api.Arguments.createArguments(2) @@ -193,10 +207,9 @@ def get_layer_size(model_conf, layer_name): def main(): parser = argparse.ArgumentParser() parser.add_argument("-d", "--data_source", help="mnist or cifar or uniform") - parser.add_argument("--use_gpu", default="1", - help="1 means use gpu for training") - parser.add_argument("--gpu_id", default="0", - help="the gpu_id parameter") + parser.add_argument( + "--use_gpu", default="1", help="1 means use gpu for training") + parser.add_argument("--gpu_id", default="0", help="the gpu_id parameter") args = parser.parse_args() data_source = args.data_source use_gpu = args.use_gpu @@ -208,30 +221,32 @@ def main(): if not os.path.exists("./%s_params/" % data_source): os.makedirs("./%s_params/" % data_source) - - api.initPaddle('--use_gpu=' + use_gpu, '--dot_period=10', '--log_period=100', - '--gpu_id=' + args.gpu_id, '--save_dir=' + "./%s_params/" % data_source) - + + api.initPaddle('--use_gpu=' + use_gpu, '--dot_period=10', + '--log_period=100', '--gpu_id=' + args.gpu_id, + '--save_dir=' + "./%s_params/" % data_source) + if data_source == "uniform": conf = "gan_conf.py" num_iter = 10000 else: conf = "gan_conf_image.py" num_iter = 1000 - + gen_conf = parse_config(conf, "mode=generator_training,data=" + data_source) - dis_conf = parse_config(conf, "mode=discriminator_training,data=" + data_source) + dis_conf = parse_config(conf, + "mode=discriminator_training,data=" + data_source) generator_conf = parse_config(conf, "mode=generator,data=" + data_source) batch_size = dis_conf.opt_config.batch_size noise_dim = get_layer_size(gen_conf.model_config, "noise") - + if data_source == "mnist": data_np = load_mnist_data("./data/mnist_data/train-images-idx3-ubyte") elif data_source == "cifar": data_np = load_cifar_data("./data/cifar-10-batches-py/") else: data_np = load_uniform_data() - + # this creates a gradient machine for discriminator dis_training_machine = api.GradientMachine.createFromConfigProto( dis_conf.model_config) @@ -244,26 +259,24 @@ def main(): logger.info(str(generator_conf.model_config)) generator_machine = api.GradientMachine.createFromConfigProto( generator_conf.model_config) - - dis_trainer = api.Trainer.create( - dis_conf, dis_training_machine) - gen_trainer = api.Trainer.create( - gen_conf, gen_training_machine) - + dis_trainer = api.Trainer.create(dis_conf, dis_training_machine) + + gen_trainer = api.Trainer.create(gen_conf, gen_training_machine) + dis_trainer.startTrain() gen_trainer.startTrain() - + # Sync parameters between networks (GradientMachine) at the beginning copy_shared_parameters(gen_training_machine, dis_training_machine) copy_shared_parameters(gen_training_machine, generator_machine) - + # constrain that 
either discriminator or generator can not be trained # consecutively more than MAX_strike times curr_train = "dis" curr_strike = 0 MAX_strike = 5 - + for train_pass in xrange(100): dis_trainer.startTrainPass() gen_trainer.startTrainPass() @@ -272,23 +285,25 @@ def main(): noise = get_noise(batch_size, noise_dim) data_batch_dis_pos = prepare_discriminator_data_batch_pos( batch_size, data_np) - dis_loss_pos = get_training_loss(dis_training_machine, data_batch_dis_pos) - + dis_loss_pos = get_training_loss(dis_training_machine, + data_batch_dis_pos) + data_batch_dis_neg = prepare_discriminator_data_batch_neg( generator_machine, batch_size, noise) - dis_loss_neg = get_training_loss(dis_training_machine, data_batch_dis_neg) - + dis_loss_neg = get_training_loss(dis_training_machine, + data_batch_dis_neg) + dis_loss = (dis_loss_pos + dis_loss_neg) / 2.0 - + # Do forward pass in generator to get the gen_loss - data_batch_gen = prepare_generator_data_batch( - batch_size, noise) + data_batch_gen = prepare_generator_data_batch(batch_size, noise) gen_loss = get_training_loss(gen_training_machine, data_batch_gen) - + if i % 100 == 0: - print "d_pos_loss is %s d_neg_loss is %s" % (dis_loss_pos, dis_loss_neg) + print "d_pos_loss is %s d_neg_loss is %s" % (dis_loss_pos, + dis_loss_neg) print "d_loss is %s g_loss is %s" % (dis_loss, gen_loss) - + # Decide which network to train based on the training history # And the relative size of the loss if (not (curr_train == "dis" and curr_strike == MAX_strike)) and \ @@ -297,11 +312,12 @@ def main(): curr_strike += 1 else: curr_train = "dis" - curr_strike = 1 + curr_strike = 1 dis_trainer.trainOneDataBatch(batch_size, data_batch_dis_neg) - dis_trainer.trainOneDataBatch(batch_size, data_batch_dis_pos) - copy_shared_parameters(dis_training_machine, gen_training_machine) - + dis_trainer.trainOneDataBatch(batch_size, data_batch_dis_pos) + copy_shared_parameters(dis_training_machine, + gen_training_machine) + else: if curr_train == "gen": curr_strike += 1 @@ -311,19 +327,23 @@ def main(): gen_trainer.trainOneDataBatch(batch_size, data_batch_gen) # TODO: add API for paddle to allow true parameter sharing between different GradientMachines # so that we do not need to copy shared parameters. - copy_shared_parameters(gen_training_machine, dis_training_machine) + copy_shared_parameters(gen_training_machine, + dis_training_machine) copy_shared_parameters(gen_training_machine, generator_machine) - + dis_trainer.finishTrainPass() gen_trainer.finishTrainPass() # At the end of each pass, save the generated samples/images fake_samples = get_fake_samples(generator_machine, batch_size, noise) if data_source == "uniform": - plot2DScatter(fake_samples, "./%s_samples/train_pass%s.png" % (data_source, train_pass)) + plot2DScatter(fake_samples, "./%s_samples/train_pass%s.png" % + (data_source, train_pass)) else: - save_images(fake_samples, "./%s_samples/train_pass%s.png" % (data_source, train_pass)) + save_images(fake_samples, "./%s_samples/train_pass%s.png" % + (data_source, train_pass)) dis_trainer.finishTrain() gen_trainer.finishTrain() + if __name__ == '__main__': main() diff --git a/demo/quick_start/trainer_config.resnet-lstm.py b/demo/quick_start/trainer_config.resnet-lstm.py index 5bed925d84..89a837abb7 100644 --- a/demo/quick_start/trainer_config.resnet-lstm.py +++ b/demo/quick_start/trainer_config.resnet-lstm.py @@ -13,7 +13,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. - """ This configuration is a demonstration of how to implement the stacked LSTM with residual connections, i.e. an LSTM layer takes the sum of the hidden states @@ -46,11 +45,12 @@ is_predict = get_config_arg('is_predict', bool, False) trn = 'data/train.list' if not is_predict else None tst = 'data/test.list' if not is_predict else 'data/pred.list' process = 'process' if not is_predict else 'process_predict' -define_py_data_sources2(train_list=trn, - test_list=tst, - module="dataprovider_emb", - obj=process, - args={"dictionary": word_dict}) +define_py_data_sources2( + train_list=trn, + test_list=tst, + module="dataprovider_emb", + obj=process, + args={"dictionary": word_dict}) batch_size = 128 if not is_predict else 1 settings( @@ -58,10 +58,9 @@ settings( learning_rate=2e-3, learning_method=AdamOptimizer(), regularization=L2Regularization(8e-4), - gradient_clipping_threshold=25 -) + gradient_clipping_threshold=25) -bias_attr = ParamAttr(initial_std=0.,l2_rate=0.) +bias_attr = ParamAttr(initial_std=0., l2_rate=0.) data = data_layer(name="word", size=len(word_dict)) emb = embedding_layer(input=data, size=128) @@ -73,17 +72,15 @@ for i in range(3): # The input to the current layer is the sum of the hidden state # and input of the previous layer. current_input = addto_layer(input=[previous_input, previous_hidden_state]) - hidden_state = simple_lstm(input=current_input, size=128, - lstm_cell_attr=ExtraAttr(drop_rate=0.1)) + hidden_state = simple_lstm( + input=current_input, size=128, lstm_cell_attr=ExtraAttr(drop_rate=0.1)) previous_input, previous_hidden_state = current_input, hidden_state lstm = previous_hidden_state lstm_last = pooling_layer(input=lstm, pooling_type=MaxPooling()) -output = fc_layer(input=lstm_last, size=2, - bias_attr=bias_attr, - act=SoftmaxActivation()) - +output = fc_layer( + input=lstm_last, size=2, bias_attr=bias_attr, act=SoftmaxActivation()) if is_predict: maxid = maxid_layer(output) diff --git a/demo/semantic_role_labeling/data/extract_dict_feature.py b/demo/semantic_role_labeling/data/extract_dict_feature.py index 123df022f5..a02a49a86e 100644 --- a/demo/semantic_role_labeling/data/extract_dict_feature.py +++ b/demo/semantic_role_labeling/data/extract_dict_feature.py @@ -33,7 +33,7 @@ def extract_dict_features(pair_file, feature_file): ctx_n1 = sentence_list[verb_index - 1] else: ctx_n1 = 'bos' - + if verb_index > 1: mark[verb_index - 2] = 1 ctx_n2 = sentence_list[verb_index - 2] @@ -48,7 +48,7 @@ def extract_dict_features(pair_file, feature_file): ctx_p1 = sentence_list[verb_index + 1] else: ctx_p1 = 'eos' - + if verb_index < len(labels_list) - 3: mark[verb_index + 2] = 1 ctx_p2 = sentence_list[verb_index + 2] @@ -69,7 +69,6 @@ def extract_dict_features(pair_file, feature_file): feature_out.write(feature_str + '\n') - if __name__ == '__main__': usage = '-p pair_file -f feature_file' diff --git a/demo/semantic_role_labeling/data/extract_pairs.py b/demo/semantic_role_labeling/data/extract_pairs.py index 2d0d535c53..94a8488c16 100644 --- a/demo/semantic_role_labeling/data/extract_pairs.py +++ b/demo/semantic_role_labeling/data/extract_pairs.py @@ -66,8 +66,8 @@ def transform_labels(sentences, labels): else: verb_list = [] for x in labels[i][0]: - if x !='-': - verb_list.append(x) + if x != '-': + verb_list.append(x) for j in xrange(1, len(labels[i])): label_list = labels[i][j] @@ -93,7 +93,7 @@ def transform_labels(sentences, labels): is_in_bracket = True else: 
print 'error:', ll - sen_lab_pair.append((sentences[i], verb_list[j-1], label_seq)) + sen_lab_pair.append((sentences[i], verb_list[j - 1], label_seq)) return sen_lab_pair @@ -103,7 +103,7 @@ def write_file(sen_lab_pair, output_file): sentence = x[0] label_seq = ' '.join(x[2]) assert len(sentence.split()) == len(x[2]) - fout.write(sentence + '\t' + x[1]+'\t' +label_seq + '\n') + fout.write(sentence + '\t' + x[1] + '\t' + label_seq + '\n') if __name__ == '__main__': diff --git a/demo/semantic_role_labeling/dataprovider.py b/demo/semantic_role_labeling/dataprovider.py index d12f10bfcb..042cd4e7a9 100644 --- a/demo/semantic_role_labeling/dataprovider.py +++ b/demo/semantic_role_labeling/dataprovider.py @@ -21,7 +21,7 @@ def hook(settings, word_dict, label_dict, predicate_dict, **kwargs): settings.word_dict = word_dict settings.label_dict = label_dict settings.predicate_dict = predicate_dict - + #all inputs are integral and sequential type settings.slots = [ integer_value_sequence(len(word_dict)), @@ -29,25 +29,28 @@ def hook(settings, word_dict, label_dict, predicate_dict, **kwargs): integer_value_sequence(len(word_dict)), integer_value_sequence(len(word_dict)), integer_value_sequence(len(word_dict)), - integer_value_sequence(len(word_dict)), - integer_value_sequence(len(predicate_dict)), - integer_value_sequence(2), + integer_value_sequence(len(word_dict)), + integer_value_sequence(len(predicate_dict)), integer_value_sequence(2), integer_value_sequence(len(label_dict)) ] def get_batch_size(yeild_data): return len(yeild_data[0]) - -@provider(init_hook=hook, should_shuffle=True, calc_batch_size=get_batch_size, - can_over_batch_size=False, cache=CacheType.CACHE_PASS_IN_MEM) + +@provider( + init_hook=hook, + should_shuffle=True, + calc_batch_size=get_batch_size, + can_over_batch_size=False, + cache=CacheType.CACHE_PASS_IN_MEM) def process(settings, file_name): with open(file_name, 'r') as fdata: for line in fdata: sentence, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, label = \ line.strip().split('\t') - + words = sentence.split() sen_len = len(words) word_slot = [settings.word_dict.get(w, UNK_IDX) for w in words] diff --git a/demo/semantic_role_labeling/db_lstm.py b/demo/semantic_role_labeling/db_lstm.py index 75946bd72e..04e2a559b1 100644 --- a/demo/semantic_role_labeling/db_lstm.py +++ b/demo/semantic_role_labeling/db_lstm.py @@ -20,7 +20,7 @@ from paddle.trainer_config_helpers import * #file paths word_dict_file = './data/wordDict.txt' label_dict_file = './data/targetDict.txt' -predicate_file= './data/verbDict.txt' +predicate_file = './data/verbDict.txt' train_list_file = './data/train.list' test_list_file = './data/test.list' @@ -47,7 +47,6 @@ if not is_predict: w = line.strip() predicate_dict[w] = i - if is_test: train_list_file = None @@ -57,9 +56,11 @@ if not is_predict: test_list=test_list_file, module='dataprovider', obj='process', - args={'word_dict': word_dict, - 'label_dict': label_dict, - 'predicate_dict': predicate_dict }) + args={ + 'word_dict': word_dict, + 'label_dict': label_dict, + 'predicate_dict': predicate_dict + }) word_dict_len = len(word_dict) label_dict_len = len(label_dict) @@ -77,24 +78,16 @@ mark_dim = 5 hidden_dim = 512 depth = 8 - - ########################### Optimizer ####################################### - settings( batch_size=150, learning_method=MomentumOptimizer(momentum=0), learning_rate=2e-2, regularization=L2Regularization(8e-4), is_async=False, - model_average=ModelAverage(average_window=0.5, - max_average_window=10000), - -) - - - + 
model_average=ModelAverage( + average_window=0.5, max_average_window=10000), ) ####################################### network ############################## #8 features and 1 target @@ -108,22 +101,28 @@ ctx_p1 = data_layer(name='ctx_p1_data', size=word_dict_len) ctx_p2 = data_layer(name='ctx_p2_data', size=word_dict_len) mark = data_layer(name='mark_data', size=mark_dict_len) - if not is_predict: target = data_layer(name='target', size=label_dict_len) - -default_std=1/math.sqrt(hidden_dim)/3.0 +default_std = 1 / math.sqrt(hidden_dim) / 3.0 emb_para = ParameterAttribute(name='emb', initial_std=0., learning_rate=0.) std_0 = ParameterAttribute(initial_std=0.) -std_default = ParameterAttribute(initial_std=default_std) - -predicate_embedding = embedding_layer(size=word_dim, input=predicate, param_attr=ParameterAttribute(name='vemb',initial_std=default_std)) -mark_embedding = embedding_layer(name='word_ctx-in_embedding', size=mark_dim, input=mark, param_attr=std_0) - -word_input=[word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2] -emb_layers = [embedding_layer(size=word_dim, input=x, param_attr=emb_para) for x in word_input] +std_default = ParameterAttribute(initial_std=default_std) + +predicate_embedding = embedding_layer( + size=word_dim, + input=predicate, + param_attr=ParameterAttribute( + name='vemb', initial_std=default_std)) +mark_embedding = embedding_layer( + name='word_ctx-in_embedding', size=mark_dim, input=mark, param_attr=std_0) + +word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2] +emb_layers = [ + embedding_layer( + size=word_dim, input=x, param_attr=emb_para) for x in word_input +] emb_layers.append(predicate_embedding) emb_layers.append(mark_embedding) @@ -131,84 +130,89 @@ hidden_0 = mixed_layer( name='hidden0', size=hidden_dim, bias_attr=std_default, - input=[ full_matrix_projection(input=emb, param_attr=std_default ) for emb in emb_layers ]) - + input=[ + full_matrix_projection( + input=emb, param_attr=std_default) for emb in emb_layers + ]) mix_hidden_lr = 1e-3 lstm_para_attr = ParameterAttribute(initial_std=0.0, learning_rate=1.0) -hidden_para_attr = ParameterAttribute(initial_std=default_std, learning_rate=mix_hidden_lr) - -lstm_0 = lstmemory(name='lstm0', - input=hidden_0, - act=ReluActivation(), - gate_act=SigmoidActivation(), - state_act=SigmoidActivation(), - bias_attr=std_0, - param_attr=lstm_para_attr) +hidden_para_attr = ParameterAttribute( + initial_std=default_std, learning_rate=mix_hidden_lr) + +lstm_0 = lstmemory( + name='lstm0', + input=hidden_0, + act=ReluActivation(), + gate_act=SigmoidActivation(), + state_act=SigmoidActivation(), + bias_attr=std_0, + param_attr=lstm_para_attr) #stack L-LSTM and R-LSTM with direct edges input_tmp = [hidden_0, lstm_0] - for i in range(1, depth): - mix_hidden = mixed_layer(name='hidden'+str(i), - size=hidden_dim, - bias_attr=std_default, - input=[full_matrix_projection(input=input_tmp[0], param_attr=hidden_para_attr), - full_matrix_projection(input=input_tmp[1], param_attr=lstm_para_attr) - ] - ) - - lstm = lstmemory(name='lstm'+str(i), - input=mix_hidden, - act=ReluActivation(), - gate_act=SigmoidActivation(), - state_act=SigmoidActivation(), - reverse=((i % 2)==1), - bias_attr=std_0, - param_attr=lstm_para_attr) + mix_hidden = mixed_layer( + name='hidden' + str(i), + size=hidden_dim, + bias_attr=std_default, + input=[ + full_matrix_projection( + input=input_tmp[0], param_attr=hidden_para_attr), + full_matrix_projection( + input=input_tmp[1], param_attr=lstm_para_attr) + ]) + + lstm = lstmemory( + name='lstm' + str(i), + 
input=mix_hidden, + act=ReluActivation(), + gate_act=SigmoidActivation(), + state_act=SigmoidActivation(), + reverse=((i % 2) == 1), + bias_attr=std_0, + param_attr=lstm_para_attr) input_tmp = [mix_hidden, lstm] -feature_out = mixed_layer(name='output', - size=label_dict_len, - bias_attr=std_default, - input=[full_matrix_projection(input=input_tmp[0], param_attr=hidden_para_attr), - full_matrix_projection(input=input_tmp[1], param_attr=lstm_para_attr) - ], - ) - - +feature_out = mixed_layer( + name='output', + size=label_dict_len, + bias_attr=std_default, + input=[ + full_matrix_projection( + input=input_tmp[0], param_attr=hidden_para_attr), + full_matrix_projection( + input=input_tmp[1], param_attr=lstm_para_attr) + ], ) if not is_predict: - crf_l = crf_layer( name = 'crf', - size = label_dict_len, - input = feature_out, - label = target, - param_attr=ParameterAttribute(name='crfw',initial_std=default_std, learning_rate=mix_hidden_lr) - - ) - - - crf_dec_l = crf_decoding_layer(name = 'crf_dec_l', - size = label_dict_len, - input = feature_out, - label = target, - param_attr=ParameterAttribute(name='crfw') - ) - + crf_l = crf_layer( + name='crf', + size=label_dict_len, + input=feature_out, + label=target, + param_attr=ParameterAttribute( + name='crfw', initial_std=default_std, learning_rate=mix_hidden_lr)) + + crf_dec_l = crf_decoding_layer( + name='crf_dec_l', + size=label_dict_len, + input=feature_out, + label=target, + param_attr=ParameterAttribute(name='crfw')) eval = sum_evaluator(input=crf_dec_l) - + outputs(crf_l) else: - crf_dec_l = crf_decoding_layer(name = 'crf_dec_l', - size = label_dict_len, - input = feature_out, - param_attr=ParameterAttribute(name='crfw') - ) + crf_dec_l = crf_decoding_layer( + name='crf_dec_l', + size=label_dict_len, + input=feature_out, + param_attr=ParameterAttribute(name='crfw')) outputs(crf_dec_l) - diff --git a/demo/semantic_role_labeling/predict.py b/demo/semantic_role_labeling/predict.py index 15145fafce..372fd090b6 100644 --- a/demo/semantic_role_labeling/predict.py +++ b/demo/semantic_role_labeling/predict.py @@ -26,7 +26,8 @@ UNK_IDX = 0 class Prediction(): - def __init__(self, train_conf, dict_file, model_dir, label_file, predicate_dict_file): + def __init__(self, train_conf, dict_file, model_dir, label_file, + predicate_dict_file): """ train_conf: trainer configure. dict_file: word dictionary file name. 
@@ -35,7 +36,7 @@ class Prediction(): self.dict = {} self.labels = {} - self.predicate_dict={} + self.predicate_dict = {} self.labels_reverse = {} self.load_dict_label(dict_file, label_file, predicate_dict_file) @@ -44,25 +45,18 @@ class Prediction(): len_pred = len(self.predicate_dict) conf = parse_config( - train_conf, - 'dict_len=' + str(len_dict) + - ',label_len=' + str(len_label) + - ',pred_len=' + str(len_pred) + - ',is_predict=True') + train_conf, 'dict_len=' + str(len_dict) + ',label_len=' + + str(len_label) + ',pred_len=' + str(len_pred) + ',is_predict=True') self.network = swig_paddle.GradientMachine.createFromConfigProto( conf.model_config) self.network.loadParameters(model_dir) slots = [ - integer_value_sequence(len_dict), - integer_value_sequence(len_dict), - integer_value_sequence(len_dict), - integer_value_sequence(len_dict), - integer_value_sequence(len_dict), - integer_value_sequence(len_dict), - integer_value_sequence(len_pred), - integer_value_sequence(2) - ] + integer_value_sequence(len_dict), integer_value_sequence(len_dict), + integer_value_sequence(len_dict), integer_value_sequence(len_dict), + integer_value_sequence(len_dict), integer_value_sequence(len_dict), + integer_value_sequence(len_pred), integer_value_sequence(2) + ] self.converter = DataProviderConverter(slots) def load_dict_label(self, dict_file, label_file, predicate_dict_file): @@ -78,6 +72,7 @@ class Prediction(): for line_count, line in enumerate(open(predicate_dict_file, 'r')): self.predicate_dict[line.strip()] = line_count + def get_data(self, data_file): """ Get input data of paddle format. @@ -88,9 +83,10 @@ class Prediction(): ).split('\t') words = sentence.split() sen_len = len(words) - + word_slot = [self.dict.get(w, UNK_IDX) for w in words] - predicate_slot = [self.predicate_dict.get(predicate, UNK_IDX)] * sen_len + predicate_slot = [self.predicate_dict.get(predicate, UNK_IDX) + ] * sen_len ctx_n2_slot = [self.dict.get(ctx_n2, UNK_IDX)] * sen_len ctx_n1_slot = [self.dict.get(ctx_n1, UNK_IDX)] * sen_len ctx_0_slot = [self.dict.get(ctx_0, UNK_IDX)] * sen_len @@ -99,7 +95,7 @@ class Prediction(): marks = mark.split() mark_slot = [int(w) for w in marks] - + yield word_slot, ctx_n2_slot, ctx_n1_slot, \ ctx_0_slot, ctx_p1_slot, ctx_p2_slot, predicate_slot, mark_slot @@ -123,8 +119,9 @@ class Prediction(): def option_parser(): - usage = ("python predict.py -c config -w model_dir " - "-d word dictionary -l label_file -i input_file -p pred_dict_file") + usage = ( + "python predict.py -c config -w model_dir " + "-d word dictionary -l label_file -i input_file -p pred_dict_file") parser = OptionParser(usage="usage: %s [options]" % usage) parser.add_option( "-c", @@ -187,8 +184,9 @@ def main(): output_file = options.output_file swig_paddle.initPaddle("--use_gpu=0") - predict = Prediction(train_conf, dict_file, model_path, label_file, predict_dict_file) - predict.predict(data_file,output_file) + predict = Prediction(train_conf, dict_file, model_path, label_file, + predict_dict_file) + predict.predict(data_file, output_file) if __name__ == '__main__': diff --git a/demo/sentiment/predict.py b/demo/sentiment/predict.py index 0095c6f727..8ec490f646 100755 --- a/demo/sentiment/predict.py +++ b/demo/sentiment/predict.py @@ -71,9 +71,7 @@ class SentimentPrediction(): transform word into integer index according to the dictionary. 
""" words = data.strip().split() - word_slot = [ - self.word_dict[w] for w in words if w in self.word_dict - ] + word_slot = [self.word_dict[w] for w in words if w in self.word_dict] return word_slot def batch_predict(self, data_batch): @@ -85,8 +83,8 @@ class SentimentPrediction(): if self.label is None: print("predicting label is %d" % (lab[0])) else: - print("predicting label is %s" % - (self.label[lab[0]])) + print("predicting label is %s" % (self.label[lab[0]])) + def option_parser(): usage = "python predict.py -n config -w model_dir -d dictionary -i input_file " @@ -143,9 +141,10 @@ def main(): batch.append([predict.get_index(line)]) if len(batch) == batch_size: predict.batch_predict(batch) - batch=[] + batch = [] if len(batch) > 0: predict.batch_predict(batch) + if __name__ == '__main__': main() diff --git a/doc_cn/cluster/k8s/distributed_training_on_kubernetes.md b/doc_cn/cluster/k8s/distributed_training_on_kubernetes.md index d9ed431ec0..64f8fd4b43 100644 --- a/doc_cn/cluster/k8s/distributed_training_on_kubernetes.md +++ b/doc_cn/cluster/k8s/distributed_training_on_kubernetes.md @@ -306,4 +306,4 @@ I1116 09:10:18.019069 50 ParameterClient2.cpp:122] pserver 2 192.168.223.143: I1116 09:10:18.019492 50 ParameterClient2.cpp:122] pserver 3 192.168.223.143:7165 I1116 09:10:18.019716 50 ParameterClient2.cpp:122] pserver 4 192.168.129.71:7164 I1116 09:10:18.019836 50 ParameterClient2.cpp:122] pserver 5 192.168.129.71:7165 -``` \ No newline at end of file +``` diff --git a/doc_cn/cluster/k8s/job.yaml b/doc_cn/cluster/k8s/job.yaml index 1e0ac464b2..488aad0bed 100644 --- a/doc_cn/cluster/k8s/job.yaml +++ b/doc_cn/cluster/k8s/job.yaml @@ -40,4 +40,4 @@ spec: - name: jobpath mountPath: /home/jobpath restartPolicy: Never - \ No newline at end of file + diff --git a/doc_cn/cluster/k8s/start_paddle.py b/doc_cn/cluster/k8s/start_paddle.py index 6a46161410..df00d82919 100755 --- a/doc_cn/cluster/k8s/start_paddle.py +++ b/doc_cn/cluster/k8s/start_paddle.py @@ -19,7 +19,6 @@ import socket import os import argparse - # configuration for cluster API = "/api/v1/namespaces/" JOBSELECTOR = "labelSelector=job-name=" @@ -145,8 +144,8 @@ def startPaddle(idMap={}, train_args_dict=None): if __name__ == '__main__': - parser = argparse.ArgumentParser(prog="start_paddle.py", - description='simple tool for k8s') + parser = argparse.ArgumentParser( + prog="start_paddle.py", description='simple tool for k8s') args, train_args_list = parser.parse_known_args() train_args = refine_unknown_args(train_args_list) train_args_dict = dict(zip(train_args[:-1:2], train_args[1::2])) diff --git a/doc_cn/demo/sentiment_analysis/index.rst b/doc_cn/demo/sentiment_analysis/index.rst index 82400b2459..9d7972b219 100644 --- a/doc_cn/demo/sentiment_analysis/index.rst +++ b/doc_cn/demo/sentiment_analysis/index.rst @@ -1,8 +1,8 @@ -情感分析教程 -=========================== - -.. toctree:: - :maxdepth: 3 - :glob: - +情感分析教程 +=========================== + +.. 
toctree:: + :maxdepth: 3 + :glob: + Training Locally \ No newline at end of file diff --git a/doc_theme/static/js/paddle_doc_init.js b/doc_theme/static/js/paddle_doc_init.js index 5c815a8d3a..153ce30745 100644 --- a/doc_theme/static/js/paddle_doc_init.js +++ b/doc_theme/static/js/paddle_doc_init.js @@ -28,4 +28,4 @@ $(document).ready(function(){ $('.doc-menu-vertical').find('li.current').last().addClass('active'); $('.doc-menu-vertical').perfectScrollbar(); -}); \ No newline at end of file +}); diff --git a/paddle/api/GradientMachine.cpp b/paddle/api/GradientMachine.cpp index c1b546dbcb..297eaa19bb 100644 --- a/paddle/api/GradientMachine.cpp +++ b/paddle/api/GradientMachine.cpp @@ -15,8 +15,8 @@ limitations under the License. */ #include "PaddleAPI.h" #include "PaddleAPIPrivate.h" -#include "paddle/gserver/gradientmachines/NeuralNetwork.h" #include "Internal.h" +#include "paddle/gserver/gradientmachines/NeuralNetwork.h" std::vector GradientMachine::defaultParamTypes = { PARAMETER_VALUE, PARAMETER_GRADIENT, PARAMETER_MOMENTUM}; diff --git a/paddle/api/Internal.h b/paddle/api/Internal.h index 4a07880d80..d48dd3a04c 100644 --- a/paddle/api/Internal.h +++ b/paddle/api/Internal.h @@ -16,14 +16,13 @@ limitations under the License. */ #include "PaddleAPI.h" -#include #include +#include template void staticCastVector(std::vector* dest, const std::vector& src) { dest->resize(src.size()); - std::transform(src.begin(), - src.end(), - dest->begin(), - [](T1 t) { return static_cast(t); }); + std::transform(src.begin(), src.end(), dest->begin(), [](T1 t) { + return static_cast(t); + }); } diff --git a/paddle/api/Matrix.cpp b/paddle/api/Matrix.cpp index d4c00e7093..7c375e5cfb 100644 --- a/paddle/api/Matrix.cpp +++ b/paddle/api/Matrix.cpp @@ -12,12 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "PaddleAPI.h" #include "paddle/math/Matrix.h" -#include "paddle/math/SparseMatrix.h" -#include "paddle/math/CpuSparseMatrix.h" -#include #include +#include +#include "PaddleAPI.h" +#include "paddle/math/CpuSparseMatrix.h" +#include "paddle/math/SparseMatrix.h" struct MatrixPrivate { std::shared_ptr mat; diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index f3c80e3b06..84a66719c3 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -16,8 +16,8 @@ limitations under the License. */ #include #include -#include #include +#include #include #include "paddle/utils/GlobalConstants.h" #include "paddle/utils/TypeDefs.h" diff --git a/paddle/api/Parameter.cpp b/paddle/api/Parameter.cpp index 742ad0679c..4eed00a84a 100644 --- a/paddle/api/Parameter.cpp +++ b/paddle/api/Parameter.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "PaddleAPI.h" #include "paddle/parameter/Parameter.h" +#include "PaddleAPI.h" struct ParameterPrivate { std::shared_ptr sharedPtr; diff --git a/paddle/api/ParameterOptimizer.cpp b/paddle/api/ParameterOptimizer.cpp index 606dccd5ac..21b851dd5e 100644 --- a/paddle/api/ParameterOptimizer.cpp +++ b/paddle/api/ParameterOptimizer.cpp @@ -12,11 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "PaddleAPI.h" -#include "PaddleAPIPrivate.h" #include "paddle/parameter/ParameterOptimizer.h" -#include "Internal.h" #include +#include "Internal.h" +#include "PaddleAPI.h" +#include "PaddleAPIPrivate.h" struct ParameterOptimizerPrivate { std::unique_ptr optimizer; @@ -36,16 +36,13 @@ struct ParameterTraverseCallbackPrivate { size_t sparseId) { std::vector real_vecs; real_vecs.resize(vecs.size()); - std::transform(vecs.begin(), - vecs.end(), - real_vecs.begin(), - [](Vector* v) { - if (v) { - return *(paddle::VectorPtr*)(v->getSharedPtr()); - } else { - return paddle::VectorPtr(); - } - }); + std::transform(vecs.begin(), vecs.end(), real_vecs.begin(), [](Vector* v) { + if (v) { + return *(paddle::VectorPtr*)(v->getSharedPtr()); + } else { + return paddle::VectorPtr(); + } + }); paddle::ParameterConfig& real_conf = *(paddle::ParameterConfig*)(const_cast(conf) diff --git a/paddle/api/SequenceGenerator.cpp b/paddle/api/SequenceGenerator.cpp index 5c65b34f23..8428edc60d 100644 --- a/paddle/api/SequenceGenerator.cpp +++ b/paddle/api/SequenceGenerator.cpp @@ -12,14 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include +#include +#include +#include #include "PaddleAPI.h" #include "paddle/gserver/gradientmachines/GradientMachine.h" #include "paddle/parameter/Argument.h" #include "paddle/utils/Flags.h" -#include -#include -#include -#include // used to represent partial sequence struct Path { diff --git a/paddle/api/Trainer.cpp b/paddle/api/Trainer.cpp index 9aeb874bdc..59b47d4b1c 100644 --- a/paddle/api/Trainer.cpp +++ b/paddle/api/Trainer.cpp @@ -16,12 +16,12 @@ limitations under the License. */ #include "PaddleAPIPrivate.h" #include -#include #include +#include +#include "paddle/gserver/gradientmachines/NeuralNetwork.h" #include "paddle/trainer/ParamUtil.h" #include "paddle/trainer/Trainer.h" -#include "paddle/gserver/gradientmachines/NeuralNetwork.h" #include "paddle/trainer/TrainerInternal.h" #include "paddle/utils/Flags.h" diff --git a/paddle/api/Util.cpp b/paddle/api/Util.cpp index 0c9c048099..c3f739568f 100644 --- a/paddle/api/Util.cpp +++ b/paddle/api/Util.cpp @@ -14,16 +14,16 @@ limitations under the License. 
*/ #include "PaddleAPI.h" -#include "paddle/utils/Util.h" -#include "paddle/utils/PythonUtil.h" -#include "paddle/utils/Flags.h" -#include "paddle/utils/Excepts.h" #include "paddle/parameter/Parameter.h" +#include "paddle/utils/Excepts.h" +#include "paddle/utils/Flags.h" +#include "paddle/utils/PythonUtil.h" +#include "paddle/utils/Util.h" #include +#include #include #include -#include void initPaddle(int argc, char** argv) { paddle::initMain(argc, argv); diff --git a/paddle/api/Vector.cpp b/paddle/api/Vector.cpp index 4f3ab7de60..874f2fd044 100644 --- a/paddle/api/Vector.cpp +++ b/paddle/api/Vector.cpp @@ -282,7 +282,7 @@ FloatArray Vector::getData() const { } void Vector::copyFrom(Vector* src) throw(RangeError) { - if (src->m->vec->getSize() != m->vec->getSize()) { + if (src->m->vec->getSize() != m->vec->getSize()) { throw RangeError(); } m->vec->copyFrom(*src->m->vec); diff --git a/paddle/api/test/testMatrix.py b/paddle/api/test/testMatrix.py index f76f84d2e1..37666bdccc 100644 --- a/paddle/api/test/testMatrix.py +++ b/paddle/api/test/testMatrix.py @@ -100,11 +100,12 @@ class TestMatrix(unittest.TestCase): for a, e in zip(gpu_m.getData(), [1.0, 3.23, 3.0, 4.0, 5.0, 6.0]): self.assertAlmostEqual(a, e) - + def test_numpy(self): numpy_mat = np.matrix([[1, 2], [3, 4], [5, 6]], dtype="float32") m = swig_paddle.Matrix.createDenseFromNumpy(numpy_mat) - self.assertEqual((int(m.getHeight()), int(m.getWidth())), numpy_mat.shape) + self.assertEqual((int(m.getHeight()), int(m.getWidth())), + numpy_mat.shape) self.assertEqual(m.isGpu(), swig_paddle.isUsingGpu()) for a, e in zip(m.getData(), [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]): self.assertAlmostEqual(a, e) diff --git a/paddle/api/test/testVector.py b/paddle/api/test/testVector.py index 525ed97edd..1ab095c1d3 100644 --- a/paddle/api/test/testVector.py +++ b/paddle/api/test/testVector.py @@ -26,17 +26,17 @@ class TestIVector(unittest.TestCase): self.assertEqual(m[i], 0) m[i] = i self.assertEqual(m[i], i) - + m = swig_paddle.IVector.createZero(10) self.assertEqual(m.isGpu(), swig_paddle.isUsingGpu()) - self.assertEqual(m.getData(), [0]*10) + self.assertEqual(m.getData(), [0] * 10) def test_create(self): m = swig_paddle.IVector.create(range(10), False) self.assertIsNotNone(m) for i in xrange(10): self.assertEqual(m[i], i) - + m = swig_paddle.IVector.create(range(10)) self.assertEqual(m.isGpu(), swig_paddle.isUsingGpu()) self.assertEqual(m.getData(), range(10)) @@ -69,7 +69,7 @@ class TestIVector(unittest.TestCase): expect_vec = range(0, 10) expect_vec[4] = 7 self.assertEqual(vec.getData(), expect_vec) - + def test_numpy(self): vec = np.array([1, 3, 4, 65, 78, 1, 4], dtype="int32") iv = swig_paddle.IVector.createVectorFromNumpy(vec) @@ -85,10 +85,10 @@ class TestVector(unittest.TestCase): self.assertTrue(util.doubleEqual(v[i], 0)) v[i] = i self.assertTrue(util.doubleEqual(v[i], i)) - + v = swig_paddle.Vector.createZero(10) self.assertEqual(v.isGpu(), swig_paddle.isUsingGpu()) - self.assertEqual(v.getData(), [0]*10) + self.assertEqual(v.getData(), [0] * 10) def testCreate(self): v = swig_paddle.Vector.create([x / 100.0 for x in xrange(100)], False) @@ -96,14 +96,13 @@ class TestVector(unittest.TestCase): for i in xrange(len(v)): self.assertTrue(util.doubleEqual(v[i], i / 100.0)) self.assertEqual(100, len(v)) - + v = swig_paddle.Vector.create([x / 100.0 for x in xrange(100)]) self.assertEqual(v.isGpu(), swig_paddle.isUsingGpu()) self.assertEqual(100, len(v)) vdata = v.getData() for i in xrange(len(v)): self.assertTrue(util.doubleEqual(vdata[i], i / 100.0)) - 
def testCpuNumpy(self): numpy_arr = np.array([1.2, 2.3, 3.4, 4.5], dtype="float32") @@ -128,7 +127,7 @@ class TestVector(unittest.TestCase): for i in xrange(1, len(numpy_3)): util.doubleEqual(numpy_3[i], vec[i]) - + def testNumpy(self): numpy_arr = np.array([1.2, 2.3, 3.4, 4.5], dtype="float32") vec = swig_paddle.Vector.createVectorFromNumpy(numpy_arr) @@ -136,7 +135,6 @@ class TestVector(unittest.TestCase): vecData = vec.getData() for n, v in zip(numpy_arr, vecData): self.assertTrue(util.doubleEqual(n, v)) - def testCopyFromNumpy(self): vec = swig_paddle.Vector.createZero(1, False) diff --git a/paddle/cuda/include/hl_base.h b/paddle/cuda/include/hl_base.h index 0b9dfc6117..84c5f2d5c9 100644 --- a/paddle/cuda/include/hl_base.h +++ b/paddle/cuda/include/hl_base.h @@ -223,9 +223,9 @@ typedef struct { #ifdef __NVCC__ -#include "paddle/utils/Logging.h" -#include "hl_cuda.h" #include "cuda_runtime.h" +#include "hl_cuda.h" +#include "paddle/utils/Logging.h" extern __thread bool g_sync_flag; extern __thread cudaStream_t default_stream; diff --git a/paddle/cuda/include/hl_dso_loader.h b/paddle/cuda/include/hl_dso_loader.h index 9ddf0e61ee..20c13f21e6 100644 --- a/paddle/cuda/include/hl_dso_loader.h +++ b/paddle/cuda/include/hl_dso_loader.h @@ -16,8 +16,8 @@ limitations under the License. */ #define HL_DSO_LOADER_H_ #include -#include #include +#include #include "hl_base.h" /** diff --git a/paddle/cuda/include/hl_gpu.h b/paddle/cuda/include/hl_gpu.h index aad0450c8c..ede2670882 100644 --- a/paddle/cuda/include/hl_gpu.h +++ b/paddle/cuda/include/hl_gpu.h @@ -15,28 +15,28 @@ limitations under the License. */ #ifndef HL_GPU_H_ #define HL_GPU_H_ +#include "hl_aggregate.h" #include "hl_base.h" +#include "hl_cnn.h" #include "hl_cuda.h" #include "hl_cuda_cublas.h" #include "hl_cuda_cudnn.h" -#include "hl_matrix.h" -#include "hl_aggregate.h" -#include "hl_cnn.h" -#include "hl_sparse.h" #include "hl_lstm.h" +#include "hl_matrix.h" #include "hl_sequence.h" +#include "hl_sparse.h" #include "hl_warpctc_wrap.h" #ifdef HPPL_STUB_FUNC -#include "stub/hl_cuda_stub.h" -#include "stub/hl_cuda_cublas_stub.h" -#include "stub/hl_cuda_cudnn_stub.h" -#include "stub/hl_matrix_stub.h" #include "stub/hl_aggregate_stub.h" #include "stub/hl_cnn_stub.h" -#include "stub/hl_sparse_stub.h" +#include "stub/hl_cuda_cublas_stub.h" +#include "stub/hl_cuda_cudnn_stub.h" +#include "stub/hl_cuda_stub.h" #include "stub/hl_lstm_stub.h" +#include "stub/hl_matrix_stub.h" #include "stub/hl_sequence_stub.h" +#include "stub/hl_sparse_stub.h" #endif #endif /* HL_GPU_H_ */ diff --git a/paddle/cuda/include/hl_time.h b/paddle/cuda/include/hl_time.h index f214b055f9..f63f025820 100644 --- a/paddle/cuda/include/hl_time.h +++ b/paddle/cuda/include/hl_time.h @@ -14,7 +14,7 @@ limitations under the License. */ #ifndef HL_TIME_H_ #define HL_TIME_H_ - +#include /** * @brief High resolution timer. * diff --git a/paddle/cuda/src/hl_cuda_cublas.cc b/paddle/cuda/src/hl_cuda_cublas.cc index 7cede8c63c..182e8ab218 100644 --- a/paddle/cuda/src/hl_cuda_cublas.cc +++ b/paddle/cuda/src/hl_cuda_cublas.cc @@ -12,12 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#include "hl_cuda_cublas.h" #include #include #include "hl_cuda.h" -#include "hl_cuda_cublas.h" -#include "hl_thread.ph" #include "hl_dso_loader.h" +#include "hl_thread.ph" #include "paddle/utils/Logging.h" namespace dynload { diff --git a/paddle/cuda/src/hl_cuda_cudnn.cc b/paddle/cuda/src/hl_cuda_cudnn.cc index 9c9b8906c2..7111224d59 100644 --- a/paddle/cuda/src/hl_cuda_cudnn.cc +++ b/paddle/cuda/src/hl_cuda_cudnn.cc @@ -12,14 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include "hl_cuda_cudnn.h" #include #include -#include "hl_cuda_cudnn.h" #include "hl_cuda_cudnn.ph" -#include "hl_thread.ph" #include "hl_dso_loader.h" -#include "paddle/utils/Logging.h" +#include "hl_thread.ph" #include "paddle/utils/CommandLineParser.h" +#include "paddle/utils/Logging.h" P_DEFINE_int32(cudnn_conv_workspace_limit_in_mb, 4096, diff --git a/paddle/cuda/src/hl_cuda_device.cc b/paddle/cuda/src/hl_cuda_device.cc index d181448292..b0bba73594 100644 --- a/paddle/cuda/src/hl_cuda_device.cc +++ b/paddle/cuda/src/hl_cuda_device.cc @@ -12,13 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include "hl_cuda.h" #include #include #include #include #include #include -#include "hl_cuda.h" #include "hl_cuda.ph" #include "hl_dso_loader.h" #include "hl_thread.ph" diff --git a/paddle/cuda/src/hl_cudart_wrap.cc b/paddle/cuda/src/hl_cudart_wrap.cc index a3ac750b53..ecc03a729d 100644 --- a/paddle/cuda/src/hl_cudart_wrap.cc +++ b/paddle/cuda/src/hl_cudart_wrap.cc @@ -14,8 +14,8 @@ limitations under the License. */ #ifdef PADDLE_USE_DSO -#include #include +#include #include "hl_dso_loader.h" /** diff --git a/paddle/cuda/src/hl_time.cc b/paddle/cuda/src/hl_time.cc index 3005065899..7e5d7e8aae 100644 --- a/paddle/cuda/src/hl_time.cc +++ b/paddle/cuda/src/hl_time.cc @@ -12,10 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include +#include "hl_time.h" #include +#include +#include #include -#include "hl_time.h" using std::chrono::high_resolution_clock; diff --git a/paddle/cuda/src/hl_warpctc_wrap.cc b/paddle/cuda/src/hl_warpctc_wrap.cc index 619b90120f..9ae8bc0f22 100644 --- a/paddle/cuda/src/hl_warpctc_wrap.cc +++ b/paddle/cuda/src/hl_warpctc_wrap.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include #include "hl_warpctc_wrap.h" +#include #include "hl_dso_loader.h" #include "paddle/utils/Logging.h" diff --git a/paddle/gserver/activations/ActivationFunction.cpp b/paddle/gserver/activations/ActivationFunction.cpp index f1d09c568d..f8c4bcac2f 100644 --- a/paddle/gserver/activations/ActivationFunction.cpp +++ b/paddle/gserver/activations/ActivationFunction.cpp @@ -15,13 +15,13 @@ limitations under the License. 
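The hl_time.cc hunk above keeps the std::chrono-based timer and only reorders its includes. For reference, a minimal high-resolution timestamp helper in the same style (nowInUs is an illustrative name, not the hl_time API):

    #include <chrono>
    #include <cstdint>

    // Returns a monotonic-ish timestamp in microseconds, the same approach
    // hl_time.cc takes with high_resolution_clock.
    int64_t nowInUs() {
      using std::chrono::duration_cast;
      using std::chrono::high_resolution_clock;
      using std::chrono::microseconds;
      return duration_cast<microseconds>(
                 high_resolution_clock::now().time_since_epoch())
          .count();
    }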
*/ #include "ActivationFunction.h" #include -#include #include -#include +#include #include #include -#include "paddle/utils/ClassRegistrar.h" +#include #include "paddle/parameter/Argument.h" +#include "paddle/utils/ClassRegistrar.h" #include "paddle/utils/Logging.h" diff --git a/paddle/gserver/dataproviders/DataProvider.cpp b/paddle/gserver/dataproviders/DataProvider.cpp index 55ca62543a..0478256f9c 100644 --- a/paddle/gserver/dataproviders/DataProvider.cpp +++ b/paddle/gserver/dataproviders/DataProvider.cpp @@ -14,12 +14,12 @@ limitations under the License. */ #include "DataProvider.h" -#include "paddle/utils/Util.h" -#include "paddle/utils/StringUtil.h" -#include "paddle/utils/Logging.h" -#include #include +#include #include "ProtoDataProvider.h" +#include "paddle/utils/Logging.h" +#include "paddle/utils/StringUtil.h" +#include "paddle/utils/Util.h" namespace paddle { diff --git a/paddle/gserver/dataproviders/DataProvider.h b/paddle/gserver/dataproviders/DataProvider.h index 5b854936c6..9b7f7e36ce 100644 --- a/paddle/gserver/dataproviders/DataProvider.h +++ b/paddle/gserver/dataproviders/DataProvider.h @@ -14,28 +14,28 @@ limitations under the License. */ #pragma once -#include -#include -#include -#include -#include #include -#include -#include #include +#include +#include +#include +#include +#include +#include +#include +#include "DataConfig.pb.h" +#include "paddle/math/Matrix.h" +#include "paddle/math/SparseMatrix.h" +#include "paddle/math/Vector.h" +#include "paddle/parameter/Argument.h" +#include "paddle/utils/ClassRegistrar.h" +#include "paddle/utils/Locks.h" #include "paddle/utils/Logging.h" #include "paddle/utils/Queue.h" -#include "paddle/utils/Locks.h" #include "paddle/utils/ThreadLocal.h" #include "paddle/utils/TypeDefs.h" -#include "paddle/math/Matrix.h" -#include "paddle/math/SparseMatrix.h" #include "paddle/utils/Util.h" -#include "paddle/math/Vector.h" -#include "DataConfig.pb.h" -#include "paddle/utils/ClassRegistrar.h" -#include "paddle/parameter/Argument.h" namespace paddle { /** diff --git a/paddle/gserver/dataproviders/MultiDataProvider.cpp b/paddle/gserver/dataproviders/MultiDataProvider.cpp index e1fc4c9365..46fe053768 100644 --- a/paddle/gserver/dataproviders/MultiDataProvider.cpp +++ b/paddle/gserver/dataproviders/MultiDataProvider.cpp @@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Util.h" #include "MultiDataProvider.h" -#include "paddle/utils/Logging.h" #include +#include "paddle/utils/Logging.h" +#include "paddle/utils/Util.h" namespace paddle { diff --git a/paddle/gserver/dataproviders/ProtoDataProvider.cpp b/paddle/gserver/dataproviders/ProtoDataProvider.cpp index 6a0cb5ef63..d16ecca2d9 100644 --- a/paddle/gserver/dataproviders/ProtoDataProvider.cpp +++ b/paddle/gserver/dataproviders/ProtoDataProvider.cpp @@ -13,14 +13,14 @@ See the License for the specific language governing permissions and limitations under the License. 
 */
 #include "ProtoDataProvider.h"
-#include "paddle/utils/Util.h"
-#include "paddle/utils/StringUtil.h"
 #include
 #include
 #include
+#include "paddle/utils/StringUtil.h"
+#include "paddle/utils/Util.h"
 
-#include "paddle/utils/Logging.h"
 #include "DataProviderGroup.h"
+#include "paddle/utils/Logging.h"
 
 P_DEFINE_double(memory_threshold_on_load_data,
                 1.0,
@@ -562,16 +562,16 @@ int64_t ProtoDataProvider::getNextBatchInternal(int64_t size,
         auto mat = cpuArguments[slot].value;
         mat->resize(size, dim);
         if (std::dynamic_pointer_cast<GpuSparseMatrix>(mat)) {
-          std::dynamic_pointer_cast<GpuSparseMatrix>(mat)
-              ->copyFrom(dataPos.data(),
-                         slots_[slot].indices.data(),
-                         slots_[slot].sparseNonValueData.data(),
-                         HPPL_STREAM_1);
+          std::dynamic_pointer_cast<GpuSparseMatrix>(mat)->copyFrom(
+              dataPos.data(),
+              slots_[slot].indices.data(),
+              slots_[slot].sparseNonValueData.data(),
+              HPPL_STREAM_1);
         } else if (std::dynamic_pointer_cast<CpuSparseMatrix>(mat)) {
-          std::dynamic_pointer_cast<CpuSparseMatrix>(mat)
-              ->copyFrom(dataPos.data(),
-                         slots_[slot].indices.data(),
-                         slots_[slot].sparseNonValueData.data());
+          std::dynamic_pointer_cast<CpuSparseMatrix>(mat)->copyFrom(
+              dataPos.data(),
+              slots_[slot].indices.data(),
+              slots_[slot].sparseNonValueData.data());
         } else {
           LOG(FATAL) << "Not Supported";
         }
@@ -598,16 +598,16 @@ int64_t ProtoDataProvider::getNextBatchInternal(int64_t size,
         auto mat = cpuArguments[slot].value;
         mat->resize(size, dim);
         if (std::dynamic_pointer_cast<GpuSparseMatrix>(mat)) {
-          std::dynamic_pointer_cast<GpuSparseMatrix>(mat)
-              ->copyFrom(dataPos.data(),
-                         slots_[slot].indices.data(),
-                         slots_[slot].sparseFloatValueData.data(),
-                         HPPL_STREAM_1);
+          std::dynamic_pointer_cast<GpuSparseMatrix>(mat)->copyFrom(
+              dataPos.data(),
+              slots_[slot].indices.data(),
+              slots_[slot].sparseFloatValueData.data(),
+              HPPL_STREAM_1);
         } else if (std::dynamic_pointer_cast<CpuSparseMatrix>(mat)) {
-          std::dynamic_pointer_cast<CpuSparseMatrix>(mat)
-              ->copyFrom(dataPos.data(),
-                         slots_[slot].indices.data(),
-                         slots_[slot].sparseFloatValueData.data());
+          std::dynamic_pointer_cast<CpuSparseMatrix>(mat)->copyFrom(
+              dataPos.data(),
+              slots_[slot].indices.data(),
+              slots_[slot].sparseFloatValueData.data());
         } else {
           LOG(FATAL) << "Not Supported";
         }
diff --git a/paddle/gserver/dataproviders/ProtoDataProvider.h b/paddle/gserver/dataproviders/ProtoDataProvider.h
index 9ec5cb97c0..7dd45e0622 100644
--- a/paddle/gserver/dataproviders/ProtoDataProvider.h
+++ b/paddle/gserver/dataproviders/ProtoDataProvider.h
@@ -16,8 +16,8 @@ limitations under the License. */
 
 #include
 
-#include "paddle/utils/Stat.h"
 #include "DataFormat.pb.h"
+#include "paddle/utils/Stat.h"
 
 #include "DataProvider.h"
 #include "ProtoReader.h"
diff --git a/paddle/gserver/dataproviders/ProtoReader.h b/paddle/gserver/dataproviders/ProtoReader.h
index 6708e7cde7..4e6f58a529 100644
--- a/paddle/gserver/dataproviders/ProtoReader.h
+++ b/paddle/gserver/dataproviders/ProtoReader.h
@@ -16,10 +16,10 @@ limitations under the License. */
 
 #include
 
-#include
 #include
-#include
 #include
+#include
+#include
 
 namespace paddle {
 
diff --git a/paddle/gserver/dataproviders/PyDataProvider.cpp b/paddle/gserver/dataproviders/PyDataProvider.cpp
index f5dcbfcf34..5bdd55309c 100644
--- a/paddle/gserver/dataproviders/PyDataProvider.cpp
+++ b/paddle/gserver/dataproviders/PyDataProvider.cpp
@@ -13,10 +13,10 @@ See the License for the specific language governing permissions and
 limitations under the License.
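The two getNextBatchInternal hunks above reformat a runtime dispatch on the concrete matrix type: the GPU branch takes an extra HPPL stream argument, the CPU branch does not. A stripped-down sketch of that dynamic_pointer_cast dispatch (Matrix, CpuSparseMatrix, and GpuSparseMatrix here are stand-in types, not the Paddle classes):

    #include <iostream>
    #include <memory>

    struct Matrix { virtual ~Matrix() = default; };
    struct CpuSparseMatrix : Matrix {};
    struct GpuSparseMatrix : Matrix {};

    // Try the GPU type first, fall back to the CPU type, fail loudly otherwise.
    void copyInto(const std::shared_ptr<Matrix>& mat) {
      if (auto gpu = std::dynamic_pointer_cast<GpuSparseMatrix>(mat)) {
        std::cout << "GPU copy path\n";  // the real code passes a stream here
      } else if (auto cpu = std::dynamic_pointer_cast<CpuSparseMatrix>(mat)) {
        std::cout << "CPU copy path\n";
      } else {
        std::cerr << "Not Supported\n";  // LOG(FATAL) in the real code
      }
    }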
 */
 #include "PyDataProvider.h"
-#include "paddle/utils/PythonUtil.h"
 #include
-#include "paddle/utils/Util.h"
 #include "paddle/utils/Excepts.h"
+#include "paddle/utils/PythonUtil.h"
+#include "paddle/utils/Util.h"
 
 namespace paddle {
 
@@ -316,16 +316,16 @@ void PyDataProvider::handleSparseNonValueSlot(
   auto mat = cpuArguments[slotIndex].value;
   mat->resize(slot.sampleNum, dim, slot.sampleNum, NO_VALUE, SPARSE_CSR);
   if (std::dynamic_pointer_cast<GpuSparseMatrix>(mat)) {
-    std::dynamic_pointer_cast<GpuSparseMatrix>(mat)
-        ->copyFrom(slot.sampleSequenceIdVec.data(),
-                   slot.indices.data(),
-                   slot.sparseNonValueData.data(),
-                   HPPL_STREAM_1);
+    std::dynamic_pointer_cast<GpuSparseMatrix>(mat)->copyFrom(
+        slot.sampleSequenceIdVec.data(),
+        slot.indices.data(),
+        slot.sparseNonValueData.data(),
+        HPPL_STREAM_1);
   } else if (std::dynamic_pointer_cast<CpuSparseMatrix>(mat)) {
-    std::dynamic_pointer_cast<CpuSparseMatrix>(mat)
-        ->copyFrom(slot.sampleSequenceIdVec.data(),
-                   slot.indices.data(),
-                   slot.sparseNonValueData.data());
+    std::dynamic_pointer_cast<CpuSparseMatrix>(mat)->copyFrom(
+        slot.sampleSequenceIdVec.data(),
+        slot.indices.data(),
+        slot.sparseNonValueData.data());
   } else {
     LOG(FATAL) << "Not Supported";
   }
@@ -347,16 +347,16 @@ void PyDataProvider::handleSparseValueSlot(
   auto mat = cpuArguments[slotIndex].value;
   mat->resize(slot.sampleNum, dim, slot.sampleNum, FLOAT_VALUE, SPARSE_CSR);
   if (std::dynamic_pointer_cast<GpuSparseMatrix>(mat)) {
-    std::dynamic_pointer_cast<GpuSparseMatrix>(mat)
-        ->copyFrom(slot.sampleSequenceIdVec.data(),
-                   slot.indices.data(),
-                   slot.sparseFloatValueData.data(),
-                   HPPL_STREAM_DEFAULT);
+    std::dynamic_pointer_cast<GpuSparseMatrix>(mat)->copyFrom(
+        slot.sampleSequenceIdVec.data(),
+        slot.indices.data(),
+        slot.sparseFloatValueData.data(),
+        HPPL_STREAM_DEFAULT);
   } else if (std::dynamic_pointer_cast<CpuSparseMatrix>(mat)) {
-    std::dynamic_pointer_cast<CpuSparseMatrix>(mat)
-        ->copyFrom(slot.sampleSequenceIdVec.data(),
-                   slot.indices.data(),
-                   slot.sparseFloatValueData.data());
+    std::dynamic_pointer_cast<CpuSparseMatrix>(mat)->copyFrom(
+        slot.sampleSequenceIdVec.data(),
+        slot.indices.data(),
+        slot.sparseFloatValueData.data());
   } else {
     LOG(FATAL) << "Not Supported";
   }
diff --git a/paddle/gserver/dataproviders/PyDataProvider2.cpp b/paddle/gserver/dataproviders/PyDataProvider2.cpp
index 8b04a03f6d..460efc5adc 100644
--- a/paddle/gserver/dataproviders/PyDataProvider2.cpp
+++ b/paddle/gserver/dataproviders/PyDataProvider2.cpp
@@ -15,18 +15,18 @@ limitations under the License. */
 #ifndef PADDLE_NO_PYTHON
 
 #include
+#include
 #include
 #include
-#include
 #include
-#include
+#include
 
 #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
 #include
 
 #include "DataProvider.h"
-#include "paddle/utils/PythonUtil.h"
 #include "paddle/utils/Locks.h"
+#include "paddle/utils/PythonUtil.h"
 #include "paddle/utils/Stat.h"
 
 namespace paddle {
 
@@ -400,10 +400,9 @@ private:
     if (this->loadThread_) {  // wait poolActualSize < poolSize;
       std::unique_lock<std::mutex> l(mtx_);
-      pushCV_.wait(l,
-                   [this, additionalBatchSize] {
-                     return this->poolActualSize_ < poolSize_;
-                   });
+      pushCV_.wait(l, [this, additionalBatchSize] {
+        return this->poolActualSize_ < poolSize_;
+      });
     }
 
     {
@@ -529,12 +528,10 @@ public:
     // but, loading from cache, cache object should ensure
     // data pool ready.
     std::unique_lock<std::mutex> l(mtx_);
-    pullCV_.wait(l,
-                 [this, &size] {
-                   return this->poolActualSize_ >=
-                              std::max(size, this->minPoolSize_) ||
-                          callingContexts_.empty();
-                 });
+    pullCV_.wait(l, [this, &size] {
+      return this->poolActualSize_ >= std::max(size, this->minPoolSize_) ||
+             callingContexts_.empty();
+    });
 
     if (unittest::OnPoolFilled) {
       (*unittest::OnPoolFilled)(this->poolActualSize_);
diff --git a/paddle/gserver/evaluators/Evaluator.cpp b/paddle/gserver/evaluators/Evaluator.cpp
index aa6dc7cb86..7556d21e01 100644
--- a/paddle/gserver/evaluators/Evaluator.cpp
+++ b/paddle/gserver/evaluators/Evaluator.cpp
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include "paddle/utils/Stat.h"
 #include "paddle/gserver/evaluators/Evaluator.h"
+#include "paddle/utils/Stat.h"
 
 #include "paddle/gserver/gradientmachines/NeuralNetwork.h"
 
@@ -842,9 +842,9 @@ void PnpairEvaluator::calc(std::vector<PredictionResult>& predictArray) {
   auto start = predictArray.begin();
   while (start != predictArray.end()) {
     auto end = std::find_if(
-        start + 1,
-        predictArray.end(),
-        [=](const PredictionResult& x) { return x.queryid != start->queryid; });
+        start + 1, predictArray.end(), [=](const PredictionResult& x) {
+          return x.queryid != start->queryid;
+        });
     CHECK(end != start);
     stat(start - predictArray.begin(),
          end - predictArray.begin(),
diff --git a/paddle/gserver/evaluators/Evaluator.h b/paddle/gserver/evaluators/Evaluator.h
index a26c650c38..5770847309 100644
--- a/paddle/gserver/evaluators/Evaluator.h
+++ b/paddle/gserver/evaluators/Evaluator.h
@@ -14,11 +14,11 @@ limitations under the License. */
 
 #pragma once
 
-#include "paddle/pserver/ParameterClient2.h"
-#include "paddle/utils/ClassRegistrar.h"
+#include
 #include "ModelConfig.pb.h"
 #include "paddle/parameter/Argument.h"
-#include
+#include "paddle/pserver/ParameterClient2.h"
+#include "paddle/utils/ClassRegistrar.h"
 
 namespace paddle {
 
diff --git a/paddle/gserver/gradientmachines/GradientMachine.cpp b/paddle/gserver/gradientmachines/GradientMachine.cpp
index 6adee05dbe..36ca05b919 100644
--- a/paddle/gserver/gradientmachines/GradientMachine.cpp
+++ b/paddle/gserver/gradientmachines/GradientMachine.cpp
@@ -14,16 +14,16 @@ limitations under the License. */
 
 #include "GradientMachine.h"
 
-#include "paddle/utils/Logging.h"
 #include
+#include "paddle/utils/Logging.h"
 
-#include "hl_gpu.h"
-#include "NeuralNetwork.h"
-#include "ParallelNeuralNetwork.h"
+#include "GradientMachineMode.h"
 #include "MultiGradientMachine.h"
-#include "NeuralNetwork.h"
 #include "MultiNetwork.h"
-#include "GradientMachineMode.h"
+#include "NeuralNetwork.h"
+#include "NeuralNetwork.h"
+#include "ParallelNeuralNetwork.h"
+#include "hl_gpu.h"
 
 namespace paddle {
 
diff --git a/paddle/gserver/gradientmachines/GradientMachine.h b/paddle/gserver/gradientmachines/GradientMachine.h
index f3e44a9e39..579eca71d4 100644
--- a/paddle/gserver/gradientmachines/GradientMachine.h
+++ b/paddle/gserver/gradientmachines/GradientMachine.h
@@ -17,15 +17,15 @@ limitations under the License.
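Both condition-variable hunks above (pushCV_ in PyDataProvider2 and the pullCV_ wait here) use the predicate overload of wait, which re-checks the condition after every wakeup and so tolerates spurious wakeups. A self-contained sketch of the same bounded-pool idea (BoundedQueue is illustrative, not the Paddle class):

    #include <condition_variable>
    #include <mutex>
    #include <queue>

    class BoundedQueue {
    public:
      explicit BoundedQueue(size_t cap) : cap_(cap) {}

      void push(int v) {
        std::unique_lock<std::mutex> l(mtx_);
        // Block until there is room; the predicate is re-evaluated on wakeup.
        pushCV_.wait(l, [this] { return q_.size() < cap_; });
        q_.push(v);
        pullCV_.notify_one();
      }

      int pull() {
        std::unique_lock<std::mutex> l(mtx_);
        pullCV_.wait(l, [this] { return !q_.empty(); });
        int v = q_.front();
        q_.pop();
        pushCV_.notify_one();
        return v;
      }

    private:
      size_t cap_;
      std::queue<int> q_;
      std::mutex mtx_;
      std::condition_variable pushCV_, pullCV_;
    };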
*/ #include #include -#include "paddle/math/Matrix.h" -#include "paddle/parameter/Parameter.h" -#include "paddle/parameter/ParameterUpdaterBase.h" -#include "paddle/utils/Thread.h" -#include "TrainerConfig.pb.h" #include "ModelConfig.pb.h" +#include "TrainerConfig.pb.h" #include "paddle/gserver/dataproviders/DataProvider.h" #include "paddle/gserver/evaluators/Evaluator.h" #include "paddle/gserver/layers/Layer.h" +#include "paddle/math/Matrix.h" +#include "paddle/parameter/Parameter.h" +#include "paddle/parameter/ParameterUpdaterBase.h" +#include "paddle/utils/Thread.h" namespace paddle { /** diff --git a/paddle/gserver/gradientmachines/MultiGradientMachine.h b/paddle/gserver/gradientmachines/MultiGradientMachine.h index fe6d96e8ea..5f9855c4be 100644 --- a/paddle/gserver/gradientmachines/MultiGradientMachine.h +++ b/paddle/gserver/gradientmachines/MultiGradientMachine.h @@ -18,9 +18,9 @@ limitations under the License. */ #include "GradientMachine.h" -#include "paddle/utils/Queue.h" -#include "paddle/utils/Locks.h" #include "hl_gpu.h" +#include "paddle/utils/Locks.h" +#include "paddle/utils/Queue.h" namespace paddle { diff --git a/paddle/gserver/gradientmachines/MultiNetwork.cpp b/paddle/gserver/gradientmachines/MultiNetwork.cpp index 61af82fcb7..6eb3d8db96 100644 --- a/paddle/gserver/gradientmachines/MultiNetwork.cpp +++ b/paddle/gserver/gradientmachines/MultiNetwork.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include #include "paddle/utils/Stat.h" #include "paddle/utils/Util.h" -#include #include "MultiNetwork.h" diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.cpp b/paddle/gserver/gradientmachines/NeuralNetwork.cpp index dbcb97b42b..ee36a87b9d 100644 --- a/paddle/gserver/gradientmachines/NeuralNetwork.cpp +++ b/paddle/gserver/gradientmachines/NeuralNetwork.cpp @@ -14,15 +14,15 @@ limitations under the License. */ #include "paddle/utils/Util.h" -#include "paddle/utils/Logging.h" #include "paddle/utils/CustomStackTrace.h" +#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" -#include "hl_gpu.h" +#include "MultiNetwork.h" #include "NeuralNetwork.h" #include "RecurrentGradientMachine.h" -#include "MultiNetwork.h" +#include "hl_gpu.h" #include "paddle/gserver/layers/AgentLayer.h" +#include "paddle/utils/Stat.h" namespace paddle { void parameterInitNN(int paramId, diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.h b/paddle/gserver/gradientmachines/NeuralNetwork.h index fd885b436a..384ca88f47 100644 --- a/paddle/gserver/gradientmachines/NeuralNetwork.h +++ b/paddle/gserver/gradientmachines/NeuralNetwork.h @@ -14,18 +14,18 @@ limitations under the License. 
 */
 #pragma once
 
-#include
-#include
 #include
+#include
+#include
 
-#include "paddle/utils/ClassRegistrar.h"
-#include "paddle/parameter/Parameter.h"
 #include "ModelConfig.pb.h"
+#include "paddle/gserver/dataproviders/DataProvider.h"
 #include "paddle/gserver/gradientmachines/GradientMachine.h"
 #include "paddle/gserver/layers/CostLayer.h"
 #include "paddle/gserver/layers/DataLayer.h"
-#include "paddle/gserver/dataproviders/DataProvider.h"
 #include "paddle/gserver/layers/Layer.h"
+#include "paddle/parameter/Parameter.h"
+#include "paddle/utils/ClassRegistrar.h"
 
 namespace paddle {
 /*
@@ -57,14 +57,13 @@ void parameterInitNN(int paramId,
 
 class NeuralNetwork : public GradientMachine {
 public:
-  virtual void init(
-      const ModelConfig& config,
-      ParamInitCallback callback = nullptr,
-      const std::vector<ParameterType>&
-          parameterTypes = std::vector<ParameterType>{PARAMETER_VALUE,
-                                                      PARAMETER_GRADIENT,
-                                                      PARAMETER_MOMENTUM},
-      bool useGpu = FLAGS_use_gpu);
+  virtual void init(const ModelConfig& config,
+                    ParamInitCallback callback = nullptr,
+                    const std::vector<ParameterType>& parameterTypes =
+                        std::vector<ParameterType>{PARAMETER_VALUE,
+                                                   PARAMETER_GRADIENT,
+                                                   PARAMETER_MOMENTUM},
+                    bool useGpu = FLAGS_use_gpu);
 
   /**
    * Connect two submodels and
diff --git a/paddle/gserver/gradientmachines/ParallelNeuralNetwork.h b/paddle/gserver/gradientmachines/ParallelNeuralNetwork.h
index 934a7cfc7b..8f445b1ded 100644
--- a/paddle/gserver/gradientmachines/ParallelNeuralNetwork.h
+++ b/paddle/gserver/gradientmachines/ParallelNeuralNetwork.h
@@ -37,14 +37,13 @@ public:
                         NeuralNetwork *rootNetwork = nullptr)
       : NeuralNetwork(subModelName, rootNetwork) {}
 
-  virtual void init(
-      const ModelConfig &config,
-      ParamInitCallback callback = nullptr,
-      const std::vector<ParameterType>
-          &parameterTypes = std::vector<ParameterType>{PARAMETER_VALUE,
-                                                       PARAMETER_GRADIENT,
-                                                       PARAMETER_MOMENTUM},
-      bool useGpu = FLAGS_use_gpu);
+  virtual void init(const ModelConfig &config,
+                    ParamInitCallback callback = nullptr,
+                    const std::vector<ParameterType> &parameterTypes =
+                        std::vector<ParameterType>{PARAMETER_VALUE,
+                                                   PARAMETER_GRADIENT,
+                                                   PARAMETER_MOMENTUM},
+                    bool useGpu = FLAGS_use_gpu);
 
   virtual void forward(const std::vector<Argument> &inArgs,
                        std::vector<Argument> *outArgs,
diff --git a/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp b/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp
index 4fb1a44ab7..ee1c92bdf5 100644
--- a/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp
+++ b/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp
@@ -12,17 +12,17 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
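The init hunks above are wrapped so aggressively because the defaulted parameter is itself a braced std::vector. A compact reproduction of that signature shape (Net is a stand-in; note also that default arguments on virtual functions bind statically, so overrides should repeat or avoid them):

    #include <vector>

    enum ParameterType { PARAMETER_VALUE, PARAMETER_GRADIENT, PARAMETER_MOMENTUM };

    struct Net {
      // A braced std::vector as the default argument, the same shape as
      // NeuralNetwork::init above, which forces the deep clang-format wrap.
      virtual void init(const std::vector<ParameterType>& parameterTypes =
                            std::vector<ParameterType>{PARAMETER_VALUE,
                                                       PARAMETER_GRADIENT,
                                                       PARAMETER_MOMENTUM}) {}
      virtual ~Net() = default;
    };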
 */
-#include "paddle/utils/Stat.h"
-#include "paddle/utils/Util.h"
-#include "paddle/utils/Flags.h"
+#include "RecurrentGradientMachine.h"
+#include
 #include
+#include
 #include
-#include
 #include
-#include
-#include "RecurrentGradientMachine.h"
 #include "NeuralNetwork.h"
 #include "paddle/gserver/layers/AgentLayer.h"
+#include "paddle/utils/Flags.h"
+#include "paddle/utils/Stat.h"
+#include "paddle/utils/Util.h"
 
 P_DEFINE_string(diy_beam_search_prob_so, "", "the diy beam search cost so");
 
@@ -78,20 +78,22 @@ static inline SymbolType loadDiySymbol(const char* symbolName) {
   return reinterpret_cast<SymbolType>(sym);
 }
 
-static InitFunction __init__diy_prob_method([] {
-  std::string soName = FLAGS_diy_beam_search_prob_so;
-  if (!soName.empty()) {
-    gDiyProbHandle = dlopen(soName.c_str(), RTLD_LAZY);
-    CHECK(gDiyProbHandle) << "Cannot Open DIY Prob So " << soName;
-    atexit(exit_diy_prob);
-    gDiyProbMethod =
-        loadDiySymbol<decltype(gDiyProbMethod)>(DIY_CALC_PROB_SYMBOL_NAME);
-    gDiyProbStart =
-        loadDiySymbol<decltype(gDiyProbStart)>(DIY_START_CALC_PROB_SYMBOL_NAME);
-    gDiyProbStop =
-        loadDiySymbol<decltype(gDiyProbStop)>(DIY_FINISH_CALC_PROB_SYMBOL_NAME);
-  }
-}, std::numeric_limits<int>::max());
+static InitFunction __init__diy_prob_method(
+    [] {
+      std::string soName = FLAGS_diy_beam_search_prob_so;
+      if (!soName.empty()) {
+        gDiyProbHandle = dlopen(soName.c_str(), RTLD_LAZY);
+        CHECK(gDiyProbHandle) << "Cannot Open DIY Prob So " << soName;
+        atexit(exit_diy_prob);
+        gDiyProbMethod =
+            loadDiySymbol<decltype(gDiyProbMethod)>(DIY_CALC_PROB_SYMBOL_NAME);
+        gDiyProbStart = loadDiySymbol<decltype(gDiyProbStart)>(
+            DIY_START_CALC_PROB_SYMBOL_NAME);
+        gDiyProbStop = loadDiySymbol<decltype(gDiyProbStop)>(
+            DIY_FINISH_CALC_PROB_SYMBOL_NAME);
+      }
+    },
+    std::numeric_limits<int>::max());
 
 class BeamSearchControlCallbacks {
 public:
@@ -1281,10 +1283,9 @@ void RecurrentGradientMachine::beamSearch(size_t batchSize) {
       std::vector<std::vector<int>*> prefixes;
       prefixes.resize(paths.size());
       std::transform(
-          paths.begin(),
-          paths.end(),
-          prefixes.begin(),
-          [](const Path& p) { return const_cast<std::vector<int>*>(&p.ids); });
+          paths.begin(), paths.end(), prefixes.begin(), [](const Path& p) {
+            return const_cast<std::vector<int>*>(&p.ids);
+          });
       beamSearchCtrlCallbacks_->beamSearchCandidateAdjust(
           prefixes, frames_[machineCur].get(), i);
     }
diff --git a/paddle/gserver/gradientmachines/RecurrentGradientMachine.h b/paddle/gserver/gradientmachines/RecurrentGradientMachine.h
index 369c8c3d98..db7d8aff6d 100644
--- a/paddle/gserver/gradientmachines/RecurrentGradientMachine.h
+++ b/paddle/gserver/gradientmachines/RecurrentGradientMachine.h
@@ -14,9 +14,9 @@ limitations under the License. */
 
 #pragma once
 
+#include
 #include "GradientMachine.h"
 #include "NeuralNetwork.h"
-#include
 
 #include "paddle/utils/Locks.h"
 
diff --git a/paddle/gserver/layers/BatchNormBaseLayer.cpp b/paddle/gserver/layers/BatchNormBaseLayer.cpp
index 51463f1118..1ceaaaa206 100644
--- a/paddle/gserver/layers/BatchNormBaseLayer.cpp
+++ b/paddle/gserver/layers/BatchNormBaseLayer.cpp
@@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
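The __init__diy_prob_method hunk above wraps a dlopen/dlsym registration that runs at static-initialization time. A minimal sketch of the symbol-loading step it builds on (ProbFn and loadProbSymbol are hypothetical names; the real code resolves the DIY_*_SYMBOL_NAME symbols and links with -ldl):

    #include <dlfcn.h>
    #include <cstdio>
    #include <cstdlib>

    using ProbFn = double (*)(const int* ids, int len);

    // Resolve a symbol from an already-dlopen()ed shared object and cast it
    // to the expected function-pointer type, aborting on failure.
    ProbFn loadProbSymbol(void* handle, const char* name) {
      void* sym = dlsym(handle, name);
      if (!sym) {
        std::fprintf(stderr, "cannot resolve %s: %s\n", name, dlerror());
        std::exit(1);
      }
      return reinterpret_cast<ProbFn>(sym);
    }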
*/ -#include "paddle/utils/Stat.h" -#include "Layer.h" #include "BatchNormBaseLayer.h" #include "BatchNormalizationLayer.h" +#include "Layer.h" +#include "paddle/utils/Stat.h" #ifndef PADDLE_ONLY_CPU #include "CudnnBatchNormLayer.h" #endif diff --git a/paddle/gserver/layers/BatchNormBaseLayer.h b/paddle/gserver/layers/BatchNormBaseLayer.h index f5a555a6d0..75bda95de1 100644 --- a/paddle/gserver/layers/BatchNormBaseLayer.h +++ b/paddle/gserver/layers/BatchNormBaseLayer.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/utils/Stat.h" #include "Layer.h" +#include "paddle/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/BatchNormalizationLayer.h b/paddle/gserver/layers/BatchNormalizationLayer.h index 56be473568..052c207732 100644 --- a/paddle/gserver/layers/BatchNormalizationLayer.h +++ b/paddle/gserver/layers/BatchNormalizationLayer.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "Layer.h" #include "BatchNormBaseLayer.h" +#include "Layer.h" namespace paddle { diff --git a/paddle/gserver/layers/ConcatenateLayer.cpp b/paddle/gserver/layers/ConcatenateLayer.cpp index f6b3d86b8c..d19adace7d 100644 --- a/paddle/gserver/layers/ConcatenateLayer.cpp +++ b/paddle/gserver/layers/ConcatenateLayer.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Stat.h" #include "Layer.h" #include "Projection.h" +#include "paddle/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/ContextProjection.cpp b/paddle/gserver/layers/ContextProjection.cpp index 6080aa51b9..7ac56e3a2a 100644 --- a/paddle/gserver/layers/ContextProjection.cpp +++ b/paddle/gserver/layers/ContextProjection.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Stat.h" #include "ContextProjection.h" +#include "paddle/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/ConvBaseLayer.cpp b/paddle/gserver/layers/ConvBaseLayer.cpp index 473ca24a94..7b234dc2a6 100644 --- a/paddle/gserver/layers/ConvBaseLayer.cpp +++ b/paddle/gserver/layers/ConvBaseLayer.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Logging.h" #include "ConvBaseLayer.h" #include "paddle/math/MathUtils.h" +#include "paddle/utils/Logging.h" namespace paddle { bool ConvBaseLayer::init(const LayerMap& layerMap, diff --git a/paddle/gserver/layers/ConvOperator.cpp b/paddle/gserver/layers/ConvOperator.cpp index 3ede98ba4b..f943410dee 100644 --- a/paddle/gserver/layers/ConvOperator.cpp +++ b/paddle/gserver/layers/ConvOperator.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "paddle/math/Matrix.h" -#include "paddle/math/MathUtils.h" #include "Operator.h" +#include "paddle/math/MathUtils.h" +#include "paddle/math/Matrix.h" namespace paddle { diff --git a/paddle/gserver/layers/ConvProjection.cpp b/paddle/gserver/layers/ConvProjection.cpp index e72dc37ec8..aa634b3287 100644 --- a/paddle/gserver/layers/ConvProjection.cpp +++ b/paddle/gserver/layers/ConvProjection.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Stat.h" #include "ConvProjection.h" +#include "paddle/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/ConvShiftLayer.cpp b/paddle/gserver/layers/ConvShiftLayer.cpp index 527d885d86..9bfb1ab7a4 100644 --- a/paddle/gserver/layers/ConvShiftLayer.cpp +++ b/paddle/gserver/layers/ConvShiftLayer.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Logging.h" #include "Layer.h" #include "paddle/math/Matrix.h" +#include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/ConvexCombinationLayer.cpp b/paddle/gserver/layers/ConvexCombinationLayer.cpp index 57ff95fe37..3f4d77a2fe 100644 --- a/paddle/gserver/layers/ConvexCombinationLayer.cpp +++ b/paddle/gserver/layers/ConvexCombinationLayer.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Logging.h" #include "Layer.h" #include "paddle/math/Matrix.h" +#include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/CosSimVecMatLayer.cpp b/paddle/gserver/layers/CosSimVecMatLayer.cpp index e8a7f671ee..ad490b0b8c 100644 --- a/paddle/gserver/layers/CosSimVecMatLayer.cpp +++ b/paddle/gserver/layers/CosSimVecMatLayer.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Logging.h" #include "Layer.h" #include "paddle/math/Matrix.h" +#include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/CostLayer.cpp b/paddle/gserver/layers/CostLayer.cpp index 90cd473c42..7e9519f6b3 100644 --- a/paddle/gserver/layers/CostLayer.cpp +++ b/paddle/gserver/layers/CostLayer.cpp @@ -12,11 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include +#include "CostLayer.h" #include -#include "paddle/utils/Logging.h" #include -#include "CostLayer.h" +#include +#include "paddle/utils/Logging.h" #include "paddle/math/SparseMatrix.h" diff --git a/paddle/gserver/layers/CudnnBatchNormLayer.cpp b/paddle/gserver/layers/CudnnBatchNormLayer.cpp index d44c217105..09dac05a7a 100644 --- a/paddle/gserver/layers/CudnnBatchNormLayer.cpp +++ b/paddle/gserver/layers/CudnnBatchNormLayer.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Stat.h" -#include "Layer.h" #include "CudnnBatchNormLayer.h" +#include "Layer.h" +#include "paddle/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/CudnnBatchNormLayer.h b/paddle/gserver/layers/CudnnBatchNormLayer.h index a52a683e15..b1e7d2082f 100644 --- a/paddle/gserver/layers/CudnnBatchNormLayer.h +++ b/paddle/gserver/layers/CudnnBatchNormLayer.h @@ -14,9 +14,9 @@ limitations under the License. */ #pragma once -#include "paddle/utils/Stat.h" -#include "Layer.h" #include "BatchNormBaseLayer.h" +#include "Layer.h" +#include "paddle/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/CudnnConvLayer.cpp b/paddle/gserver/layers/CudnnConvLayer.cpp index 6e28d5eb42..978c2c1479 100644 --- a/paddle/gserver/layers/CudnnConvLayer.cpp +++ b/paddle/gserver/layers/CudnnConvLayer.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include "CudnnConvLayer.h" #include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" -#include "CudnnConvLayer.h" namespace paddle { diff --git a/paddle/gserver/layers/CudnnConvLayer.h b/paddle/gserver/layers/CudnnConvLayer.h index 6317fab6f8..b869c695bd 100644 --- a/paddle/gserver/layers/CudnnConvLayer.h +++ b/paddle/gserver/layers/CudnnConvLayer.h @@ -14,10 +14,10 @@ limitations under the License. */ #pragma once +#include #include "ConvBaseLayer.h" -#include "paddle/math/Matrix.h" #include "Projection.h" -#include +#include "paddle/math/Matrix.h" namespace paddle { diff --git a/paddle/gserver/layers/CudnnPoolLayer.cpp b/paddle/gserver/layers/CudnnPoolLayer.cpp index d0e71c6345..4adb2d4709 100644 --- a/paddle/gserver/layers/CudnnPoolLayer.cpp +++ b/paddle/gserver/layers/CudnnPoolLayer.cpp @@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include "CudnnPoolLayer.h" +#include "paddle/math/Matrix.h" #include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" -#include "paddle/math/Matrix.h" -#include "CudnnPoolLayer.h" namespace paddle { diff --git a/paddle/gserver/layers/EosIdCheckLayer.cpp b/paddle/gserver/layers/EosIdCheckLayer.cpp index dc3c6e6b64..fa53e2e4cf 100644 --- a/paddle/gserver/layers/EosIdCheckLayer.cpp +++ b/paddle/gserver/layers/EosIdCheckLayer.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Logging.h" #include "Layer.h" +#include "paddle/utils/Logging.h" namespace paddle { /** diff --git a/paddle/gserver/layers/ExpandConvBaseLayer.h b/paddle/gserver/layers/ExpandConvBaseLayer.h index e14f6e6f44..8445642217 100644 --- a/paddle/gserver/layers/ExpandConvBaseLayer.h +++ b/paddle/gserver/layers/ExpandConvBaseLayer.h @@ -14,9 +14,9 @@ limitations under the License. 
*/ #pragma once +#include #include "ConvBaseLayer.h" #include "paddle/math/Matrix.h" -#include namespace paddle { diff --git a/paddle/gserver/layers/ExpandConvLayer.cpp b/paddle/gserver/layers/ExpandConvLayer.cpp index dcc7839960..f9267b81a7 100644 --- a/paddle/gserver/layers/ExpandConvLayer.cpp +++ b/paddle/gserver/layers/ExpandConvLayer.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include "ExpandConvLayer.h" #include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" -#include "ExpandConvLayer.h" namespace paddle { diff --git a/paddle/gserver/layers/ExpandConvLayer.h b/paddle/gserver/layers/ExpandConvLayer.h index 6f8504b50a..de81a017e1 100644 --- a/paddle/gserver/layers/ExpandConvLayer.h +++ b/paddle/gserver/layers/ExpandConvLayer.h @@ -14,9 +14,9 @@ limitations under the License. */ #pragma once -#include "paddle/math/Matrix.h" #include #include "ExpandConvBaseLayer.h" +#include "paddle/math/Matrix.h" namespace paddle { diff --git a/paddle/gserver/layers/ExpandConvTransLayer.cpp b/paddle/gserver/layers/ExpandConvTransLayer.cpp index cd4965c3c5..520586b138 100644 --- a/paddle/gserver/layers/ExpandConvTransLayer.cpp +++ b/paddle/gserver/layers/ExpandConvTransLayer.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include "ExpandConvTransLayer.h" #include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" -#include "ExpandConvTransLayer.h" /* The implementation of the convTransLayer is basically a swap of forward and * backward of the original convLayer. diff --git a/paddle/gserver/layers/ExpandConvTransLayer.h b/paddle/gserver/layers/ExpandConvTransLayer.h index fa9d7fb481..4a527d6799 100644 --- a/paddle/gserver/layers/ExpandConvTransLayer.h +++ b/paddle/gserver/layers/ExpandConvTransLayer.h @@ -14,9 +14,9 @@ limitations under the License. */ #pragma once -#include "paddle/math/Matrix.h" #include #include "ExpandConvBaseLayer.h" +#include "paddle/math/Matrix.h" namespace paddle { diff --git a/paddle/gserver/layers/FullyConnectedLayer.cpp b/paddle/gserver/layers/FullyConnectedLayer.cpp index d2a028dd80..89afe33c36 100644 --- a/paddle/gserver/layers/FullyConnectedLayer.cpp +++ b/paddle/gserver/layers/FullyConnectedLayer.cpp @@ -13,11 +13,11 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "FullyConnectedLayer.h" +#include +#include +#include "paddle/math/SparseMatrix.h" #include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" -#include "paddle/math/SparseMatrix.h" -#include -#include namespace paddle { diff --git a/paddle/gserver/layers/GatedRecurrentLayer.cpp b/paddle/gserver/layers/GatedRecurrentLayer.cpp index 01b210ba70..930d9a0561 100644 --- a/paddle/gserver/layers/GatedRecurrentLayer.cpp +++ b/paddle/gserver/layers/GatedRecurrentLayer.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "Layer.h" #include "GatedRecurrentLayer.h" +#include "Layer.h" #include "paddle/utils/Stat.h" namespace paddle { @@ -386,8 +386,9 @@ void GatedRecurrentLayer::backwardBatch(int batchSize, MatrixPtr inputGrad) { { batchSize = outputGradTmp->getHeight(); gruValue.prevOutValue = - (n == 0 ? nullptr : (batchValue_->getBatchValue(n - 1, batchSize)) - ->getData()); + (n == 0 + ? nullptr + : (batchValue_->getBatchValue(n - 1, batchSize))->getData()); gruGrad.prevOutGrad = (n == 0 ? nullptr : (batchGrad_->getBatchValue(n - 1, batchSize))->getData()); diff --git a/paddle/gserver/layers/GatedRecurrentLayer.h b/paddle/gserver/layers/GatedRecurrentLayer.h index e099b4d18b..25770ce57f 100644 --- a/paddle/gserver/layers/GatedRecurrentLayer.h +++ b/paddle/gserver/layers/GatedRecurrentLayer.h @@ -14,10 +14,10 @@ limitations under the License. */ #pragma once -#include "paddle/math/Matrix.h" -#include "SequenceToBatch.h" #include "GruCompute.h" #include "Layer.h" +#include "SequenceToBatch.h" +#include "paddle/math/Matrix.h" namespace paddle { diff --git a/paddle/gserver/layers/GruCompute.cpp b/paddle/gserver/layers/GruCompute.cpp index 7d4e8001a8..06907768e9 100644 --- a/paddle/gserver/layers/GruCompute.cpp +++ b/paddle/gserver/layers/GruCompute.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Util.h" #include "GruCompute.h" #include "hl_recurrent_apply.cuh" +#include "paddle/utils/Util.h" namespace paddle { diff --git a/paddle/gserver/layers/GruCompute.h b/paddle/gserver/layers/GruCompute.h index 2a5da72068..42c0019319 100644 --- a/paddle/gserver/layers/GruCompute.h +++ b/paddle/gserver/layers/GruCompute.h @@ -14,9 +14,9 @@ limitations under the License. */ #pragma once -#include "paddle/utils/TypeDefs.h" #include "ModelConfig.pb.h" #include "hl_gpu.h" +#include "paddle/utils/TypeDefs.h" namespace paddle { diff --git a/paddle/gserver/layers/GruStepLayer.cpp b/paddle/gserver/layers/GruStepLayer.cpp index c48b5e40e6..4a1006aa94 100644 --- a/paddle/gserver/layers/GruStepLayer.cpp +++ b/paddle/gserver/layers/GruStepLayer.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "Layer.h" #include "GruCompute.h" +#include "Layer.h" #include "paddle/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/IdentityProjection.cpp b/paddle/gserver/layers/IdentityProjection.cpp index 8660631b5a..f1d41a33d4 100644 --- a/paddle/gserver/layers/IdentityProjection.cpp +++ b/paddle/gserver/layers/IdentityProjection.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Stat.h" #include "Projection.h" +#include "paddle/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/InterpolationLayer.cpp b/paddle/gserver/layers/InterpolationLayer.cpp index 94d4614b21..44fe1fb1fe 100644 --- a/paddle/gserver/layers/InterpolationLayer.cpp +++ b/paddle/gserver/layers/InterpolationLayer.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
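The backwardBatch hunk above only re-wraps the first-step guard: at n == 0 there is no previous batch output, so the pointer is deliberately null. The pattern in isolation (prevOutOrNull and the float vectors are illustrative stand-ins for batchValue_/batchGrad_ rows):

    #include <vector>

    // Return the previous batch's data, or nullptr on the first step.
    const float* prevOutOrNull(const std::vector<std::vector<float>>& batches,
                               int n) {
      return n == 0 ? nullptr : batches[n - 1].data();
    }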
*/ -#include "paddle/utils/Logging.h" #include "Layer.h" #include "paddle/math/Matrix.h" +#include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/Layer.cpp b/paddle/gserver/layers/Layer.cpp index 3c539f3076..c9e121047b 100644 --- a/paddle/gserver/layers/Layer.cpp +++ b/paddle/gserver/layers/Layer.cpp @@ -14,15 +14,15 @@ limitations under the License. */ #include "paddle/utils/Util.h" -#include "paddle/utils/Logging.h" #include "paddle/math/SparseMatrix.h" +#include "paddle/utils/Logging.h" #include "AddtoLayer.h" +#include "CRFLayer.h" #include "CosSimLayer.h" #include "CostLayer.h" -#include "ExpandConvLayer.h" -#include "CRFLayer.h" #include "DataLayer.h" +#include "ExpandConvLayer.h" #include "FullyConnectedLayer.h" #include "HierarchicalSigmoidLayer.h" #include "MaxLayer.h" diff --git a/paddle/gserver/layers/Layer.h b/paddle/gserver/layers/Layer.h index 6609e16c4c..172e558b82 100644 --- a/paddle/gserver/layers/Layer.h +++ b/paddle/gserver/layers/Layer.h @@ -14,18 +14,18 @@ limitations under the License. */ #pragma once -#include -#include #include -#include "paddle/utils/ClassRegistrar.h" +#include +#include +#include "ModelConfig.pb.h" #include "paddle/math/CpuSparseMatrix.h" #include "paddle/parameter/Parameter.h" +#include "paddle/utils/ClassRegistrar.h" #include "paddle/utils/Util.h" -#include "ModelConfig.pb.h" -#include "paddle/gserver/activations/ActivationFunction.h" #include #include +#include "paddle/gserver/activations/ActivationFunction.h" /// Macro for registering a layer type. /// Example: REGISTER_LAYER(crf_error, CRFDecodingErrorLayer); diff --git a/paddle/gserver/layers/LinearChainCRF.cpp b/paddle/gserver/layers/LinearChainCRF.cpp index c6414c822e..af550c7a01 100644 --- a/paddle/gserver/layers/LinearChainCRF.cpp +++ b/paddle/gserver/layers/LinearChainCRF.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include #include "LinearChainCRF.h" +#include namespace paddle { diff --git a/paddle/gserver/layers/LinearChainCTC.cpp b/paddle/gserver/layers/LinearChainCTC.cpp index 60e814fc30..cb2b249110 100644 --- a/paddle/gserver/layers/LinearChainCTC.cpp +++ b/paddle/gserver/layers/LinearChainCTC.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include #include "LinearChainCTC.h" +#include #include namespace paddle { diff --git a/paddle/gserver/layers/LstmCompute.cpp b/paddle/gserver/layers/LstmCompute.cpp index 18f7996958..4c42970964 100644 --- a/paddle/gserver/layers/LstmCompute.cpp +++ b/paddle/gserver/layers/LstmCompute.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Util.h" -#include "hl_recurrent_apply.cuh" #include "LstmCompute.h" +#include "hl_recurrent_apply.cuh" +#include "paddle/utils/Util.h" namespace paddle { diff --git a/paddle/gserver/layers/LstmCompute.h b/paddle/gserver/layers/LstmCompute.h index 9b7aee19dd..140a4c6ecf 100644 --- a/paddle/gserver/layers/LstmCompute.h +++ b/paddle/gserver/layers/LstmCompute.h @@ -14,9 +14,9 @@ limitations under the License. 
*/ #pragma once -#include "paddle/utils/TypeDefs.h" #include "ModelConfig.pb.h" #include "hl_gpu.h" +#include "paddle/utils/TypeDefs.h" namespace paddle { diff --git a/paddle/gserver/layers/LstmLayer.cpp b/paddle/gserver/layers/LstmLayer.cpp index 975edcfe7f..452091eff4 100644 --- a/paddle/gserver/layers/LstmLayer.cpp +++ b/paddle/gserver/layers/LstmLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "LstmLayer.h" -#include "paddle/math/Matrix.h" #include "paddle/math/BaseMatrix.h" +#include "paddle/math/Matrix.h" #include "paddle/utils/Stat.h" P_DECLARE_bool(prev_batch_state); diff --git a/paddle/gserver/layers/LstmLayer.h b/paddle/gserver/layers/LstmLayer.h index 16c62aa88d..f49df2c412 100644 --- a/paddle/gserver/layers/LstmLayer.h +++ b/paddle/gserver/layers/LstmLayer.h @@ -15,10 +15,10 @@ limitations under the License. */ #pragma once #include "Layer.h" -#include "paddle/math/Matrix.h" -#include "paddle/math/BaseMatrix.h" -#include "SequenceToBatch.h" #include "LstmCompute.h" +#include "SequenceToBatch.h" +#include "paddle/math/BaseMatrix.h" +#include "paddle/math/Matrix.h" namespace paddle { /** diff --git a/paddle/gserver/layers/MDLstmLayer.cpp b/paddle/gserver/layers/MDLstmLayer.cpp index 9d3797d16f..1243c12889 100644 --- a/paddle/gserver/layers/MDLstmLayer.cpp +++ b/paddle/gserver/layers/MDLstmLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "LstmLayer.h" -#include "paddle/math/Matrix.h" #include "paddle/math/BaseMatrix.h" +#include "paddle/math/Matrix.h" namespace paddle { @@ -318,7 +318,7 @@ void MDLstmLayer::forward(PassType passType) { CHECK_EQ(starts[numSequences], batchSize); int* dimsData = input.cpuSequenceDims->getData(); - CHECK_EQ(int(input.cpuSequenceDims->getSize()), numDims_ * numSequences); + CHECK_EQ(int(input.cpuSequenceDims->getSize()), numDims_* numSequences); for (int i = 0; i < numSequences; i++) { std::vector dims; diff --git a/paddle/gserver/layers/MaxOutLayer.cpp b/paddle/gserver/layers/MaxOutLayer.cpp index 4fb99ce2a2..3a86a95321 100644 --- a/paddle/gserver/layers/MaxOutLayer.cpp +++ b/paddle/gserver/layers/MaxOutLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "MaxOutLayer.h" -#include "hl_gpu.h" #include "hl_cnn.h" +#include "hl_gpu.h" namespace paddle { diff --git a/paddle/gserver/layers/MixedLayer.cpp b/paddle/gserver/layers/MixedLayer.cpp index 490b217347..2525b1984b 100644 --- a/paddle/gserver/layers/MixedLayer.cpp +++ b/paddle/gserver/layers/MixedLayer.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Stat.h" #include "MixedLayer.h" +#include "paddle/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/MixedLayer.h b/paddle/gserver/layers/MixedLayer.h index d73ba6b7a1..9655a152c7 100644 --- a/paddle/gserver/layers/MixedLayer.h +++ b/paddle/gserver/layers/MixedLayer.h @@ -15,8 +15,8 @@ limitations under the License. 
*/ #pragma once #include "Layer.h" -#include "Projection.h" #include "Operator.h" +#include "Projection.h" namespace paddle { diff --git a/paddle/gserver/layers/MultiplexLayer.cpp b/paddle/gserver/layers/MultiplexLayer.cpp index dc4a1ec321..d09720c525 100644 --- a/paddle/gserver/layers/MultiplexLayer.cpp +++ b/paddle/gserver/layers/MultiplexLayer.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Logging.h" #include "Layer.h" #include "paddle/math/Matrix.h" +#include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/NormLayer.cpp b/paddle/gserver/layers/NormLayer.cpp index b8682a1422..3db0af2515 100644 --- a/paddle/gserver/layers/NormLayer.cpp +++ b/paddle/gserver/layers/NormLayer.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Logging.h" #include "NormLayer.h" #include "NormProjectionLayer.h" +#include "paddle/utils/Logging.h" namespace paddle { REGISTER_LAYER_CREATE_FUNC(norm, &NormLayer::create); diff --git a/paddle/gserver/layers/NormLayer.h b/paddle/gserver/layers/NormLayer.h index aedbb95b4f..86255b231b 100644 --- a/paddle/gserver/layers/NormLayer.h +++ b/paddle/gserver/layers/NormLayer.h @@ -16,8 +16,8 @@ limitations under the License. */ #include #include "Layer.h" -#include "paddle/math/Matrix.h" #include "NormLayer.h" +#include "paddle/math/Matrix.h" namespace paddle { diff --git a/paddle/gserver/layers/NormProjectionLayer.cpp b/paddle/gserver/layers/NormProjectionLayer.cpp index ea301292e0..934fc31e0a 100644 --- a/paddle/gserver/layers/NormProjectionLayer.cpp +++ b/paddle/gserver/layers/NormProjectionLayer.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include "NormProjectionLayer.h" #include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" -#include "NormProjectionLayer.h" namespace paddle { size_t CMRProjectionNormLayer::getSize() { diff --git a/paddle/gserver/layers/NormProjectionLayer.h b/paddle/gserver/layers/NormProjectionLayer.h index 0db8e2551f..4f7b638334 100644 --- a/paddle/gserver/layers/NormProjectionLayer.h +++ b/paddle/gserver/layers/NormProjectionLayer.h @@ -14,9 +14,9 @@ limitations under the License. */ #pragma once +#include #include "NormLayer.h" #include "paddle/math/Matrix.h" -#include namespace paddle { diff --git a/paddle/gserver/layers/Operator.h b/paddle/gserver/layers/Operator.h index b0586b59e9..6fd331382f 100644 --- a/paddle/gserver/layers/Operator.h +++ b/paddle/gserver/layers/Operator.h @@ -14,11 +14,11 @@ limitations under the License. */ #pragma once -#include "paddle/parameter/Parameter.h" #include "ModelConfig.pb.h" +#include "paddle/parameter/Parameter.h" -#include "paddle/parameter/Argument.h" #include "Layer.h" +#include "paddle/parameter/Argument.h" namespace paddle { diff --git a/paddle/gserver/layers/OuterProdLayer.cpp b/paddle/gserver/layers/OuterProdLayer.cpp index 42587dcce5..cf9a008318 100644 --- a/paddle/gserver/layers/OuterProdLayer.cpp +++ b/paddle/gserver/layers/OuterProdLayer.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Logging.h" #include "Layer.h" #include "paddle/math/Matrix.h" +#include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/PoolLayer.cpp b/paddle/gserver/layers/PoolLayer.cpp index 36e396487e..96d5c54acc 100644 --- a/paddle/gserver/layers/PoolLayer.cpp +++ b/paddle/gserver/layers/PoolLayer.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Logging.h" #include "PoolLayer.h" #include "PoolProjectionLayer.h" +#include "paddle/utils/Logging.h" #ifndef PADDLE_ONLY_CPU #include "CudnnPoolLayer.h" #endif diff --git a/paddle/gserver/layers/PoolLayer.h b/paddle/gserver/layers/PoolLayer.h index c05d7a364d..318b89d7c2 100644 --- a/paddle/gserver/layers/PoolLayer.h +++ b/paddle/gserver/layers/PoolLayer.h @@ -14,10 +14,10 @@ limitations under the License. */ #pragma once +#include #include "Layer.h" -#include "paddle/math/Matrix.h" #include "paddle/math/MathUtils.h" -#include +#include "paddle/math/Matrix.h" namespace paddle { diff --git a/paddle/gserver/layers/PoolProjectionLayer.cpp b/paddle/gserver/layers/PoolProjectionLayer.cpp index 392c548d45..ed5011ab89 100644 --- a/paddle/gserver/layers/PoolProjectionLayer.cpp +++ b/paddle/gserver/layers/PoolProjectionLayer.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include "PoolProjectionLayer.h" #include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" -#include "PoolProjectionLayer.h" namespace paddle { diff --git a/paddle/gserver/layers/PowerLayer.cpp b/paddle/gserver/layers/PowerLayer.cpp index eb69249270..64fecab5b0 100644 --- a/paddle/gserver/layers/PowerLayer.cpp +++ b/paddle/gserver/layers/PowerLayer.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Logging.h" #include "Layer.h" #include "paddle/math/Matrix.h" +#include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/RecurrentLayer.cpp b/paddle/gserver/layers/RecurrentLayer.cpp index 0832eeaa10..9f3bf76a2d 100644 --- a/paddle/gserver/layers/RecurrentLayer.cpp +++ b/paddle/gserver/layers/RecurrentLayer.cpp @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "Layer.h" -#include "paddle/utils/Stat.h" #include "SequenceToBatch.h" #include "paddle/utils/CommandLineParser.h" +#include "paddle/utils/Stat.h" P_DEFINE_bool(rnn_use_batch, false, "Using the batch method for calculation."); diff --git a/paddle/gserver/layers/RecurrentLayerGroup.cpp b/paddle/gserver/layers/RecurrentLayerGroup.cpp index 5cb4220623..af8dd61d84 100644 --- a/paddle/gserver/layers/RecurrentLayerGroup.cpp +++ b/paddle/gserver/layers/RecurrentLayerGroup.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "paddle/gserver/layers/Layer.h" #include +#include "paddle/gserver/layers/Layer.h" #include "paddle/gserver/gradientmachines/RecurrentGradientMachine.h" #include "paddle/utils/Stat.h" diff --git a/paddle/gserver/layers/ResizeLayer.cpp b/paddle/gserver/layers/ResizeLayer.cpp index e79732155a..7fcb3adea0 100644 --- a/paddle/gserver/layers/ResizeLayer.cpp +++ b/paddle/gserver/layers/ResizeLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "Layer.h" -#include "paddle/math/Matrix.h" #include "paddle/math/BaseMatrix.h" +#include "paddle/math/Matrix.h" namespace paddle { /** diff --git a/paddle/gserver/layers/ScalingLayer.cpp b/paddle/gserver/layers/ScalingLayer.cpp index 013bff6b98..7f0084be6b 100644 --- a/paddle/gserver/layers/ScalingLayer.cpp +++ b/paddle/gserver/layers/ScalingLayer.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Logging.h" #include "Layer.h" #include "paddle/math/Matrix.h" +#include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/SelectiveFullyConnectedLayer.cpp b/paddle/gserver/layers/SelectiveFullyConnectedLayer.cpp index 75d9fa8a97..9200a01eee 100644 --- a/paddle/gserver/layers/SelectiveFullyConnectedLayer.cpp +++ b/paddle/gserver/layers/SelectiveFullyConnectedLayer.cpp @@ -13,11 +13,11 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "SelectiveFullyConnectedLayer.h" +#include +#include +#include "paddle/math/SparseMatrix.h" #include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" -#include "paddle/math/SparseMatrix.h" -#include -#include namespace paddle { diff --git a/paddle/gserver/layers/SequenceConcatLayer.cpp b/paddle/gserver/layers/SequenceConcatLayer.cpp index d3e0e16e96..069bc26e60 100644 --- a/paddle/gserver/layers/SequenceConcatLayer.cpp +++ b/paddle/gserver/layers/SequenceConcatLayer.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Logging.h" #include "Layer.h" #include "paddle/math/Matrix.h" +#include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/SequencePoolLayer.cpp b/paddle/gserver/layers/SequencePoolLayer.cpp index 856c889e3b..35260ca912 100644 --- a/paddle/gserver/layers/SequencePoolLayer.cpp +++ b/paddle/gserver/layers/SequencePoolLayer.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Logging.h" #include "SequencePoolLayer.h" +#include "paddle/utils/Logging.h" namespace paddle { diff --git a/paddle/gserver/layers/SequenceReshapeLayer.cpp b/paddle/gserver/layers/SequenceReshapeLayer.cpp index 4b90424215..23924b0490 100644 --- a/paddle/gserver/layers/SequenceReshapeLayer.cpp +++ b/paddle/gserver/layers/SequenceReshapeLayer.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "paddle/utils/Logging.h" #include "Layer.h" #include "paddle/math/Matrix.h" +#include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/SequenceToBatch.cpp b/paddle/gserver/layers/SequenceToBatch.cpp index c12ed82197..5fa7b6f488 100644 --- a/paddle/gserver/layers/SequenceToBatch.cpp +++ b/paddle/gserver/layers/SequenceToBatch.cpp @@ -12,11 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include -#include #include "SequenceToBatch.h" -#include #include +#include +#include +#include namespace paddle { diff --git a/paddle/gserver/layers/SequenceToBatch.h b/paddle/gserver/layers/SequenceToBatch.h index fe9b34b224..17e735a135 100644 --- a/paddle/gserver/layers/SequenceToBatch.h +++ b/paddle/gserver/layers/SequenceToBatch.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/math/Vector.h" #include "paddle/math/Matrix.h" +#include "paddle/math/Vector.h" namespace paddle { diff --git a/paddle/gserver/layers/SlopeInterceptLayer.cpp b/paddle/gserver/layers/SlopeInterceptLayer.cpp index 5c00e54f8c..b678f414b6 100644 --- a/paddle/gserver/layers/SlopeInterceptLayer.cpp +++ b/paddle/gserver/layers/SlopeInterceptLayer.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Logging.h" #include "Layer.h" #include "paddle/math/Matrix.h" +#include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/SubSequenceLayer.cpp b/paddle/gserver/layers/SubSequenceLayer.cpp index 8b35456391..c52fbee262 100644 --- a/paddle/gserver/layers/SubSequenceLayer.cpp +++ b/paddle/gserver/layers/SubSequenceLayer.cpp @@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Logging.h" #include "Layer.h" #include "paddle/math/Matrix.h" #include "paddle/math/Vector.h" +#include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/SumToOneNormLayer.cpp b/paddle/gserver/layers/SumToOneNormLayer.cpp index e6759171cb..aa99b49380 100644 --- a/paddle/gserver/layers/SumToOneNormLayer.cpp +++ b/paddle/gserver/layers/SumToOneNormLayer.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Logging.h" #include "Layer.h" #include "paddle/math/Matrix.h" +#include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/TransLayer.cpp b/paddle/gserver/layers/TransLayer.cpp index 5cbaaf8f08..d1fa90f384 100644 --- a/paddle/gserver/layers/TransLayer.cpp +++ b/paddle/gserver/layers/TransLayer.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "paddle/utils/Logging.h" #include "TransLayer.h" +#include "paddle/utils/Logging.h" namespace paddle { REGISTER_LAYER(trans, TransLayer); diff --git a/paddle/gserver/layers/TransLayer.h b/paddle/gserver/layers/TransLayer.h index 8189700759..b43fa1ebfb 100644 --- a/paddle/gserver/layers/TransLayer.h +++ b/paddle/gserver/layers/TransLayer.h @@ -14,9 +14,9 @@ limitations under the License. */ #pragma once +#include #include "Layer.h" #include "paddle/math/Matrix.h" -#include namespace paddle { /** diff --git a/paddle/gserver/layers/TransposedFullMatrixProjection.cpp b/paddle/gserver/layers/TransposedFullMatrixProjection.cpp index 8282584ab4..3f7ff04882 100644 --- a/paddle/gserver/layers/TransposedFullMatrixProjection.cpp +++ b/paddle/gserver/layers/TransposedFullMatrixProjection.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Stat.h" #include "Projection.h" +#include "paddle/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/ValidationLayer.cpp b/paddle/gserver/layers/ValidationLayer.cpp index f029ea4c51..5127bcaba3 100644 --- a/paddle/gserver/layers/ValidationLayer.cpp +++ b/paddle/gserver/layers/ValidationLayer.cpp @@ -12,12 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include #include #include +#include -#include "paddle/utils/Logging.h" #include "ValidationLayer.h" +#include "paddle/utils/Logging.h" namespace paddle { diff --git a/paddle/gserver/layers/ValidationLayer.h b/paddle/gserver/layers/ValidationLayer.h index f9c61503aa..471055429d 100644 --- a/paddle/gserver/layers/ValidationLayer.h +++ b/paddle/gserver/layers/ValidationLayer.h @@ -15,8 +15,8 @@ limitations under the License. */ #pragma once #include -#include "paddle/gserver/evaluators/Evaluator.h" #include "Layer.h" +#include "paddle/gserver/evaluators/Evaluator.h" P_DECLARE_int32(trainer_id); diff --git a/paddle/gserver/tests/LayerGradUtil.h b/paddle/gserver/tests/LayerGradUtil.h index 2b8f334f19..62ac2d160f 100644 --- a/paddle/gserver/tests/LayerGradUtil.h +++ b/paddle/gserver/tests/LayerGradUtil.h @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/trainer/Trainer.h" -#include "paddle/gserver/layers/DataLayer.h" #include "ModelConfig.pb.h" +#include "paddle/gserver/layers/DataLayer.h" +#include "paddle/trainer/Trainer.h" #include "TestUtil.h" using namespace std; // NOLINT diff --git a/paddle/gserver/tests/TestUtil.cpp b/paddle/gserver/tests/TestUtil.cpp index dc00711697..e656da5b8f 100644 --- a/paddle/gserver/tests/TestUtil.cpp +++ b/paddle/gserver/tests/TestUtil.cpp @@ -14,8 +14,8 @@ limitations under the License. 
*/ #include "TestUtil.h" -#include "paddle/utils/CommandLineParser.h" #include "paddle/math/SparseMatrix.h" +#include "paddle/utils/CommandLineParser.h" P_DEFINE_int32(fixed_seq_length, 0, "Produce some sequence of fixed length"); @@ -63,8 +63,8 @@ MatrixPtr makeRandomSparseMatrix(size_t height, std::dynamic_pointer_cast(mat)->copyFrom( ids.data(), indices.data(), data.data(), HPPL_STREAM_DEFAULT); } else { - std::dynamic_pointer_cast(mat) - ->copyFrom(ids.data(), indices.data(), data.data()); + std::dynamic_pointer_cast(mat)->copyFrom( + ids.data(), indices.data(), data.data()); } return mat; } else { @@ -80,8 +80,8 @@ MatrixPtr makeRandomSparseMatrix(size_t height, std::dynamic_pointer_cast(mat)->copyFrom( ids.data(), indices.data(), data.data(), HPPL_STREAM_DEFAULT); } else { - std::dynamic_pointer_cast(mat) - ->copyFrom(ids.data(), indices.data(), data.data()); + std::dynamic_pointer_cast(mat)->copyFrom( + ids.data(), indices.data(), data.data()); } return mat; } diff --git a/paddle/gserver/tests/test_ActivationGrad.cpp b/paddle/gserver/tests/test_ActivationGrad.cpp index 0181d62519..20a6126d0b 100644 --- a/paddle/gserver/tests/test_ActivationGrad.cpp +++ b/paddle/gserver/tests/test_ActivationGrad.cpp @@ -13,14 +13,14 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include #include -#include "paddle/gserver/layers/DataLayer.h" +#include #include "ModelConfig.pb.h" +#include "paddle/gserver/layers/DataLayer.h" #include "paddle/trainer/Trainer.h" -#include "TestUtil.h" #include "LayerGradUtil.h" +#include "TestUtil.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_BatchNorm.cpp b/paddle/gserver/tests/test_BatchNorm.cpp index 8575999aba..3bd4e321b7 100644 --- a/paddle/gserver/tests/test_BatchNorm.cpp +++ b/paddle/gserver/tests/test_BatchNorm.cpp @@ -13,16 +13,16 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include -#include #include -#include "paddle/gserver/layers/DataLayer.h" +#include #include "ModelConfig.pb.h" +#include "paddle/gserver/layers/DataLayer.h" +#include "paddle/gserver/layers/ExpandConvTransLayer.h" #include "paddle/trainer/Trainer.h" #include "paddle/utils/GlobalConstants.h" -#include "paddle/gserver/layers/ExpandConvTransLayer.h" -#include "TestUtil.h" #include "LayerGradUtil.h" +#include "TestUtil.h" using namespace paddle; // NOLINT using namespace std; // NOLINT @@ -35,80 +35,87 @@ P_DECLARE_bool(prev_batch_state); // Test that the batchNormLayer can be followed by a ConvLayer TEST(Layer, batchNorm) { - FLAGS_use_gpu = false; - TestConfig configBN; - const int CHANNELS = 6272; - const int IMG_SIZE = 1; - configBN.layerConfig.set_type("batch_norm"); - configBN.layerConfig.set_name("bn"); - configBN.layerConfig.set_size(CHANNELS * IMG_SIZE * IMG_SIZE); - configBN.layerConfig.set_active_type("relu"); - configBN.biasSize = CHANNELS; - configBN.inputDefs.push_back({INPUT_DATA, "layer_0", + FLAGS_use_gpu = false; + TestConfig configBN; + const int CHANNELS = 6272; + const int IMG_SIZE = 1; + configBN.layerConfig.set_type("batch_norm"); + configBN.layerConfig.set_name("bn"); + configBN.layerConfig.set_size(CHANNELS * IMG_SIZE * IMG_SIZE); + configBN.layerConfig.set_active_type("relu"); + configBN.biasSize = CHANNELS; + configBN.inputDefs.push_back({INPUT_DATA, + "layer_0", /* dim= */ IMG_SIZE * IMG_SIZE * CHANNELS, /* paraSize= */ CHANNELS}); - configBN.inputDefs.push_back({INPUT_DATA, "layer_1_running_mean", - 1, CHANNELS}); - configBN.inputDefs.back().isStatic = true; - configBN.inputDefs.push_back({INPUT_DATA, "layer_2_running_var", - 1, CHANNELS}); - configBN.inputDefs.back().isStatic = true; - - LayerInputConfig* input = configBN.layerConfig.add_inputs(); - configBN.layerConfig.add_inputs(); - configBN.layerConfig.add_inputs(); - - ImageConfig* img_conf = input->mutable_image_conf(); - img_conf->set_channels(CHANNELS); - img_conf->set_img_size(IMG_SIZE); - - // Setting up conv-layer config - TestConfig config; - config.biasSize = 64; - config.layerConfig.set_type("exconv"); - config.layerConfig.set_num_filters(64); - config.layerConfig.set_partial_sum(1); - config.layerConfig.set_shared_biases(true); - - config.inputDefs.push_back({INPUT_DATA, "bn", 6272, 204800}); - input = config.layerConfig.add_inputs(); - ConvConfig* conv = input->mutable_conv_conf(); - conv->set_filter_size(5); - conv->set_filter_size_y(5); - conv->set_channels(128); - conv->set_padding(1); - conv->set_padding_y(1); - conv->set_stride(2); - conv->set_stride_y(2); - conv->set_groups(1); - conv->set_filter_channels(conv->channels() / conv->groups()); - conv->set_img_size(7); - conv->set_output_x(3); - config.layerConfig.set_size(conv->output_x() * conv->output_x() * - config.layerConfig.num_filters()); - config.layerConfig.set_name("conv"); - - // data layer initialize - std::vector dataLayers; - LayerMap layerMap; - vector datas; - initDataLayer(configBN, &dataLayers, &datas, &layerMap, "batch_norm", - 100, false, false); - // test layer initialize - std::vector parameters; - LayerPtr bnLayer; - initTestLayer(configBN, &layerMap, ¶meters, &bnLayer); - - std::vector parameters2; - LayerPtr convLayer; - initTestLayer(config, &layerMap, ¶meters2, &convLayer); - - bnLayer->forward(PASS_GC); - convLayer->forward(PASS_GC); - - CHECK_EQ(convLayer->getOutputValue()->getHeight(), 100); - CHECK_EQ(convLayer->getOutputValue()->getWidth(), 576); + configBN.inputDefs.push_back( + {INPUT_DATA, 
"layer_1_running_mean", 1, CHANNELS}); + configBN.inputDefs.back().isStatic = true; + configBN.inputDefs.push_back( + {INPUT_DATA, "layer_2_running_var", 1, CHANNELS}); + configBN.inputDefs.back().isStatic = true; + + LayerInputConfig* input = configBN.layerConfig.add_inputs(); + configBN.layerConfig.add_inputs(); + configBN.layerConfig.add_inputs(); + + ImageConfig* img_conf = input->mutable_image_conf(); + img_conf->set_channels(CHANNELS); + img_conf->set_img_size(IMG_SIZE); + + // Setting up conv-layer config + TestConfig config; + config.biasSize = 64; + config.layerConfig.set_type("exconv"); + config.layerConfig.set_num_filters(64); + config.layerConfig.set_partial_sum(1); + config.layerConfig.set_shared_biases(true); + + config.inputDefs.push_back({INPUT_DATA, "bn", 6272, 204800}); + input = config.layerConfig.add_inputs(); + ConvConfig* conv = input->mutable_conv_conf(); + conv->set_filter_size(5); + conv->set_filter_size_y(5); + conv->set_channels(128); + conv->set_padding(1); + conv->set_padding_y(1); + conv->set_stride(2); + conv->set_stride_y(2); + conv->set_groups(1); + conv->set_filter_channels(conv->channels() / conv->groups()); + conv->set_img_size(7); + conv->set_output_x(3); + config.layerConfig.set_size(conv->output_x() * conv->output_x() * + config.layerConfig.num_filters()); + config.layerConfig.set_name("conv"); + + // data layer initialize + std::vector dataLayers; + LayerMap layerMap; + vector datas; + initDataLayer(configBN, + &dataLayers, + &datas, + &layerMap, + "batch_norm", + 100, + false, + false); + // test layer initialize + std::vector parameters; + LayerPtr bnLayer; + initTestLayer(configBN, &layerMap, ¶meters, &bnLayer); + + std::vector parameters2; + LayerPtr convLayer; + initTestLayer(config, &layerMap, ¶meters2, &convLayer); + + bnLayer->forward(PASS_GC); + convLayer->forward(PASS_GC); + + CHECK_EQ(convLayer->getOutputValue()->getHeight(), 100); + CHECK_EQ(convLayer->getOutputValue()->getWidth(), 576); } int main(int argc, char** argv) { diff --git a/paddle/gserver/tests/test_ConvTrans.cpp b/paddle/gserver/tests/test_ConvTrans.cpp index 3af3f08f40..83100e3bec 100644 --- a/paddle/gserver/tests/test_ConvTrans.cpp +++ b/paddle/gserver/tests/test_ConvTrans.cpp @@ -13,17 +13,17 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include #include -#include "paddle/gserver/layers/DataLayer.h" +#include #include "ModelConfig.pb.h" -#include "paddle/trainer/Trainer.h" -#include "paddle/utils/GlobalConstants.h" +#include "paddle/gserver/layers/DataLayer.h" #include "paddle/gserver/layers/ExpandConvTransLayer.h" #include "paddle/math/MathUtils.h" +#include "paddle/trainer/Trainer.h" +#include "paddle/utils/GlobalConstants.h" -#include "TestUtil.h" #include "LayerGradUtil.h" +#include "TestUtil.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_ConvUnify.cpp b/paddle/gserver/tests/test_ConvUnify.cpp index d59acf96ac..02763406a3 100644 --- a/paddle/gserver/tests/test_ConvUnify.cpp +++ b/paddle/gserver/tests/test_ConvUnify.cpp @@ -13,17 +13,17 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include -#include #include -#include "paddle/gserver/layers/DataLayer.h" +#include #include "ModelConfig.pb.h" -#include "paddle/trainer/Trainer.h" -#include "paddle/utils/GlobalConstants.h" +#include "paddle/gserver/layers/DataLayer.h" #include "paddle/gserver/layers/ExpandConvTransLayer.h" #include "paddle/math/MathUtils.h" +#include "paddle/trainer/Trainer.h" +#include "paddle/utils/GlobalConstants.h" -#include "TestUtil.h" #include "LayerGradUtil.h" +#include "TestUtil.h" using namespace paddle; // NOLINT using namespace std; // NOLINT @@ -36,10 +36,17 @@ P_DECLARE_bool(prev_batch_state); // Do one forward pass of convTrans layer and check to see if its output // matches the given result -MatrixPtr doOneConvTest(size_t imgSize, size_t output_x, size_t stride, - size_t padding, size_t filter_size, size_t channel, - size_t numfilters, size_t groups, MatrixPtr& inputData, - real* param, bool useGpu) { +MatrixPtr doOneConvTest(size_t imgSize, + size_t output_x, + size_t stride, + size_t padding, + size_t filter_size, + size_t channel, + size_t numfilters, + size_t groups, + MatrixPtr& inputData, + real* param, + bool useGpu) { TestConfig config; config.biasSize = numfilters; if (useGpu) { @@ -51,11 +58,10 @@ MatrixPtr doOneConvTest(size_t imgSize, size_t output_x, size_t stride, config.layerConfig.set_partial_sum(1); config.layerConfig.set_shared_biases(true); - size_t weightSize = channel* filter_size * filter_size * - config.layerConfig.num_filters() / groups; - config.inputDefs.push_back({INPUT_DATA, "layer_0", - imgSize * imgSize * channel, - weightSize}); + size_t weightSize = channel * filter_size * filter_size * + config.layerConfig.num_filters() / groups; + config.inputDefs.push_back( + {INPUT_DATA, "layer_0", imgSize * imgSize * channel, weightSize}); LayerInputConfig* input = config.layerConfig.add_inputs(); ConvConfig* conv = input->mutable_conv_conf(); conv->set_filter_size(filter_size); @@ -66,7 +72,7 @@ MatrixPtr doOneConvTest(size_t imgSize, size_t output_x, size_t stride, conv->set_stride(stride); conv->set_stride_y(stride); conv->set_groups(groups); - conv->set_filter_channels(channel/groups); + conv->set_filter_channels(channel / groups); conv->set_img_size(imgSize); conv->set_output_x(output_x); @@ -77,8 +83,8 @@ MatrixPtr doOneConvTest(size_t imgSize, size_t output_x, size_t stride, std::vector dataLayers; LayerMap layerMap; vector datas; - initDataLayer(config, &dataLayers, &datas, &layerMap, "conv", - 1, false, useGpu); + initDataLayer( + config, &dataLayers, &datas, &layerMap, "conv", 1, false, useGpu); dataLayers[0]->getOutputValue()->zeroMem(); dataLayers[0]->getOutputValue()->copyFrom(*inputData); @@ -88,106 +94,124 @@ MatrixPtr doOneConvTest(size_t imgSize, size_t output_x, size_t stride, initTestLayer(config, &layerMap, ¶meters, &convLayer); convLayer->getBiasParameter()->zeroMem(); convLayer->getParameters()[0]->zeroMem(); - convLayer->getParameters()[0]->getBuf(PARAMETER_VALUE)->copyFrom(param, - weightSize); + convLayer->getParameters()[0] + ->getBuf(PARAMETER_VALUE) + ->copyFrom(param, weightSize); convLayer->forward(PASS_GC); return convLayer->getOutputValue(); } TEST(Layer, convParaUnified) { - #ifndef PADDLE_ONLY_CPU - MatrixPtr input, resultCpu, resultGpu; - input = Matrix::create(1, 4 * 4, false, false); - float inputData[] = {1, 2, 3, 4, - 5, 6, 7, 8, - 9, 10, 11, 12, - 13, 14, 15, 16}; - float param[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, - 9, 8, 7, 6, 5, 4, 3, 2, 1}; - - input->setData(inputData); - - resultCpu = doOneConvTest(/* imgSize */ 4, - /* 
output_x */ 2, - /* stride */ 1, - /* padding */ 0, - /* filter_size */ 3, - /*channel*/ 1, - /*numfilters*/ 2, - /*groups*/ 1, - input, param, false); - - resultGpu = doOneConvTest(/* imgSize */ 4, - /* output_x */ 2, - /* stride */ 1, - /* padding */ 0, - /* filter_size */ 3, - /*channel*/ 1, - /*numfilters*/ 2, - /*groups*/ 1, - input, param, true); - checkMatrixEqual(resultCpu, resultGpu); - - input = Matrix::create(1, 3 * 3 * 2, false, false); - float inputData2[] = {1, 2, 3, - 4, 5, 6, - 7, 8, 9, - - 10, 11, 12, - 13, 14, 15, - 16, 17, 18}; - float param2[] = {1, 2, 3, 4, 5, 6, 7, 8, - 8, 7, 6, 5, 4, 3, 2, 1}; - - input->setData(inputData2); - - resultCpu = doOneConvTest(/* imgSize */ 3, - /* output_x */ 2, - /* stride */ 1, - /* padding */ 0, - /* filter_size */ 2, - /*channel*/ 2, - /*numfilters*/ 2, - /*groups*/ 1, - input, param2, false); - - resultGpu = doOneConvTest(/* imgSize */ 3, - /* output_x */ 2, - /* stride */ 1, - /* padding */ 0, - /* filter_size */ 2, - /*channel*/ 2, - /*numfilters*/ 2, - /*groups*/ 1, - input, param2, true); - checkMatrixEqual(resultCpu, resultGpu); - - - float param3[] = {1, 2, 3, 4, - 4, 3, 2, 1}; - - resultCpu = doOneConvTest(/* imgSize */ 3, - /* output_x */ 2, - /* stride */ 1, - /* padding */ 0, - /* filter_size */ 2, - /*channel*/ 2, - /*numfilters*/ 2, - /*groups*/ 2, - input, param3, false); - - resultGpu = doOneConvTest(/* imgSize */ 3, - /* output_x */ 2, - /* stride */ 1, - /* padding */ 0, - /* filter_size */ 2, - /*channel*/ 2, - /*numfilters*/ 2, - /*groups*/ 2, - input, param3, true); - checkMatrixEqual(resultCpu, resultGpu); - #endif +#ifndef PADDLE_ONLY_CPU + MatrixPtr input, resultCpu, resultGpu; + input = Matrix::create(1, 4 * 4, false, false); + float inputData[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; + float param[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 8, 7, 6, 5, 4, 3, 2, 1}; + + input->setData(inputData); + + resultCpu = doOneConvTest(/* imgSize */ 4, + /* output_x */ 2, + /* stride */ 1, + /* padding */ 0, + /* filter_size */ 3, + /*channel*/ 1, + /*numfilters*/ 2, + /*groups*/ 1, + input, + param, + false); + + resultGpu = doOneConvTest(/* imgSize */ 4, + /* output_x */ 2, + /* stride */ 1, + /* padding */ 0, + /* filter_size */ 3, + /*channel*/ 1, + /*numfilters*/ 2, + /*groups*/ 1, + input, + param, + true); + checkMatrixEqual(resultCpu, resultGpu); + + input = Matrix::create(1, 3 * 3 * 2, false, false); + float inputData2[] = {1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18}; + float param2[] = {1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1}; + + input->setData(inputData2); + + resultCpu = doOneConvTest(/* imgSize */ 3, + /* output_x */ 2, + /* stride */ 1, + /* padding */ 0, + /* filter_size */ 2, + /*channel*/ 2, + /*numfilters*/ 2, + /*groups*/ 1, + input, + param2, + false); + + resultGpu = doOneConvTest(/* imgSize */ 3, + /* output_x */ 2, + /* stride */ 1, + /* padding */ 0, + /* filter_size */ 2, + /*channel*/ 2, + /*numfilters*/ 2, + /*groups*/ 1, + input, + param2, + true); + checkMatrixEqual(resultCpu, resultGpu); + + float param3[] = {1, 2, 3, 4, 4, 3, 2, 1}; + + resultCpu = doOneConvTest(/* imgSize */ 3, + /* output_x */ 2, + /* stride */ 1, + /* padding */ 0, + /* filter_size */ 2, + /*channel*/ 2, + /*numfilters*/ 2, + /*groups*/ 2, + input, + param3, + false); + + resultGpu = doOneConvTest(/* imgSize */ 3, + /* output_x */ 2, + /* stride */ 1, + /* padding */ 0, + /* filter_size */ 2, + /*channel*/ 2, + /*numfilters*/ 2, + /*groups*/ 2, + 
input, + param3, + true); + checkMatrixEqual(resultCpu, resultGpu); +#endif } int main(int argc, char** argv) { diff --git a/paddle/gserver/tests/test_Evaluator.cpp b/paddle/gserver/tests/test_Evaluator.cpp index 2c20f3a52f..7a930aebcf 100644 --- a/paddle/gserver/tests/test_Evaluator.cpp +++ b/paddle/gserver/tests/test_Evaluator.cpp @@ -15,8 +15,8 @@ limitations under the License. */ #include #include #include "ModelConfig.pb.h" -#include "paddle/trainer/Trainer.h" #include "TestUtil.h" +#include "paddle/trainer/Trainer.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index 7983d9fe64..9f8b197df5 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -17,8 +17,8 @@ limitations under the License. */ #include #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" -#include "paddle/trainer/Trainer.h" #include "paddle/math/MathUtils.h" +#include "paddle/trainer/Trainer.h" #include "LayerGradUtil.h" #include "TestUtil.h" diff --git a/paddle/gserver/tests/test_MultinomialSampler.cpp b/paddle/gserver/tests/test_MultinomialSampler.cpp index fc164da8ea..eadf40ade0 100644 --- a/paddle/gserver/tests/test_MultinomialSampler.cpp +++ b/paddle/gserver/tests/test_MultinomialSampler.cpp @@ -20,8 +20,8 @@ limitations under the License. */ #undef PADDLE_DISABLE_TIMER #include "paddle/utils/Stat.h" -#include "paddle/utils/Util.h" #include "paddle/gserver/layers/MultinomialSampler.h" +#include "paddle/utils/Util.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_NetworkCompare.cpp b/paddle/gserver/tests/test_NetworkCompare.cpp index ff6b5ab0d0..baa55aa025 100644 --- a/paddle/gserver/tests/test_NetworkCompare.cpp +++ b/paddle/gserver/tests/test_NetworkCompare.cpp @@ -13,14 +13,14 @@ See the License for the specific language governing permissions and limitations under the License. */ #undef PADDLE_DISABLE_TIMER +#include #include -#include #include -#include +#include +#include "TestUtil.h" #include "paddle/trainer/Trainer.h" #include "paddle/utils/Stat.h" -#include "TestUtil.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_ProtoDataProvider.cpp b/paddle/gserver/tests/test_ProtoDataProvider.cpp index d5b8017cd1..d421b6e2f2 100644 --- a/paddle/gserver/tests/test_ProtoDataProvider.cpp +++ b/paddle/gserver/tests/test_ProtoDataProvider.cpp @@ -17,8 +17,8 @@ limitations under the License. */ #include -#include "paddle/utils/Util.h" #include "paddle/gserver/dataproviders/ProtoDataProvider.h" +#include "paddle/utils/Util.h" #include "TestUtil.h" diff --git a/paddle/gserver/tests/test_RecurrentLayer.cpp b/paddle/gserver/tests/test_RecurrentLayer.cpp index 3f26b710e9..cd96ca7c84 100644 --- a/paddle/gserver/tests/test_RecurrentLayer.cpp +++ b/paddle/gserver/tests/test_RecurrentLayer.cpp @@ -13,11 +13,11 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include -#include #include +#include +#include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" #include "paddle/gserver/layers/Layer.h" -#include "ModelConfig.pb.h" #include "TestUtil.h" @@ -220,8 +220,8 @@ TEST(Layer, RecurrentLayer) { } #define protected public -#include "paddle/gserver/layers/LstmLayer.h" #include "paddle/gserver/layers/GatedRecurrentLayer.h" +#include "paddle/gserver/layers/LstmLayer.h" template class TestRecurrentLayer { public: diff --git a/paddle/gserver/tests/test_SelectiveFCLayer.cpp b/paddle/gserver/tests/test_SelectiveFCLayer.cpp index c588f69446..4f3a95a535 100644 --- a/paddle/gserver/tests/test_SelectiveFCLayer.cpp +++ b/paddle/gserver/tests/test_SelectiveFCLayer.cpp @@ -12,17 +12,17 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include +#include #include +#include #include #include -#include -#include -#include +#include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" -#include "paddle/gserver/layers/Layer.h" #include "paddle/gserver/layers/FullyConnectedLayer.h" +#include "paddle/gserver/layers/Layer.h" #include "paddle/gserver/layers/SelectiveFullyConnectedLayer.h" -#include "ModelConfig.pb.h" #include "paddle/math/CpuSparseMatrix.h" #include "paddle/trainer/Trainer.h" diff --git a/paddle/gserver/tests/test_WarpCTCLayer.cpp b/paddle/gserver/tests/test_WarpCTCLayer.cpp index e526a27906..700425412c 100644 --- a/paddle/gserver/tests/test_WarpCTCLayer.cpp +++ b/paddle/gserver/tests/test_WarpCTCLayer.cpp @@ -14,11 +14,11 @@ limitations under the License. */ #include #include -#include "paddle/gserver/layers/Layer.h" -#include "paddle/gserver/layers/DataLayer.h" +#include "ModelConfig.pb.h" #include "paddle/gserver/layers/CTCLayer.h" +#include "paddle/gserver/layers/DataLayer.h" +#include "paddle/gserver/layers/Layer.h" #include "paddle/gserver/layers/WarpCTCLayer.h" -#include "ModelConfig.pb.h" #include "TestUtil.h" diff --git a/paddle/math/Allocator.h b/paddle/math/Allocator.h index 4d0a1506be..666a8b8368 100644 --- a/paddle/math/Allocator.h +++ b/paddle/math/Allocator.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include #include +#include #include "hl_gpu.h" #include "paddle/utils/Logging.h" diff --git a/paddle/math/BaseMatrix.h b/paddle/math/BaseMatrix.h index 368557bb26..2933c20fba 100644 --- a/paddle/math/BaseMatrix.h +++ b/paddle/math/BaseMatrix.h @@ -13,10 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include #include -#include "paddle/utils/TypeDefs.h" +#include #include "TensorExpression.h" +#include "paddle/utils/TypeDefs.h" namespace paddle { diff --git a/paddle/math/CpuSparseMatrix.cpp b/paddle/math/CpuSparseMatrix.cpp index 324c7ec0ca..b5d5b6ef61 100644 --- a/paddle/math/CpuSparseMatrix.cpp +++ b/paddle/math/CpuSparseMatrix.cpp @@ -12,12 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "hl_gpu.h" #include "CpuSparseMatrix.h" #include "SparseMatrix.h" +#include "float.h" +#include "hl_gpu.h" #include "paddle/math/MathUtils.h" #include "paddle/utils/Util.h" -#include "float.h" namespace paddle { @@ -656,9 +656,9 @@ void CpuSparseMatrix::trimFrom(const CpuSparseMatrix& src) { if (format_ == SPARSE_CSR) { int* srcCols = src.getCols(); size_t numLessWidth = - std::count_if(srcCols, - srcCols + src.getElementCnt(), - [this](size_t n) { return n < this->width_; }); + std::count_if(srcCols, srcCols + src.getElementCnt(), [this](size_t n) { + return n < this->width_; + }); resize(height_, width_, numLessWidth, valueType_, format_); rows_[0] = 0; size_t index = 0; diff --git a/paddle/math/MathFunctions.cpp b/paddle/math/MathFunctions.cpp index 037525b402..d7aa118487 100644 --- a/paddle/math/MathFunctions.cpp +++ b/paddle/math/MathFunctions.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "MathFunctions.h" -#include "hl_matrix_ops.cuh" #include "hl_matrix_apply.cuh" +#include "hl_matrix_ops.cuh" namespace paddle { diff --git a/paddle/math/MathUtils.cpp b/paddle/math/MathUtils.cpp index 1fb7655c5a..5bbc3e4e37 100644 --- a/paddle/math/MathUtils.cpp +++ b/paddle/math/MathUtils.cpp @@ -14,8 +14,8 @@ limitations under the License. */ #include "MathUtils.h" #include -#include "paddle/utils/Logging.h" #include "Vector.h" +#include "paddle/utils/Logging.h" namespace paddle { diff --git a/paddle/math/Matrix.h b/paddle/math/Matrix.h index 395143a4b1..4342ca52a3 100644 --- a/paddle/math/Matrix.h +++ b/paddle/math/Matrix.h @@ -14,20 +14,20 @@ limitations under the License. */ #pragma once +#include #include #include -#include #include "paddle/utils/Logging.h" #include "paddle/utils/ThreadLocal.h" #include +#include "BaseMatrix.h" #include "MemoryHandle.h" -#include "paddle/utils/TypeDefs.h" #include "Vector.h" #include "paddle/utils/ThreadLocal.h" -#include "BaseMatrix.h" +#include "paddle/utils/TypeDefs.h" namespace paddle { diff --git a/paddle/math/MatrixBitCode.cpp b/paddle/math/MatrixBitCode.cpp index 6390d4b6a5..cea912d3ca 100644 --- a/paddle/math/MatrixBitCode.cpp +++ b/paddle/math/MatrixBitCode.cpp @@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Logging.h" -#include "paddle/utils/Util.h" #include "Matrix.h" #include "hl_gpu.h" +#include "paddle/utils/Logging.h" +#include "paddle/utils/Util.h" namespace paddle { diff --git a/paddle/math/MemoryHandle.cpp b/paddle/math/MemoryHandle.cpp index 4c4a827b23..84afb5944c 100644 --- a/paddle/math/MemoryHandle.cpp +++ b/paddle/math/MemoryHandle.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include #include "MemoryHandle.h" +#include #include "Storage.h" namespace paddle { diff --git a/paddle/math/PoolAllocator.h b/paddle/math/PoolAllocator.h index 1544cb2cfc..c06efa9ac7 100644 --- a/paddle/math/PoolAllocator.h +++ b/paddle/math/PoolAllocator.h @@ -14,11 +14,11 @@ limitations under the License. 
*/ #pragma once +#include #include #include -#include #include -#include +#include #include "Allocator.h" namespace paddle { diff --git a/paddle/math/SparseMatrix.cpp b/paddle/math/SparseMatrix.cpp index d2779cc9f5..9154503c21 100644 --- a/paddle/math/SparseMatrix.cpp +++ b/paddle/math/SparseMatrix.cpp @@ -12,13 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include "SparseMatrix.h" #include +#include #include #include "hl_gpu.h" -#include "SparseMatrix.h" -#include "paddle/utils/Util.h" #include "hl_top_k.h" -#include +#include "paddle/utils/Util.h" namespace paddle { @@ -537,11 +537,9 @@ void GpuSparseMatrix::transpose(MatrixPtr matTrans, bool memAlloc) { dataVec.emplace_back( rows.getData()[i], cols_full.getData()[i], value.getData()[i]); } - std::sort(dataVec.begin(), - dataVec.end(), - [](Element a, Element b) { - return a.row < b.row || (a.row == b.row && a.col < b.col); - }); + std::sort(dataVec.begin(), dataVec.end(), [](Element a, Element b) { + return a.row < b.row || (a.row == b.row && a.col < b.col); + }); /*get sorted data, row index, and col index, put them in the right place*/ cols.resize(height_ + 1); diff --git a/paddle/math/SparseMatrix.h b/paddle/math/SparseMatrix.h index f8d9ffc29f..bd96a3301d 100644 --- a/paddle/math/SparseMatrix.h +++ b/paddle/math/SparseMatrix.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once #include -#include "Matrix.h" #include "CpuSparseMatrix.h" +#include "Matrix.h" namespace paddle { diff --git a/paddle/math/SparseRowMatrix.h b/paddle/math/SparseRowMatrix.h index 2fee1b39fe..badb4b9c1c 100644 --- a/paddle/math/SparseRowMatrix.h +++ b/paddle/math/SparseRowMatrix.h @@ -14,10 +14,10 @@ limitations under the License. */ #pragma once -#include #include -#include "paddle/utils/CommandLineParser.h" +#include #include "Matrix.h" +#include "paddle/utils/CommandLineParser.h" #include "paddle/utils/Util.h" P_DECLARE_bool(allow_inefficient_sparse_update); diff --git a/paddle/math/Storage.cpp b/paddle/math/Storage.cpp index 0170b4efb8..f9a2c12cd5 100644 --- a/paddle/math/Storage.cpp +++ b/paddle/math/Storage.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Util.h" -#include "Allocator.h" #include "Storage.h" +#include "Allocator.h" +#include "paddle/utils/Util.h" P_DEFINE_int32(pool_limit_size, 536870912, diff --git a/paddle/math/Storage.h b/paddle/math/Storage.h index 3658320182..06a66b5f14 100644 --- a/paddle/math/Storage.h +++ b/paddle/math/Storage.h @@ -16,8 +16,8 @@ limitations under the License. */ #include #include -#include "paddle/utils/Locks.h" #include "PoolAllocator.h" +#include "paddle/utils/Locks.h" namespace paddle { diff --git a/paddle/math/TensorEvaluate.h b/paddle/math/TensorEvaluate.h index 346ed7ab13..9de2099b85 100644 --- a/paddle/math/TensorEvaluate.h +++ b/paddle/math/TensorEvaluate.h @@ -15,8 +15,8 @@ limitations under the License. 
*/ #pragma once #include -#include "paddle/utils/Logging.h" #include "hl_base.h" +#include "paddle/utils/Logging.h" namespace paddle { diff --git a/paddle/math/TensorExpression.h b/paddle/math/TensorExpression.h index 7f28ad83bb..9bd789e8c5 100644 --- a/paddle/math/TensorExpression.h +++ b/paddle/math/TensorExpression.h @@ -13,11 +13,11 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include #include -#include "paddle/utils/TypeDefs.h" -#include "paddle/utils/Logging.h" +#include #include "hl_tensor_ops.h" +#include "paddle/utils/Logging.h" +#include "paddle/utils/TypeDefs.h" namespace paddle { diff --git a/paddle/math/TrainingAlgorithmOp.h b/paddle/math/TrainingAlgorithmOp.h index 2dc56f69e5..881a8d72d8 100644 --- a/paddle/math/TrainingAlgorithmOp.h +++ b/paddle/math/TrainingAlgorithmOp.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/utils/Logging.h" #include "BaseMatrix.h" +#include "paddle/utils/Logging.h" namespace paddle { diff --git a/paddle/math/Vector.cpp b/paddle/math/Vector.cpp index 484f4c9252..eaa1cdce30 100644 --- a/paddle/math/Vector.cpp +++ b/paddle/math/Vector.cpp @@ -12,17 +12,17 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Util.h" #include "Vector.h" +#include "paddle/utils/Util.h" #include -#include "paddle/utils/Logging.h" -#include "paddle/utils/ThreadLocal.h" -#include "paddle/utils/Thread.h" -#include "paddle/utils/Flags.h" #include "Matrix.h" #include "hl_gpu.h" #include "hl_table_apply.h" +#include "paddle/utils/Flags.h" +#include "paddle/utils/Logging.h" +#include "paddle/utils/Thread.h" +#include "paddle/utils/ThreadLocal.h" namespace paddle { @@ -754,8 +754,7 @@ void ParallelCpuVectorT::exec(SyncThreadPool::JobFunc func) { } template -CpuGpuVectorT::CpuGpuVectorT(size_t size, bool useGpu) - : sync_(nullptr) { +CpuGpuVectorT::CpuGpuVectorT(size_t size, bool useGpu) : sync_(nullptr) { if (!useGpu) { cpuVectorT_ = std::make_shared>(size); } else { diff --git a/paddle/math/Vector.h b/paddle/math/Vector.h index 535580ac37..8a24103bd4 100644 --- a/paddle/math/Vector.h +++ b/paddle/math/Vector.h @@ -14,15 +14,15 @@ limitations under the License. */ #pragma once -#include #include +#include #include -#include "MemoryHandle.h" -#include "paddle/utils/TypeDefs.h" #include "BaseMatrix.h" +#include "MemoryHandle.h" #include "paddle/utils/Thread.h" +#include "paddle/utils/TypeDefs.h" namespace paddle { diff --git a/paddle/math/tests/OriginalOptimizerApi.h b/paddle/math/tests/OriginalOptimizerApi.h index ddcdd6bb51..0188372771 100644 --- a/paddle/math/tests/OriginalOptimizerApi.h +++ b/paddle/math/tests/OriginalOptimizerApi.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/utils/GlobalConstants.h" #include "paddle/math/Vector.h" +#include "paddle/utils/GlobalConstants.h" using namespace paddle; // NOLINT diff --git a/paddle/math/tests/TestUtils.h b/paddle/math/tests/TestUtils.h index 5f9fab7245..c302096188 100644 --- a/paddle/math/tests/TestUtils.h +++ b/paddle/math/tests/TestUtils.h @@ -40,9 +40,9 @@ limitations under the License. 
*/ */ #include +#include "TensorCheck.h" #include "paddle/math/Matrix.h" #include "paddle/math/SparseMatrix.h" -#include "TensorCheck.h" namespace autotest { diff --git a/paddle/math/tests/test_Allocator.cpp b/paddle/math/tests/test_Allocator.cpp index 440fcda0fe..33e0952efe 100644 --- a/paddle/math/tests/test_Allocator.cpp +++ b/paddle/math/tests/test_Allocator.cpp @@ -13,11 +13,11 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include "paddle/utils/Util.h" #include "paddle/utils/Logging.h" +#include "paddle/utils/Util.h" #define private public -#include "paddle/math/MemoryHandle.h" #include "paddle/math/Allocator.h" +#include "paddle/math/MemoryHandle.h" #include "paddle/math/PoolAllocator.h" using namespace paddle; // NOLINT diff --git a/paddle/math/tests/test_BaseMatrix.cpp b/paddle/math/tests/test_BaseMatrix.cpp index a4683918ca..cc7c1e7eb2 100644 --- a/paddle/math/tests/test_BaseMatrix.cpp +++ b/paddle/math/tests/test_BaseMatrix.cpp @@ -20,8 +20,8 @@ limitations under the License. */ */ #include -#include "paddle/math/BaseMatrix.h" #include "TestUtils.h" +#include "paddle/math/BaseMatrix.h" using paddle::BaseMatrix; using paddle::Matrix; diff --git a/paddle/math/tests/test_CpuGpuVector.cpp b/paddle/math/tests/test_CpuGpuVector.cpp index c671735875..624fa20ca5 100644 --- a/paddle/math/tests/test_CpuGpuVector.cpp +++ b/paddle/math/tests/test_CpuGpuVector.cpp @@ -14,10 +14,10 @@ limitations under the License. */ #ifndef PADDLE_ONLY_CPU -#include "paddle/utils/Util.h" +#include #include "paddle/math/Vector.h" +#include "paddle/utils/Util.h" #include "test_matrixUtil.h" -#include using namespace paddle; // NOLINT diff --git a/paddle/math/tests/test_ExecViaCpu.cpp b/paddle/math/tests/test_ExecViaCpu.cpp index b328ebf554..27216ddb58 100644 --- a/paddle/math/tests/test_ExecViaCpu.cpp +++ b/paddle/math/tests/test_ExecViaCpu.cpp @@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include #include -#include +#include #include +#include #include "paddle/math/SparseMatrix.h" using namespace paddle; // NOLINT diff --git a/paddle/math/tests/test_GpuProfiler.cpp b/paddle/math/tests/test_GpuProfiler.cpp index e5fd6f4523..d490078d90 100644 --- a/paddle/math/tests/test_GpuProfiler.cpp +++ b/paddle/math/tests/test_GpuProfiler.cpp @@ -14,12 +14,12 @@ limitations under the License. 
*/ #ifndef PADDLE_ONLY_CPU -#include "paddle/utils/Util.h" -#include "paddle/math/Matrix.h" -#include "paddle/math/SparseMatrix.h" #include #include "paddle/gserver/tests/TestUtil.h" +#include "paddle/math/Matrix.h" +#include "paddle/math/SparseMatrix.h" #include "paddle/utils/Stat.h" +#include "paddle/utils/Util.h" using namespace paddle; // NOLINT using namespace std; // NOLINT @@ -52,7 +52,9 @@ void MatrixCheckErr(const Matrix& matrix1, const Matrix& matrix2) { EXPECT_EQ(count, 0) << "There are " << count << " different element."; } -void testBilinearFwdBwd(int numSamples, int imgSizeH, int imgSizeW, +void testBilinearFwdBwd(int numSamples, + int imgSizeH, + int imgSizeW, int channels) { int inWidth = imgSizeH * imgSizeW * channels; int outWidth = 2 * imgSizeH * 2 * imgSizeW * channels; @@ -73,10 +75,22 @@ void testBilinearFwdBwd(int numSamples, int imgSizeH, int imgSizeW, { // nvprof: GPU Proflier REGISTER_GPU_PROFILER("testBilinearFwdBwd"); - target->bilinearForward(*input, imgSizeH, imgSizeW, - 2 * imgSizeH, 2 * imgSizeW, channels, ratioH, ratioW); - targetGpu->bilinearForward(*inputGpu, imgSizeH, imgSizeW, - 2 * imgSizeH, 2 * imgSizeW, channels, ratioH, ratioW); + target->bilinearForward(*input, + imgSizeH, + imgSizeW, + 2 * imgSizeH, + 2 * imgSizeW, + channels, + ratioH, + ratioW); + targetGpu->bilinearForward(*inputGpu, + imgSizeH, + imgSizeW, + 2 * imgSizeH, + 2 * imgSizeW, + channels, + ratioH, + ratioW); } // check @@ -88,8 +102,8 @@ void testBilinearFwdBwd(int numSamples, int imgSizeH, int imgSizeW, MatrixPtr inputGpuGrad = GpuMatrix::create(numSamples, inWidth, false, true); MatrixPtr targetGrad = CpuMatrix::create(numSamples, outWidth, false, false); - MatrixPtr targetGpuGrad = GpuMatrix::create(numSamples, outWidth, false, - true); + MatrixPtr targetGpuGrad = + GpuMatrix::create(numSamples, outWidth, false, true); MatrixPtr targetCheckGrad = CpuMatrix::create(numSamples, inWidth, false, false); @@ -98,10 +112,22 @@ void testBilinearFwdBwd(int numSamples, int imgSizeH, int imgSizeW, inputGpuGrad->copyFrom(*inputGrad); targetGpuGrad->copyFrom(*targetGrad); - inputGrad->bilinearBackward(*targetGrad, 2 * imgSizeH, 2 * imgSizeW, - imgSizeH, imgSizeW, channels, ratioH, ratioW); - inputGpuGrad->bilinearBackward(*targetGpuGrad, 2 * imgSizeH, 2 * imgSizeW, - imgSizeH, imgSizeW, channels, ratioH, ratioW); + inputGrad->bilinearBackward(*targetGrad, + 2 * imgSizeH, + 2 * imgSizeW, + imgSizeH, + imgSizeW, + channels, + ratioH, + ratioW); + inputGpuGrad->bilinearBackward(*targetGpuGrad, + 2 * imgSizeH, + 2 * imgSizeW, + imgSizeH, + imgSizeW, + channels, + ratioH, + ratioW); // check targetCheckGrad->copyFrom(*inputGpuGrad); @@ -116,8 +142,9 @@ TEST(Profiler, testBilinearFwdBwd) { // nvprof: GPU Proflier REGISTER_GPU_PROFILER("testBilinearFwdBwd"); // Paddle built-in timer - REGISTER_TIMER_INFO("testBilinearFwdBwd", - "numSamples = 10, channels = 16, imgSizeX = 64, imgSizeY = 64"); + REGISTER_TIMER_INFO( + "testBilinearFwdBwd", + "numSamples = 10, channels = 16, imgSizeX = 64, imgSizeY = 64"); testBilinearFwdBwd(numSamples, imgSize, imgSize, channels); } globalStat.printAllStatus(); @@ -128,8 +155,9 @@ int main(int argc, char** argv) { initMain(argc, argv); // nvprof: GPU Proflier - REGISTER_GPU_PROFILER("RecursiveProfilingTest", - "numSamples = 10, channels = 16, imgSizeX = 64, imgSizeY = 64"); + REGISTER_GPU_PROFILER( + "RecursiveProfilingTest", + "numSamples = 10, channels = 16, imgSizeX = 64, imgSizeY = 64"); return RUN_ALL_TESTS(); } diff --git 
a/paddle/math/tests/test_SIMDFunctions.cpp b/paddle/math/tests/test_SIMDFunctions.cpp index 2c54121d99..f62843310d 100644 --- a/paddle/math/tests/test_SIMDFunctions.cpp +++ b/paddle/math/tests/test_SIMDFunctions.cpp @@ -17,10 +17,10 @@ limitations under the License. */ #include -#include -#include #include +#include #include +#include #include #include diff --git a/paddle/math/tests/test_TrainingAlgorithm.cpp b/paddle/math/tests/test_TrainingAlgorithm.cpp index 93a930cc2f..1bf6a0cc43 100644 --- a/paddle/math/tests/test_TrainingAlgorithm.cpp +++ b/paddle/math/tests/test_TrainingAlgorithm.cpp @@ -13,11 +13,11 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include "paddle/utils/Util.h" -#include "paddle/math/TrainingAlgorithmOp.h" #include "OriginalOptimizerApi.h" -#include "TensorCheck.h" #include "PerfUtils.h" +#include "TensorCheck.h" +#include "paddle/math/TrainingAlgorithmOp.h" +#include "paddle/utils/Util.h" using namespace paddle; // NOLINT diff --git a/paddle/math/tests/test_batchTranspose.cpp b/paddle/math/tests/test_batchTranspose.cpp index 88631c62b8..9925e24dc1 100644 --- a/paddle/math/tests/test_batchTranspose.cpp +++ b/paddle/math/tests/test_batchTranspose.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "test_matrixUtil.h" #include "hl_batch_transpose.h" +#include "test_matrixUtil.h" using namespace paddle; // NOLINT diff --git a/paddle/math/tests/test_matrixCompare.cpp b/paddle/math/tests/test_matrixCompare.cpp index 713792d82b..62de5b25e4 100644 --- a/paddle/math/tests/test_matrixCompare.cpp +++ b/paddle/math/tests/test_matrixCompare.cpp @@ -16,13 +16,13 @@ limitations under the License. */ /// This unittest checks GpuMatrix/CpuMatrix get same result, so disable when /// only cpu version. -#include "paddle/utils/Util.h" -#include "paddle/math/Matrix.h" -#include "paddle/math/SparseMatrix.h" #include +#include "TensorCheck.h" #include "paddle/gserver/tests/TestUtil.h" +#include "paddle/math/Matrix.h" +#include "paddle/math/SparseMatrix.h" #include "paddle/utils/Stat.h" -#include "TensorCheck.h" +#include "paddle/utils/Util.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/math/tests/test_perturbation.cpp b/paddle/math/tests/test_perturbation.cpp index eaf4dfea66..60ebae0153 100644 --- a/paddle/math/tests/test_perturbation.cpp +++ b/paddle/math/tests/test_perturbation.cpp @@ -14,10 +14,10 @@ limitations under the License. */ #ifndef PADDLE_ONLY_CPU -#include +#include #include +#include #include -#include #include "hl_cuda.h" #include "hl_perturbation_util.cuh" diff --git a/paddle/math/tests/test_sparseMatrixCompare.cpp b/paddle/math/tests/test_sparseMatrixCompare.cpp index eff2c502bb..6f6de238ba 100644 --- a/paddle/math/tests/test_sparseMatrixCompare.cpp +++ b/paddle/math/tests/test_sparseMatrixCompare.cpp @@ -17,10 +17,10 @@ limitations under the License. */ // so disable when /// only cpu version. 
-#include "paddle/utils/Util.h" +#include #include "paddle/math/Matrix.h" +#include "paddle/utils/Util.h" #include "test_matrixUtil.h" -#include using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/parameter/Argument.cpp b/paddle/parameter/Argument.cpp index b632a11bbd..e91daa3717 100644 --- a/paddle/parameter/Argument.cpp +++ b/paddle/parameter/Argument.cpp @@ -551,11 +551,10 @@ void Argument::getSeqInfo(std::vector* seqInfo) const { } seqInfo->push_back(info); } - std::sort(seqInfo->begin(), - seqInfo->end(), - [](const SeqInfo& a, const SeqInfo& b) { - return a.topLevelLength > b.topLevelLength; - }); + std::sort( + seqInfo->begin(), seqInfo->end(), [](const SeqInfo& a, const SeqInfo& b) { + return a.topLevelLength > b.topLevelLength; + }); } void Argument::checkSubset() const { diff --git a/paddle/parameter/Argument.h b/paddle/parameter/Argument.h index 69d57a28c0..afd2de0202 100644 --- a/paddle/parameter/Argument.h +++ b/paddle/parameter/Argument.h @@ -18,9 +18,9 @@ limitations under the License. */ #include "paddle/math/Matrix.h" #include "paddle/math/Vector.h" +#include "paddle/parameter/Parameter.h" #include "paddle/utils/Locks.h" #include "paddle/utils/Util.h" -#include "paddle/parameter/Parameter.h" namespace paddle { diff --git a/paddle/parameter/FirstOrderOptimizer.cpp b/paddle/parameter/FirstOrderOptimizer.cpp index 17268d3715..630f15c8cf 100644 --- a/paddle/parameter/FirstOrderOptimizer.cpp +++ b/paddle/parameter/FirstOrderOptimizer.cpp @@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Util.h" -#include "paddle/utils/Flags.h" -#include "paddle/math/TrainingAlgorithmOp.h" #include "FirstOrderOptimizer.h" +#include "paddle/math/TrainingAlgorithmOp.h" +#include "paddle/utils/Flags.h" +#include "paddle/utils/Util.h" #include diff --git a/paddle/parameter/ParallelParameter.cpp b/paddle/parameter/ParallelParameter.cpp index b3182306a4..cea77e5b17 100644 --- a/paddle/parameter/ParallelParameter.cpp +++ b/paddle/parameter/ParallelParameter.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Logging.h" #include +#include "paddle/utils/Logging.h" #include "ParallelParameter.h" diff --git a/paddle/parameter/ParallelParameter.h b/paddle/parameter/ParallelParameter.h index b0fe82d3c4..417e386dc7 100644 --- a/paddle/parameter/ParallelParameter.h +++ b/paddle/parameter/ParallelParameter.h @@ -16,19 +16,19 @@ limitations under the License. */ #include +#include +#include #include #include #include -#include -#include #include "hl_gpu.h" -#include "paddle/utils/Flags.h" -#include "paddle/utils/Locks.h" +#include "paddle/math/Vector.h" #include "paddle/parameter/Parameter.h" #include "paddle/parameter/ParameterUpdateFunctions.h" +#include "paddle/utils/Flags.h" +#include "paddle/utils/Locks.h" #include "paddle/utils/TypeDefs.h" -#include "paddle/math/Vector.h" #include "ParameterConfig.pb.h" diff --git a/paddle/parameter/Parameter.cpp b/paddle/parameter/Parameter.cpp index 3b06650e0c..986ae1539b 100644 --- a/paddle/parameter/Parameter.cpp +++ b/paddle/parameter/Parameter.cpp @@ -12,19 +12,19 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ +#include "Parameter.h" #include -#include "paddle/math/MathUtils.h" #include "AverageOptimizer.h" #include "FirstOrderOptimizer.h" -#include "Parameter.h" -#include "paddle/utils/Logging.h" #include "OptimizerFunctions.h" #include "OptimizerWithRegularizer.h" #include "ParameterUpdateFunctions.h" -#include "paddle/math/SparseRowMatrix.h" -#include "paddle/math/CpuSparseMatrix.h" #include "hl_gpu.h" +#include "paddle/math/CpuSparseMatrix.h" +#include "paddle/math/MathUtils.h" +#include "paddle/math/SparseRowMatrix.h" #include "paddle/utils/CommandLineParser.h" +#include "paddle/utils/Logging.h" P_DEFINE_int32(enable_grad_share, (100 * 1024 * 1024), diff --git a/paddle/parameter/Parameter.h b/paddle/parameter/Parameter.h index 6b0600517a..532c6770e5 100644 --- a/paddle/parameter/Parameter.h +++ b/paddle/parameter/Parameter.h @@ -23,14 +23,14 @@ limitations under the License. */ #include "ParameterConfig.pb.h" #include "TrainerConfig.pb.h" +#include "ParameterUpdaterHook.h" +#include "paddle/math/Matrix.h" +#include "paddle/math/Vector.h" +#include "paddle/utils/GlobalConstants.h" #include "paddle/utils/Locks.h" +#include "paddle/utils/ThreadLocal.h" #include "paddle/utils/TypeDefs.h" -#include "paddle/math/Vector.h" -#include "paddle/math/Matrix.h" #include "paddle/utils/Util.h" -#include "paddle/utils/ThreadLocal.h" -#include "ParameterUpdaterHook.h" -#include "paddle/utils/GlobalConstants.h" namespace paddle { diff --git a/paddle/parameter/ParameterUpdateFunctions.h b/paddle/parameter/ParameterUpdateFunctions.h index 7374843d80..2d277e47e7 100644 --- a/paddle/parameter/ParameterUpdateFunctions.h +++ b/paddle/parameter/ParameterUpdateFunctions.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/utils/TypeDefs.h" #include "paddle/math/Vector.h" +#include "paddle/utils/TypeDefs.h" namespace paddle { diff --git a/paddle/parameter/ParameterUpdaterBase.cpp b/paddle/parameter/ParameterUpdaterBase.cpp index b938270ce1..49e2ae2b39 100644 --- a/paddle/parameter/ParameterUpdaterBase.cpp +++ b/paddle/parameter/ParameterUpdaterBase.cpp @@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include -#include "paddle/utils/Logging.h" #include "ParameterUpdaterBase.h" +#include #include "hl_gpu.h" +#include "paddle/utils/Logging.h" namespace paddle { diff --git a/paddle/parameter/ParameterUpdaterHook.cpp b/paddle/parameter/ParameterUpdaterHook.cpp index 466560c437..f826e8448c 100644 --- a/paddle/parameter/ParameterUpdaterHook.cpp +++ b/paddle/parameter/ParameterUpdaterHook.cpp @@ -14,16 +14,16 @@ limitations under the License. */ #include "ParameterUpdaterHook.h" +#include #include -#include #include -#include #include +#include #include "paddle/math/Vector.h" #include "paddle/parameter/Parameter.h" -#include "paddle/utils/Util.h" #include "paddle/utils/Flags.h" +#include "paddle/utils/Util.h" namespace paddle { @@ -156,7 +156,8 @@ private: static WeakKVCache, IParameterUpdaterHook, - StringIntPairHasher> g_hookCache_; + StringIntPairHasher> + g_hookCache_; /** * ParameterUpdaterHook actually factory method. 
diff --git a/paddle/parameter/Regularizer.cpp b/paddle/parameter/Regularizer.cpp
index 4420ee0031..8511900150 100644
--- a/paddle/parameter/Regularizer.cpp
+++ b/paddle/parameter/Regularizer.cpp
@@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "paddle/utils/Util.h"
-#include "paddle/utils/Flags.h"
 #include "Regularizer.h"
+#include "paddle/utils/Flags.h"
+#include "paddle/utils/Util.h"
 namespace paddle {
diff --git a/paddle/parameter/Weight.cpp b/paddle/parameter/Weight.cpp
index f366a2b53f..3738a58d7f 100644
--- a/paddle/parameter/Weight.cpp
+++ b/paddle/parameter/Weight.cpp
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "paddle/utils/Logging.h"
 #include "Weight.h"
+#include "paddle/utils/Logging.h"
 namespace paddle {
diff --git a/paddle/parameter/tests/test_common.cpp b/paddle/parameter/tests/test_common.cpp
index 4e4d0ccfa2..aa57a63469 100644
--- a/paddle/parameter/tests/test_common.cpp
+++ b/paddle/parameter/tests/test_common.cpp
@@ -12,12 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include
 #include
+#include
 #include
-#include
 #include
+#include
 #include
 #include
diff --git a/paddle/pserver/BaseClient.cpp b/paddle/pserver/BaseClient.cpp
index 62fafc1891..a43def98c5 100644
--- a/paddle/pserver/BaseClient.cpp
+++ b/paddle/pserver/BaseClient.cpp
@@ -12,11 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include
+#include "BaseClient.h"
 #include
-#include "paddle/utils/Stat.h"
+#include
 #include "paddle/utils/CommandLineParser.h"
-#include "BaseClient.h"
+#include "paddle/utils/Stat.h"
 P_DECLARE_string(pservers);
diff --git a/paddle/pserver/BaseClient.h b/paddle/pserver/BaseClient.h
index 5924f80684..262afafbe2 100644
--- a/paddle/pserver/BaseClient.h
+++ b/paddle/pserver/BaseClient.h
@@ -14,11 +14,11 @@ limitations under the License. */
 #pragma once
-#include "paddle/pserver/ProtoServer.h"
+#include "ParameterService.pb.h"
 #include "paddle/math/Matrix.h"
+#include "paddle/pserver/ProtoServer.h"
 #include "paddle/utils/Queue.h"
 #include "paddle/utils/TypeDefs.h"
-#include "ParameterService.pb.h"
 namespace paddle {
diff --git a/paddle/pserver/LightNetwork.cpp b/paddle/pserver/LightNetwork.cpp
index 9a398d4f45..329dfb0fb3 100644
--- a/paddle/pserver/LightNetwork.cpp
+++ b/paddle/pserver/LightNetwork.cpp
@@ -12,23 +12,23 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include
-#include
+#include
 #include
 #include
 #include
-#include
+#include
+#include
 #include
-#include
 #include
 #include
+#include
 #include
 #include "LightNetwork.h"
-#include "paddle/utils/Util.h"
-#include "paddle/utils/StringUtil.h"
 #include "RDMANetwork.h"
+#include "paddle/utils/StringUtil.h"
+#include "paddle/utils/Util.h"
 /// quick ack can reduce the latency of small message
 P_DEFINE_bool(small_messages,
diff --git a/paddle/pserver/LightNetwork.h b/paddle/pserver/LightNetwork.h
index 7aff007a27..c4a06deb94 100644
--- a/paddle/pserver/LightNetwork.h
+++ b/paddle/pserver/LightNetwork.h
@@ -16,10 +16,10 @@ limitations under the License. */
 #include "SocketChannel.h"
+#include
 #include
 #include
 #include
-#include
 #include "paddle/utils/Thread.h"
diff --git a/paddle/pserver/ParameterClient2.cpp b/paddle/pserver/ParameterClient2.cpp
index 31418822b3..84d965a66a 100644
--- a/paddle/pserver/ParameterClient2.cpp
+++ b/paddle/pserver/ParameterClient2.cpp
@@ -15,10 +15,10 @@ limitations under the License. */
 #include
 #include "ParameterClient2.h"
-#include "paddle/utils/StringUtil.h"
+#include "paddle/math/SparseRowMatrix.h"
 #include "paddle/utils/Flags.h"
 #include "paddle/utils/Stat.h"
-#include "paddle/math/SparseRowMatrix.h"
+#include "paddle/utils/StringUtil.h"
 P_DEFINE_string(pservers, "127.0.0.1", "Comma separated addresses of pservers");
 P_DEFINE_int32(parallel_thread_num, 1, "Thread number for parameter send");
diff --git a/paddle/pserver/ParameterClient2.h b/paddle/pserver/ParameterClient2.h
index 0f180722e3..5255394949 100644
--- a/paddle/pserver/ParameterClient2.h
+++ b/paddle/pserver/ParameterClient2.h
@@ -16,23 +16,23 @@ limitations under the License. */
 #include
 #include
-#include
 #include
+#include
-#include "paddle/utils/Locks.h"
 #include "paddle/math/Matrix.h"
+#include "paddle/math/Vector.h"
 #include "paddle/parameter/Parameter.h"
+#include "paddle/pserver/BaseClient.h"
+#include "paddle/utils/Flags.h"
+#include "paddle/utils/Locks.h"
 #include "paddle/utils/Queue.h"
 #include "paddle/utils/TypeDefs.h"
 #include "paddle/utils/Util.h"
-#include "paddle/math/Vector.h"
-#include "paddle/utils/Flags.h"
-#include "paddle/pserver/BaseClient.h"
 #include "ParameterService.pb.h"
-#include "SparseParameterDistribution.h"
 #include "ProtoServer.h"
+#include "SparseParameterDistribution.h"
 P_DECLARE_int32(parallel_thread_num);
diff --git a/paddle/pserver/ParameterServer2.cpp b/paddle/pserver/ParameterServer2.cpp
index ac70efc64f..2cb4c93535 100644
--- a/paddle/pserver/ParameterServer2.cpp
+++ b/paddle/pserver/ParameterServer2.cpp
@@ -21,14 +21,14 @@ limitations under the License. */
 #include "paddle/parameter/AverageOptimizer.h"
 #include "paddle/parameter/FirstOrderOptimizer.h"
-#include "paddle/utils/Flags.h"
 #include "paddle/parameter/OptimizerFunctions.h"
 #include "paddle/parameter/OptimizerWithRegularizer.h"
-#include "paddle/parameter/ParameterUpdateFunctions.h"
 #include "paddle/parameter/ParameterOptimizer.h"
+#include "paddle/parameter/ParameterUpdateFunctions.h"
 #include "paddle/parameter/Regularizer.h"
-#include "paddle/utils/Stat.h"
+#include "paddle/utils/Flags.h"
 #include "paddle/utils/GlobalConstants.h"
+#include "paddle/utils/Stat.h"
 P_DEFINE_int32(pserver_num_threads, 1, "number of threads for sync op exec");
 P_DEFINE_double(async_lagged_ratio_min,
diff --git a/paddle/pserver/ParameterServer2.h b/paddle/pserver/ParameterServer2.h
index 47122f3632..61c139981e 100644
--- a/paddle/pserver/ParameterServer2.h
+++ b/paddle/pserver/ParameterServer2.h
@@ -15,24 +15,24 @@ limitations under the License. */
 #pragma once
 #include
+#include
 #include
 #include
-#include
-#include
 #include
-#include
+#include
+#include
 #include
 #include
-#include "paddle/utils/Locks.h"
 #include "paddle/math/Matrix.h"
+#include "paddle/math/Vector.h"
 #include "paddle/parameter/Parameter.h"
 #include "paddle/parameter/ParameterOptimizer.h"
+#include "paddle/utils/Locks.h"
+#include "paddle/utils/Stat.h"
 #include "paddle/utils/ThreadLocal.h"
 #include "paddle/utils/TypeDefs.h"
-#include "paddle/math/Vector.h"
-#include "paddle/utils/Stat.h"
 #include "ParameterService.pb.h"
diff --git a/paddle/pserver/ParameterServer2Main.cpp b/paddle/pserver/ParameterServer2Main.cpp
index 1ba9b48c23..ffc521f2c1 100644
--- a/paddle/pserver/ParameterServer2Main.cpp
+++ b/paddle/pserver/ParameterServer2Main.cpp
@@ -12,13 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "paddle/utils/Util.h"
-#include "paddle/utils/StringUtil.h"
 #include
+#include "paddle/utils/StringUtil.h"
+#include "paddle/utils/Util.h"
-#include "paddle/utils/Flags.h"
 #include "ParameterServer2.h"
 #include "RDMANetwork.h"
+#include "paddle/utils/Flags.h"
 using namespace paddle;  // NOLINT
diff --git a/paddle/pserver/ProtoServer.h b/paddle/pserver/ProtoServer.h
index 97b7bf167d..3acdcc27da 100644
--- a/paddle/pserver/ProtoServer.h
+++ b/paddle/pserver/ProtoServer.h
@@ -100,7 +100,8 @@ protected:
                         ResponseCallback callback);
   typedef std::function msgReader,
-                             ResponseCallback callback)> ServiceFunction;
+                             ResponseCallback callback)>
+      ServiceFunction;
   /**
    * @brief register one RPC function in function mapping
diff --git a/paddle/pserver/SocketChannel.cpp b/paddle/pserver/SocketChannel.cpp
index f3e74257f6..0599889164 100644
--- a/paddle/pserver/SocketChannel.cpp
+++ b/paddle/pserver/SocketChannel.cpp
@@ -14,11 +14,11 @@ limitations under the License. */
 #include "SocketChannel.h"
-#include
-#include
-#include
 #include
 #include
+#include
+#include
+#include
 #include
 #include "RDMANetwork.h"
diff --git a/paddle/pserver/SparseParameterDistribution.h b/paddle/pserver/SparseParameterDistribution.h
index dc63b065a7..24b14106cf 100644
--- a/paddle/pserver/SparseParameterDistribution.h
+++ b/paddle/pserver/SparseParameterDistribution.h
@@ -15,8 +15,8 @@ limitations under the License. */
 #pragma once
 #include
-#include "paddle/utils/Logging.h"
 #include
+#include "paddle/utils/Logging.h"
 namespace paddle {
diff --git a/paddle/pserver/test/SocketTest.cpp b/paddle/pserver/test/SocketTest.cpp
index 528f5e381e..6e63c4f678 100644
--- a/paddle/pserver/test/SocketTest.cpp
+++ b/paddle/pserver/test/SocketTest.cpp
@@ -14,11 +14,11 @@ limitations under the License. */
 #include "paddle/utils/Util.h"
-#include
-#include
-#include
 #include
 #include
+#include
+#include
+#include
 #include
diff --git a/paddle/pserver/test/test_ParameterServer2.cpp b/paddle/pserver/test/test_ParameterServer2.cpp
index 493b6d060c..4257a2308d 100644
--- a/paddle/pserver/test/test_ParameterServer2.cpp
+++ b/paddle/pserver/test/test_ParameterServer2.cpp
@@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
+#include
 #include
 #include
-#include
 #include
 #include
diff --git a/paddle/pserver/test/test_ProtoServer.cpp b/paddle/pserver/test/test_ProtoServer.cpp
index cfed0d30d3..3880dde5e3 100644
--- a/paddle/pserver/test/test_ProtoServer.cpp
+++ b/paddle/pserver/test/test_ProtoServer.cpp
@@ -16,10 +16,10 @@ limitations under the License. */
 #include
-#include "paddle/utils/Stat.h"
+#include "ParameterService.pb.h"
 #include "paddle/math/Vector.h"
 #include "paddle/pserver/ProtoServer.h"
-#include "ParameterService.pb.h"
+#include "paddle/utils/Stat.h"
 P_DEFINE_string(server_addr, "127.0.0.1", "Server address");
 P_DEFINE_int64(dim, 50000000, "Data size");
diff --git a/paddle/py_paddle/util.py b/paddle/py_paddle/util.py
index d6bbf9a5a9..ce105d249a 100644
--- a/paddle/py_paddle/util.py
+++ b/paddle/py_paddle/util.py
@@ -559,10 +559,10 @@ def __monkey_patch_trainer__():
 def monkeypatches():
-    patches = [__monkeypatch_init_paddle__,
-               __monkeypatch_gradient_machine__,
-               __monkey_patch_protobuf_objects__,
-               __monkey_patch_parameter__,
-               __monkey_patch_trainer__]
+    patches = [
+        __monkeypatch_init_paddle__, __monkeypatch_gradient_machine__,
+        __monkey_patch_protobuf_objects__, __monkey_patch_parameter__,
+        __monkey_patch_trainer__
+    ]
     for patch in patches:
         patch()
diff --git a/paddle/scripts/travis/main.sh b/paddle/scripts/travis/main.sh
index c49d4546c2..13f2552d29 100755
--- a/paddle/scripts/travis/main.sh
+++ b/paddle/scripts/travis/main.sh
@@ -5,6 +5,8 @@ if [ ${JOB} == "BUILD_AND_TEST" ]; then
   ./build_and_test.sh
 elif [ ${JOB} == "DOCS" ]; then
   ./docs.sh
+elif [ ${JOB} == "PRE_COMMIT" ]; then
+  ./precommit.sh
 else
   echo Unknown job ${JOB}
   exit 1
diff --git a/paddle/scripts/travis/precommit.sh b/paddle/scripts/travis/precommit.sh
new file mode 100755
index 0000000000..5ad84f1821
--- /dev/null
+++ b/paddle/scripts/travis/precommit.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+function abort(){
+    echo "Your commit not fit PaddlePaddle code style" 1>&2
+    echo "Please use pre-commit scripts to auto-format your code" 1>&2
+    exit 1
+}
+
+trap 'abort' 0
+set -e
+source common.sh
+cd ..
+export PATH=/usr/bin:$PATH
+pre-commit install
+clang-format --version
+pre-commit run -a
+
+trap : 0
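The new precommit.sh fails the Travis job whenever `pre-commit run -a` (which drives clang-format) would still change a file. A small before/after sketch of the kind of reflow it enforces; the function here is hypothetical:

// Before: hand-wrapped call, one argument per line (the hook rejects this):
//   std::sort(values.begin(),
//             values.end(),
//             [](int a, int b) { return a > b; });
//
// After clang-format: the trailing lambda packs onto the call, 2-space indent.
#include <algorithm>
#include <vector>

void sortDescending(std::vector<int>& values) {
  std::sort(values.begin(), values.end(), [](int a, int b) { return a > b; });
}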
diff --git a/paddle/trainer/MergeModel.cpp b/paddle/trainer/MergeModel.cpp
index 8cb2873feb..1cf29a39b9 100644
--- a/paddle/trainer/MergeModel.cpp
+++ b/paddle/trainer/MergeModel.cpp
@@ -14,10 +14,10 @@ limitations under the License. */
 #include
-#include "paddle/utils/PythonUtil.h"
-#include "paddle/pserver/ParameterServer2.h"
 #include "ParamUtil.h"
 #include "Trainer.h"
+#include "paddle/pserver/ParameterServer2.h"
+#include "paddle/utils/PythonUtil.h"
 P_DEFINE_string(model_dir, "", "Directory for separated model files");
 P_DEFINE_string(model_file, "", "File for merged model file");
diff --git a/paddle/trainer/ParamUtil.cpp b/paddle/trainer/ParamUtil.cpp
index 200417ebfc..ffbca42e10 100644
--- a/paddle/trainer/ParamUtil.cpp
+++ b/paddle/trainer/ParamUtil.cpp
@@ -17,22 +17,22 @@ limitations under the License. */
 #include
 #include
-#include
 #include
-#include
+#include
 #include
+#include
 #include
 #include
+#include "paddle/utils/GlobalConstants.h"
 #include "paddle/utils/PythonUtil.h"
 #include "paddle/utils/Stat.h"
 #include "paddle/utils/Util.h"
-#include "paddle/utils/GlobalConstants.h"
+#include "TesterConfig.h"
 #include "paddle/gserver/gradientmachines/NeuralNetwork.h"
 #include "paddle/gserver/layers/ValidationLayer.h"
-#include "TesterConfig.h"
 namespace paddle {
diff --git a/paddle/trainer/ParamUtil.h b/paddle/trainer/ParamUtil.h
index 8fa6fda75c..2e05595848 100644
--- a/paddle/trainer/ParamUtil.h
+++ b/paddle/trainer/ParamUtil.h
@@ -22,11 +22,11 @@ limitations under the License. */
 #include "paddle/gserver/dataproviders/DataProvider.h"
 #include "paddle/gserver/gradientmachines/GradientMachine.h"
+#include
+#include
+#include "ParameterUpdater.h"
 #include "TrainerConfig.pb.h"
 #include "TrainerConfigHelper.h"
-#include "ParameterUpdater.h"
-#include
-#include
 namespace paddle {
diff --git a/paddle/trainer/ParameterUpdater.h b/paddle/trainer/ParameterUpdater.h
index 81ac374425..e52b5cd318 100644
--- a/paddle/trainer/ParameterUpdater.h
+++ b/paddle/trainer/ParameterUpdater.h
@@ -24,8 +24,8 @@ limitations under the License. */
 #include "paddle/parameter/Parameter.h"
 #include "paddle/parameter/ParameterUpdaterBase.h"
-#include "paddle/gserver/layers/Layer.h"
 #include "TrainerConfig.pb.h"
+#include "paddle/gserver/layers/Layer.h"
 #include
 #include
diff --git a/paddle/trainer/RemoteParameterUpdater.cpp b/paddle/trainer/RemoteParameterUpdater.cpp
index 702ea07f8a..b7f7b93b8d 100644
--- a/paddle/trainer/RemoteParameterUpdater.cpp
+++ b/paddle/trainer/RemoteParameterUpdater.cpp
@@ -14,8 +14,8 @@ limitations under the License. */
 #include "RemoteParameterUpdater.h"
 #include "Trainer.h"
-#include "paddle/utils/Stat.h"
 #include "paddle/utils/GlobalConstants.h"
+#include "paddle/utils/Stat.h"
 P_DECLARE_int32(trainer_id);
 P_DECLARE_string(save_dir);
diff --git a/paddle/trainer/RemoteParameterUpdater.h b/paddle/trainer/RemoteParameterUpdater.h
index 46ce4be146..66055c778e 100644
--- a/paddle/trainer/RemoteParameterUpdater.h
+++ b/paddle/trainer/RemoteParameterUpdater.h
@@ -14,12 +14,12 @@ limitations under the License. */
 #pragma once
-#include
 #include
-#include "paddle/pserver/ParameterClient2.h"
+#include
 #include "ParameterUpdater.h"
-#include "paddle/utils/Util.h"
+#include "paddle/pserver/ParameterClient2.h"
 #include "paddle/utils/Queue.h"
+#include "paddle/utils/Util.h"
 namespace paddle {
diff --git a/paddle/trainer/Tester.h b/paddle/trainer/Tester.h
index ae7e0e93bf..e892744db2 100644
--- a/paddle/trainer/Tester.h
+++ b/paddle/trainer/Tester.h
@@ -24,12 +24,12 @@ limitations under the License. */
 #include "TrainerConfig.pb.h"
-#include "ParameterUpdater.h"
+#include
+#include
 #include "ParamUtil.h"
+#include "ParameterUpdater.h"
 #include "TesterConfig.h"
 #include "TrainerInternalConfig.h"
-#include
-#include
 namespace paddle {
diff --git a/paddle/trainer/TesterConfig.h b/paddle/trainer/TesterConfig.h
index 9ff145a8a1..68d4c931ff 100644
--- a/paddle/trainer/TesterConfig.h
+++ b/paddle/trainer/TesterConfig.h
@@ -23,9 +23,9 @@ limitations under the License. */
 #include "TrainerConfig.pb.h"
-#include "ParameterUpdater.h"
-#include
 #include
+#include
+#include "ParameterUpdater.h"
 namespace paddle {
diff --git a/paddle/trainer/ThreadParameterUpdater.h b/paddle/trainer/ThreadParameterUpdater.h
index 492692dbe5..d01ac689f9 100644
--- a/paddle/trainer/ThreadParameterUpdater.h
+++ b/paddle/trainer/ThreadParameterUpdater.h
@@ -14,13 +14,13 @@ limitations under the License. */
 #pragma once
-#include "paddle/utils/Util.h"
 #include "paddle/parameter/AverageOptimizer.h"
 #include "paddle/parameter/FirstOrderOptimizer.h"
 #include "paddle/parameter/OptimizerFunctions.h"
 #include "paddle/parameter/OptimizerWithRegularizer.h"
 #include "paddle/parameter/Parameter.h"
 #include "paddle/parameter/Regularizer.h"
+#include "paddle/utils/Util.h"
 #include
 #include
diff --git a/paddle/trainer/Trainer.h b/paddle/trainer/Trainer.h
index f50b56143d..cabbb4acd1 100644
--- a/paddle/trainer/Trainer.h
+++ b/paddle/trainer/Trainer.h
@@ -22,13 +22,13 @@ limitations under the License. */
 #include "paddle/gserver/dataproviders/DataProvider.h"
 #include "paddle/gserver/gradientmachines/GradientMachine.h"
-#include "TrainerConfigHelper.h"
+#include
+#include
+#include "ParamUtil.h"
 #include "ParameterUpdater.h"
-#include "TrainerInternal.h"
 #include "Tester.h"
-#include "ParamUtil.h"
-#include
-#include
+#include "TrainerConfigHelper.h"
+#include "TrainerInternal.h"
 #ifdef PADDLE_METRIC_LEARNING
 #include "paddle/internals/metric_learning/MetricTrainer.h"
diff --git a/paddle/trainer/TrainerConfigHelper.h b/paddle/trainer/TrainerConfigHelper.h
index 2c5c492ce8..f1366cc041 100644
--- a/paddle/trainer/TrainerConfigHelper.h
+++ b/paddle/trainer/TrainerConfigHelper.h
@@ -14,9 +14,9 @@ limitations under the License. */
 #pragma once
-#include
 #include
 #include
+#include
 namespace paddle {
diff --git a/paddle/trainer/TrainerInternal.cpp b/paddle/trainer/TrainerInternal.cpp
index 1b49d4aa28..f3b465b444 100644
--- a/paddle/trainer/TrainerInternal.cpp
+++ b/paddle/trainer/TrainerInternal.cpp
@@ -17,22 +17,22 @@ limitations under the License. */
 #include
 #include
-#include
 #include
-#include
+#include
 #include
+#include
 #include
+#include "paddle/gserver/gradientmachines/NeuralNetwork.h"
+#include "paddle/gserver/layers/ValidationLayer.h"
+#include "paddle/utils/GlobalConstants.h"
 #include "paddle/utils/PythonUtil.h"
 #include "paddle/utils/Stat.h"
 #include "paddle/utils/Util.h"
-#include "paddle/utils/GlobalConstants.h"
-#include "paddle/gserver/gradientmachines/NeuralNetwork.h"
-#include "paddle/gserver/layers/ValidationLayer.h"
-#include "ThreadParameterUpdater.h"
 #include "RemoteParameterUpdater.h"
+#include "ThreadParameterUpdater.h"
 namespace paddle {
diff --git a/paddle/trainer/TrainerInternal.h b/paddle/trainer/TrainerInternal.h
index b67711a721..7018faab24 100644
--- a/paddle/trainer/TrainerInternal.h
+++ b/paddle/trainer/TrainerInternal.h
@@ -17,15 +17,15 @@ limitations under the License. */
 #include "paddle/utils/Util.h"
 #include
-#include
 #include
+#include
-#include "hl_gpu.h"
-#include "paddle/gserver/gradientmachines/GradientMachine.h"
-#include "TrainerConfig.pb.h"
 #include "ParameterUpdater.h"
+#include "TrainerConfig.pb.h"
 #include "TrainerConfigHelper.h"
 #include "TrainerInternalConfig.h"
+#include "hl_gpu.h"
+#include "paddle/gserver/gradientmachines/GradientMachine.h"
 namespace paddle {
diff --git a/paddle/trainer/TrainerInternalConfig.h b/paddle/trainer/TrainerInternalConfig.h
index fd6fdf45e6..b47692720e 100644
--- a/paddle/trainer/TrainerInternalConfig.h
+++ b/paddle/trainer/TrainerInternalConfig.h
@@ -23,10 +23,10 @@ limitations under the License. */
 #include "TrainerConfig.pb.h"
-#include "ParameterUpdater.h"
+#include
 #include
 #include
-#include
+#include "ParameterUpdater.h"
 namespace paddle {
 /**
diff --git a/paddle/trainer/TrainerMain.cpp b/paddle/trainer/TrainerMain.cpp
index 7a18f9836c..0a4d56b892 100644
--- a/paddle/trainer/TrainerMain.cpp
+++ b/paddle/trainer/TrainerMain.cpp
@@ -13,10 +13,10 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include
+#include "paddle/pserver/ParameterServer2.h"
+#include "paddle/utils/Excepts.h"
 #include "paddle/utils/PythonUtil.h"
 #include "paddle/utils/StringUtil.h"
-#include "paddle/utils/Excepts.h"
-#include "paddle/pserver/ParameterServer2.h"
 #include "ParamUtil.h"
 #include "Trainer.h"
diff --git a/paddle/trainer/tests/picojson.h b/paddle/trainer/tests/picojson.h
index cb657d219e..23bfa16408 100644
--- a/paddle/trainer/tests/picojson.h
+++ b/paddle/trainer/tests/picojson.h
@@ -30,10 +30,10 @@
 #define picojson_h
 #include
+#include
 #include
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/paddle/trainer/tests/test_Compare.cpp b/paddle/trainer/tests/test_Compare.cpp
index 07a47b2990..63fa48540c 100644
--- a/paddle/trainer/tests/test_Compare.cpp
+++ b/paddle/trainer/tests/test_Compare.cpp
@@ -16,8 +16,8 @@ limitations under the License. */
 #include "paddle/trainer/Trainer.h"
-#include
 #include
+#include
 using namespace paddle;  // NOLINT
 using namespace std;     // NOLINT
diff --git a/paddle/trainer/tests/test_CompareTwoNets.cpp b/paddle/trainer/tests/test_CompareTwoNets.cpp
index 7e5449dcba..8a4556721d 100644
--- a/paddle/trainer/tests/test_CompareTwoNets.cpp
+++ b/paddle/trainer/tests/test_CompareTwoNets.cpp
@@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
+#include
 #include
-#include
 #include
-#include
+#include
 #include "paddle/trainer/Trainer.h"
diff --git a/paddle/trainer/tests/test_CompareTwoOpts.cpp b/paddle/trainer/tests/test_CompareTwoOpts.cpp
index 4d051b537c..673ef289d8 100644
--- a/paddle/trainer/tests/test_CompareTwoOpts.cpp
+++ b/paddle/trainer/tests/test_CompareTwoOpts.cpp
@@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
+#include
 #include
-#include
 #include
-#include
+#include
 #include "paddle/trainer/Trainer.h"
diff --git a/paddle/trainer/tests/test_PyDataProviderWrapper.cpp b/paddle/trainer/tests/test_PyDataProviderWrapper.cpp
index 5c5c6d5346..66ec65e340 100644
--- a/paddle/trainer/tests/test_PyDataProviderWrapper.cpp
+++ b/paddle/trainer/tests/test_PyDataProviderWrapper.cpp
@@ -13,16 +13,16 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #ifndef PADDLE_NO_PYTHON
+#include
 #include
-#include
 #include
-#include
 #include
 #include
+#include
+#include
+#include
 #include
 #include
-#include
-#include
 #include "picojson.h"
 void checkEqual(const paddle::Argument& expect, const paddle::Argument& actual);
diff --git a/paddle/trainer/tests/test_TrainerOnePass.cpp b/paddle/trainer/tests/test_TrainerOnePass.cpp
index 1d9dce1b0e..0b587ecce1 100644
--- a/paddle/trainer/tests/test_TrainerOnePass.cpp
+++ b/paddle/trainer/tests/test_TrainerOnePass.cpp
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include
 #include
+#include
 #include "paddle/trainer/Trainer.h"
 #include "paddle/trainer/TrainerInternal.h"
diff --git a/paddle/trainer/tests/test_recurrent_machine_generation.cpp b/paddle/trainer/tests/test_recurrent_machine_generation.cpp
index b52acc2ca7..7d8dfd788f 100644
--- a/paddle/trainer/tests/test_recurrent_machine_generation.cpp
+++ b/paddle/trainer/tests/test_recurrent_machine_generation.cpp
@@ -14,8 +14,8 @@ limitations under the License. */
 #include
-#include
 #include
+#include
 #include
diff --git a/paddle/utils/BarrierStat.cpp b/paddle/utils/BarrierStat.cpp
index 5040deefd0..9dde155aca 100644
--- a/paddle/utils/BarrierStat.cpp
+++ b/paddle/utils/BarrierStat.cpp
@@ -12,13 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
+#include "paddle/utils/BarrierStat.h"
+#include
 #include
-#include
 #include
-#include
-#include "paddle/utils/Stat.h"
-#include "paddle/utils/BarrierStat.h"
+#include
 #include "paddle/utils/Flags.h"
+#include "paddle/utils/Stat.h"
 P_DEFINE_bool(log_barrier_abstract,
               true,
diff --git a/paddle/utils/BarrierStat.h b/paddle/utils/BarrierStat.h
index 3c5c0885d6..a9c925eff6 100644
--- a/paddle/utils/BarrierStat.h
+++ b/paddle/utils/BarrierStat.h
@@ -15,18 +15,17 @@ limitations under the License. */
 #pragma once
 #include
-#include
 #include
-#include
 #include
+#include
+#include
 #include
+#include
 #include
-#include
-#include "Logging.h"
 #include "Locks.h"
+#include "Logging.h"
 #include "ThreadLocal.h"
-#include "Stat.h"
 namespace paddle {
diff --git a/paddle/utils/CommandLineParser.cpp b/paddle/utils/CommandLineParser.cpp
index 14f83241c5..51558b45a1 100644
--- a/paddle/utils/CommandLineParser.cpp
+++ b/paddle/utils/CommandLineParser.cpp
@@ -14,15 +14,15 @@ limitations under the License. */
 #include "CommandLineParser.h"
 #ifndef PADDLE_USE_GFLAGS
-#include "paddle/utils/StringUtil.h"
+#include
 #include
-#include
 #include
-#include
+#include
 #include
-#include
-#include
 #include
+#include
+#include
+#include "paddle/utils/StringUtil.h"
 namespace paddle {
@@ -46,16 +46,13 @@ template <>
 bool StringToValue(const std::string& content, bool* value) {
   std::string tmp = content;
-  std::transform(tmp.begin(),
-                 tmp.end(),
-                 tmp.begin(),
-                 [](char in) -> char {
-                   if (in <= 'Z' && in >= 'A') {
-                     return in - ('Z' - 'z');
-                   } else {
-                     return in;
-                   }
-                 });  // tolower.
+  std::transform(tmp.begin(), tmp.end(), tmp.begin(), [](char in) -> char {
+    if (in <= 'Z' && in >= 'A') {
+      return in - ('Z' - 'z');
+    } else {
+      return in;
+    }
+  });  // tolower.
   if (tmp == "true" || tmp == "1") {
     *value = true;
diff --git a/paddle/utils/CommandLineParser.h b/paddle/utils/CommandLineParser.h
index 3d25bc3b0b..b4449c6f09 100644
--- a/paddle/utils/CommandLineParser.h
+++ b/paddle/utils/CommandLineParser.h
@@ -14,10 +14,10 @@ limitations under the License. */
 #pragma once
 #ifndef PADDLE_USE_GFLAGS
-#include "DisableCopy.h"
+#include
 #include
 #include
-#include
+#include "DisableCopy.h"
 namespace paddle {
diff --git a/paddle/utils/CpuId.cpp b/paddle/utils/CpuId.cpp
index 734b2e0924..53db82e48a 100644
--- a/paddle/utils/CpuId.cpp
+++ b/paddle/utils/CpuId.cpp
@@ -15,43 +15,43 @@ limitations under the License. */
 #ifdef _WIN32
 /// for MSVC
-#define CPUID(info, x)    __cpuidex(info, x, 0)
+#define CPUID(info, x) __cpuidex(info, x, 0)
 #else
 #include
 /// for GCC/Clang
-#define CPUID(info, x)    __cpuid_count(x, 0, info[0], info[1], info[2], info[3])
+#define CPUID(info, x) __cpuid_count(x, 0, info[0], info[1], info[2], info[3])
 #endif
 namespace paddle {
 SIMDFlags::SIMDFlags() {
-    unsigned int cpuInfo[4];
-    // CPUID: https://en.wikipedia.org/wiki/CPUID
-    CPUID(cpuInfo, 0x00000001);
-    simd_flags_ |= cpuInfo[3] & (1 << 25) ? SIMD_SSE : SIMD_NONE;
-    simd_flags_ |= cpuInfo[3] & (1 << 26) ? SIMD_SSE2 : SIMD_NONE;
-    simd_flags_ |= cpuInfo[2] & (1 << 0) ? SIMD_SSE3 : SIMD_NONE;
-    simd_flags_ |= cpuInfo[2] & (1 << 9) ? SIMD_SSSE3 : SIMD_NONE;
-    simd_flags_ |= cpuInfo[2] & (1 << 19) ? SIMD_SSE41 : SIMD_NONE;
-    simd_flags_ |= cpuInfo[2] & (1 << 20) ? SIMD_SSE42 : SIMD_NONE;
-    simd_flags_ |= cpuInfo[2] & (1 << 12) ? SIMD_FMA3 : SIMD_NONE;
-    simd_flags_ |= cpuInfo[2] & (1 << 28) ? SIMD_AVX : SIMD_NONE;
-
-    CPUID(cpuInfo, 0x00000007);
-    simd_flags_ |= cpuInfo[1] & (1 << 5) ? SIMD_AVX2 : SIMD_NONE;
-    simd_flags_ |= cpuInfo[1] & (1 << 16) ? SIMD_AVX512: SIMD_NONE;
-
-    CPUID(cpuInfo, 0x80000001);
-    simd_flags_ |= cpuInfo[2] & (1 << 16) ? SIMD_FMA4 : SIMD_NONE;
+  unsigned int cpuInfo[4];
+  // CPUID: https://en.wikipedia.org/wiki/CPUID
+  CPUID(cpuInfo, 0x00000001);
+  simd_flags_ |= cpuInfo[3] & (1 << 25) ? SIMD_SSE : SIMD_NONE;
+  simd_flags_ |= cpuInfo[3] & (1 << 26) ? SIMD_SSE2 : SIMD_NONE;
+  simd_flags_ |= cpuInfo[2] & (1 << 0) ? SIMD_SSE3 : SIMD_NONE;
+  simd_flags_ |= cpuInfo[2] & (1 << 9) ? SIMD_SSSE3 : SIMD_NONE;
+  simd_flags_ |= cpuInfo[2] & (1 << 19) ? SIMD_SSE41 : SIMD_NONE;
+  simd_flags_ |= cpuInfo[2] & (1 << 20) ? SIMD_SSE42 : SIMD_NONE;
+  simd_flags_ |= cpuInfo[2] & (1 << 12) ? SIMD_FMA3 : SIMD_NONE;
+  simd_flags_ |= cpuInfo[2] & (1 << 28) ? SIMD_AVX : SIMD_NONE;
+
+  CPUID(cpuInfo, 0x00000007);
+  simd_flags_ |= cpuInfo[1] & (1 << 5) ? SIMD_AVX2 : SIMD_NONE;
+  simd_flags_ |= cpuInfo[1] & (1 << 16) ? SIMD_AVX512 : SIMD_NONE;
+
+  CPUID(cpuInfo, 0x80000001);
+  simd_flags_ |= cpuInfo[2] & (1 << 16) ? SIMD_FMA4 : SIMD_NONE;
 }
 SIMDFlags* SIMDFlags::instance() {
-    static SIMDFlags instance;
-    return &instance;
+  static SIMDFlags instance;
+  return &instance;
 }
-}   // namespace paddle
+}  // namespace paddle
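The CpuId.cpp change is pure re-indentation; the underlying technique is reading CPUID leaves for feature bits. A hedged, self-contained sketch of the same check using GCC/Clang's __get_cpuid from <cpuid.h> (bit positions follow the CPUID reference cited in the code above):

#include <cpuid.h>
#include <cstdio>

int main() {
  unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
  // Leaf 0x00000001: EDX bit 26 = SSE2, ECX bit 28 = AVX, as in SIMDFlags.
  if (__get_cpuid(0x00000001, &eax, &ebx, &ecx, &edx)) {
    std::printf("SSE2: %u\n", (edx >> 26) & 1);
    std::printf("AVX:  %u\n", (ecx >> 28) & 1);
  }
  return 0;
}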
diff --git a/paddle/utils/CpuId.h b/paddle/utils/CpuId.h
index d15e58d1dd..66ac59cf3e 100644
--- a/paddle/utils/CpuId.h
+++ b/paddle/utils/CpuId.h
@@ -18,54 +18,54 @@ namespace paddle {
 class SIMDFlags final {
 public:
-    DISABLE_COPY(SIMDFlags);
+  DISABLE_COPY(SIMDFlags);
-    SIMDFlags();
+  SIMDFlags();
-    static SIMDFlags* instance();
+  static SIMDFlags* instance();
-    inline bool isSSE() const { return simd_flags_ & SIMD_SSE; }
-    inline bool isSSE2() const { return simd_flags_ & SIMD_SSE2; }
-    inline bool isSSE3() const { return simd_flags_ & SIMD_SSE3; }
-    inline bool isSSSE3() const { return simd_flags_ & SIMD_SSSE3; }
-    inline bool isSSE41() const { return simd_flags_ & SIMD_SSE41; }
-    inline bool isSSE42() const { return simd_flags_ & SIMD_SSE42; }
-    inline bool isFMA3() const { return simd_flags_ & SIMD_FMA3; }
-    inline bool isFMA4() const { return simd_flags_ & SIMD_FMA4; }
-    inline bool isAVX() const { return simd_flags_ & SIMD_AVX; }
-    inline bool isAVX2() const { return simd_flags_ & SIMD_AVX2; }
-    inline bool isAVX512()const { return simd_flags_ & SIMD_AVX512;}
+  inline bool isSSE() const { return simd_flags_ & SIMD_SSE; }
+  inline bool isSSE2() const { return simd_flags_ & SIMD_SSE2; }
+  inline bool isSSE3() const { return simd_flags_ & SIMD_SSE3; }
+  inline bool isSSSE3() const { return simd_flags_ & SIMD_SSSE3; }
+  inline bool isSSE41() const { return simd_flags_ & SIMD_SSE41; }
+  inline bool isSSE42() const { return simd_flags_ & SIMD_SSE42; }
+  inline bool isFMA3() const { return simd_flags_ & SIMD_FMA3; }
+  inline bool isFMA4() const { return simd_flags_ & SIMD_FMA4; }
+  inline bool isAVX() const { return simd_flags_ & SIMD_AVX; }
+  inline bool isAVX2() const { return simd_flags_ & SIMD_AVX2; }
+  inline bool isAVX512() const { return simd_flags_ & SIMD_AVX512; }
 private:
-    enum simd_t {
-        SIMD_NONE = 0,         ///< None
-        SIMD_SSE = 1 << 0,     ///< SSE
-        SIMD_SSE2 = 1 << 1,    ///< SSE 2
-        SIMD_SSE3 = 1 << 2,    ///< SSE 3
-        SIMD_SSSE3 = 1 << 3,   ///< SSSE 3
-        SIMD_SSE41 = 1 << 4,   ///< SSE 4.1
-        SIMD_SSE42 = 1 << 5,   ///< SSE 4.2
-        SIMD_FMA3 = 1 << 6,    ///< FMA 3
-        SIMD_FMA4 = 1 << 7,    ///< FMA 4
-        SIMD_AVX = 1 << 8,     ///< AVX
-        SIMD_AVX2 = 1 << 9,    ///< AVX 2
-        SIMD_AVX512 = 1 << 10, ///< AVX 512
-    };
+  enum simd_t {
+    SIMD_NONE = 0,          ///< None
+    SIMD_SSE = 1 << 0,      ///< SSE
+    SIMD_SSE2 = 1 << 1,     ///< SSE 2
+    SIMD_SSE3 = 1 << 2,     ///< SSE 3
+    SIMD_SSSE3 = 1 << 3,    ///< SSSE 3
+    SIMD_SSE41 = 1 << 4,    ///< SSE 4.1
+    SIMD_SSE42 = 1 << 5,    ///< SSE 4.2
+    SIMD_FMA3 = 1 << 6,     ///< FMA 3
+    SIMD_FMA4 = 1 << 7,     ///< FMA 4
+    SIMD_AVX = 1 << 8,      ///< AVX
+    SIMD_AVX2 = 1 << 9,     ///< AVX 2
+    SIMD_AVX512 = 1 << 10,  ///< AVX 512
+  };
-    /// simd flags
-    int simd_flags_ = SIMD_NONE;
+  /// simd flags
+  int simd_flags_ = SIMD_NONE;
 };
-#define HAS_SSE     SIMDFlags::instance()->isSSE()
-#define HAS_SSE2    SIMDFlags::instance()->isSSE2()
-#define HAS_SSE3    SIMDFlags::instance()->isSSE3()
-#define HAS_SSSE3   SIMDFlags::instance()->isSSSE3()
-#define HAS_SSE41   SIMDFlags::instance()->isSSE41()
-#define HAS_SSE42   SIMDFlags::instance()->isSSE42()
-#define HAS_FMA3    SIMDFlags::instance()->isFMA3()
-#define HAS_FMA4    SIMDFlags::instance()->isFMA4()
-#define HAS_AVX     SIMDFlags::instance()->isAVX()
-#define HAS_AVX2    SIMDFlags::instance()->isAVX2()
-#define HAS_AVX512  SIMDFlags::instance()->isAVX512()
+#define HAS_SSE SIMDFlags::instance()->isSSE()
+#define HAS_SSE2 SIMDFlags::instance()->isSSE2()
+#define HAS_SSE3 SIMDFlags::instance()->isSSE3()
+#define HAS_SSSE3 SIMDFlags::instance()->isSSSE3()
+#define HAS_SSE41 SIMDFlags::instance()->isSSE41()
+#define HAS_SSE42 SIMDFlags::instance()->isSSE42()
+#define HAS_FMA3 SIMDFlags::instance()->isFMA3()
+#define HAS_FMA4 SIMDFlags::instance()->isFMA4()
+#define HAS_AVX SIMDFlags::instance()->isAVX()
+#define HAS_AVX2 SIMDFlags::instance()->isAVX2()
+#define HAS_AVX512 SIMDFlags::instance()->isAVX512()
-}   // namespace paddle
+}  // namespace paddle
diff --git a/paddle/utils/CustomStackTrace.cpp b/paddle/utils/CustomStackTrace.cpp
index 730788cb98..083f5c509a 100644
--- a/paddle/utils/CustomStackTrace.cpp
+++ b/paddle/utils/CustomStackTrace.cpp
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include "CustomStackTrace.h"
-#include "CommandLineParser.h"
 #include
+#include "CommandLineParser.h"
 P_DEFINE_bool(
     layer_stack_error_only_current_thread,
diff --git a/paddle/utils/CustomStackTrace.h b/paddle/utils/CustomStackTrace.h
index 5686f3c84c..6992e85622 100644
--- a/paddle/utils/CustomStackTrace.h
+++ b/paddle/utils/CustomStackTrace.h
@@ -14,10 +14,10 @@ limitations under the License. */
 #pragma once
+#include
 #include
 #include
 #include
-#include
 #include "ThreadLocal.h"
@@ -96,7 +96,8 @@ public:
    */
   typedef std::function DumpCallback;
+                             const T& /*item*/)>
+      DumpCallback;
   /**
    * Dump all thread stack, and all stack will be cleared.
diff --git a/paddle/utils/Logging.cpp b/paddle/utils/Logging.cpp
index 3c31633e58..20f32466a5 100644
--- a/paddle/utils/Logging.cpp
+++ b/paddle/utils/Logging.cpp
@@ -22,13 +22,13 @@ limitations under the License. */
 #include
 #include
 #include
-#include
-#include
 #include
+#include
+#include
-#include
-#include
 #include
+#include
+#include
 #include
 namespace paddle {
diff --git a/paddle/utils/Logging.h b/paddle/utils/Logging.h
index c91ca9fecc..4379289f6d 100644
--- a/paddle/utils/Logging.h
+++ b/paddle/utils/Logging.h
@@ -18,8 +18,8 @@ limitations under the License. */
 */
 #pragma once
-#include
 #include
+#include
 #include
 #ifndef PADDLE_USE_GLOG
diff --git a/paddle/utils/PythonUtil.cpp b/paddle/utils/PythonUtil.cpp
index a9c6a20997..2ee4e4fb7e 100644
--- a/paddle/utils/PythonUtil.cpp
+++ b/paddle/utils/PythonUtil.cpp
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include "PythonUtil.h"
-#include
 #include
+#include
 namespace paddle {
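A hedged usage sketch for the SIMDFlags singleton and HAS_* macros above: dispatch to a wider kernel at runtime only when the CPU reports the feature. Both kernels are hypothetical placeholders, not Paddle APIs:

#include "paddle/utils/CpuId.h"

void addVectorsAvx(const float* a, const float* b, float* out, int n);
void addVectorsScalar(const float* a, const float* b, float* out, int n);

void addVectors(const float* a, const float* b, float* out, int n) {
  if (HAS_AVX) {  // expands to SIMDFlags::instance()->isAVX()
    addVectorsAvx(a, b, out, n);
  } else {
    addVectorsScalar(a, b, out, n);
  }
}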
diff --git a/paddle/utils/PythonUtil.h b/paddle/utils/PythonUtil.h
index 2cbc2fdd37..daebaffc85 100644
--- a/paddle/utils/PythonUtil.h
+++ b/paddle/utils/PythonUtil.h
@@ -13,6 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #pragma once
+// clang-format off
+#include "paddle/utils/Util.h"
 #ifndef PADDLE_NO_PYTHON
 // must include the following two blocks, otherwise,
@@ -33,13 +35,12 @@ limitations under the License. */
 #endif
 #include
 #include
-
 #endif
-#include "paddle/utils/Util.h"
 #include
-#include
 #include
+#include
+// clang-format on
 namespace paddle {
diff --git a/paddle/utils/Queue.h b/paddle/utils/Queue.h
index 37748345a4..f054738f87 100644
--- a/paddle/utils/Queue.h
+++ b/paddle/utils/Queue.h
@@ -142,9 +142,9 @@ public:
    */
   bool waitNotEmptyFor(int seconds) {
     std::unique_lock lock(queueLock_);
-    return queueCV_.wait_for(lock,
-                             std::chrono::seconds(seconds),
-                             [this] { return numElements_ != 0; });
+    return queueCV_.wait_for(lock, std::chrono::seconds(seconds), [this] {
+      return numElements_ != 0;
+    });
   }
 private:
diff --git a/paddle/utils/Stat.cpp b/paddle/utils/Stat.cpp
index 01ea535cfd..44acee2495 100644
--- a/paddle/utils/Stat.cpp
+++ b/paddle/utils/Stat.cpp
@@ -13,9 +13,9 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include "Stat.h"
-#include "Util.h"
-#include
 #include
+#include
+#include "Util.h"
 namespace paddle {
@@ -207,10 +207,9 @@ static unsigned g_profileCount = 0;
 static std::recursive_mutex g_profileMutex;
 GpuProfiler::GpuProfiler(std::string statName, std::string info)
-  : guard_(g_profileMutex) {
+    : guard_(g_profileMutex) {
   if (++g_profileCount == 1) {
-    LOG(INFO) << "Enable GPU Profiler Stat: ["
-              << statName << "] " << info;
+    LOG(INFO) << "Enable GPU Profiler Stat: [" << statName << "] " << info;
     hl_profiler_start();
   }
 }
diff --git a/paddle/utils/StringUtil.h b/paddle/utils/StringUtil.h
index 8a63ca23b4..0b4f4c9113 100644
--- a/paddle/utils/StringUtil.h
+++ b/paddle/utils/StringUtil.h
@@ -14,9 +14,9 @@ limitations under the License. */
 #pragma once
+#include
 #include
 #include
-#include
 #include "Logging.h"
 namespace paddle {
diff --git a/paddle/utils/Thread.h b/paddle/utils/Thread.h
index 435dff2f66..ef36a8c5b2 100644
--- a/paddle/utils/Thread.h
+++ b/paddle/utils/Thread.h
@@ -13,9 +13,9 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #pragma once
-#include "Util.h"
-#include "Logging.h"
 #include
+#include "Logging.h"
+#include "Util.h"
 #include "Queue.h"
 #include "ThreadLocal.h"
diff --git a/paddle/utils/ThreadLocal.cpp b/paddle/utils/ThreadLocal.cpp
index c9b32784d9..8a2878fc4b 100644
--- a/paddle/utils/ThreadLocal.cpp
+++ b/paddle/utils/ThreadLocal.cpp
@@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "Util.h"
 #include "ThreadLocal.h"
 #include "CommandLineParser.h"
+#include "Util.h"
 P_DEFINE_bool(thread_local_rand_use_global_seed,
               false,
diff --git a/paddle/utils/ThreadLocal.h b/paddle/utils/ThreadLocal.h
index b6e31bd05b..a4987c9ec2 100644
--- a/paddle/utils/ThreadLocal.h
+++ b/paddle/utils/ThreadLocal.h
@@ -15,14 +15,14 @@ limitations under the License. */
 #pragma once
 #include
-#include
 #include
+#include
 #include
 #include
 #include
 #include
-#include "Util.h"
 #include "Logging.h"
+#include "Util.h"
 namespace paddle {
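The Queue.h hunk above packs a predicate lambda into condition_variable::wait_for, which returns false on timeout and true once the predicate holds. A self-contained stand-in (not Paddle's Queue) showing the same pattern:

#include <chrono>
#include <condition_variable>
#include <mutex>

class TinyQueue {
public:
  // Blocks up to `seconds`; true iff the queue became non-empty in time.
  bool waitNotEmptyFor(int seconds) {
    std::unique_lock<std::mutex> lock(queueLock_);
    return queueCV_.wait_for(lock, std::chrono::seconds(seconds), [this] {
      return numElements_ != 0;
    });
  }
  void push() {
    {
      std::lock_guard<std::mutex> guard(queueLock_);
      ++numElements_;
    }
    queueCV_.notify_all();
  }

private:
  std::mutex queueLock_;
  std::condition_variable queueCV_;
  int numElements_ = 0;
};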
*/ #include "CommandLineParser.h" #include "CustomStackTrace.h" +#include "StringUtil.h" #include "Thread.h" #include "ThreadLocal.h" #include "Version.h" -#include "StringUtil.h" P_DEFINE_int32(seed, 1, "random number seed. 0 for srand(time)"); @@ -126,25 +126,23 @@ void registerInitFunction(std::function func, int priority) { } void runInitFunctions() { - std::call_once( - g_onceFlag, - []() { - LOG(INFO) << "Calling runInitFunctions"; - if (g_initFuncs) { - std::sort(g_initFuncs->begin(), - g_initFuncs->end(), - [](const PriorityFuncPair& x, const PriorityFuncPair& y) { - return x.first > y.first; - }); - for (auto& f : *g_initFuncs) { - f.second(); - } - delete g_initFuncs; - g_initFuncs = nullptr; - } - g_initialized = true; - LOG(INFO) << "Call runInitFunctions done."; - }); + std::call_once(g_onceFlag, []() { + LOG(INFO) << "Calling runInitFunctions"; + if (g_initFuncs) { + std::sort(g_initFuncs->begin(), + g_initFuncs->end(), + [](const PriorityFuncPair& x, const PriorityFuncPair& y) { + return x.first > y.first; + }); + for (auto& f : *g_initFuncs) { + f.second(); + } + delete g_initFuncs; + g_initFuncs = nullptr; + } + g_initialized = true; + LOG(INFO) << "Call runInitFunctions done."; + }); } void initMain(int argc, char** argv) { diff --git a/paddle/utils/Util.h b/paddle/utils/Util.h index ff67439da6..24ddde28e7 100644 --- a/paddle/utils/Util.h +++ b/paddle/utils/Util.h @@ -14,25 +14,25 @@ limitations under the License. */ #pragma once +#include // for syscall() +#include #include #include -#include -#include +#include #include +#include +#include #include #include -#include -#include -#include // for syscall() -#include +#include #include "CommandLineParser.h" +#include "DisableCopy.h" #include "Logging.h" #include "TrainerConfig.pb.h" -#include "DisableCopy.h" -#include "TypeDefs.h" #include "Flags.h" +#include "TypeDefs.h" #include "hl_gpu.h" /** diff --git a/paddle/utils/Version.cpp b/paddle/utils/Version.cpp index 086515791d..a9e351b69f 100644 --- a/paddle/utils/Version.cpp +++ b/paddle/utils/Version.cpp @@ -14,10 +14,10 @@ limitations under the License. */ #include "Version.h" -#include "Flags.h" -#include "Util.h" #include #include +#include "Flags.h" +#include "Util.h" //! TODO(yuyang18) in gflags, version has another define. Use another flag //! instead. #ifndef PADDLE_USE_GFLAGS @@ -33,7 +33,8 @@ void printVersion(std::ostream& os) { #ifndef PADDLE_VERSION #define PADDLE_VERSION "unknown" #endif -// converts macro to string https://gcc.gnu.org/onlinedocs/cpp/Stringification.html +// converts macro to string +// https://gcc.gnu.org/onlinedocs/cpp/Stringification.html #define xstr(s) str(s) #define str(s) #s diff --git a/paddle/utils/Version.h b/paddle/utils/Version.h index ac04963c2c..d1a07d9485 100644 --- a/paddle/utils/Version.h +++ b/paddle/utils/Version.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once #include -#include "TypeDefs.h" #include +#include "TypeDefs.h" namespace paddle { diff --git a/paddle/utils/arch/osx/Locks.cpp b/paddle/utils/arch/osx/Locks.cpp index 8590226431..e03992363f 100644 --- a/paddle/utils/arch/osx/Locks.cpp +++ b/paddle/utils/arch/osx/Locks.cpp @@ -13,10 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/utils/Locks.h" -#include "paddle/utils/Logging.h" #include -#include #include +#include +#include "paddle/utils/Logging.h" namespace paddle { diff --git a/paddle/utils/tests/test_CommandLineParser.cpp b/paddle/utils/tests/test_CommandLineParser.cpp index 9a1d2391a8..ed2b3068d5 100644 --- a/paddle/utils/tests/test_CommandLineParser.cpp +++ b/paddle/utils/tests/test_CommandLineParser.cpp @@ -15,8 +15,8 @@ limitations under the License. */ #ifndef PADDLE_USE_GFLAGS //! Test Command Line Parser for paddle internal implement. -#include #include +#include P_DEFINE_int32(i1, 1, "test int flag 1"); P_DEFINE_int32(i2, 2, "test int flag 2"); diff --git a/paddle/utils/tests/test_CustomStackTrace.cpp b/paddle/utils/tests/test_CustomStackTrace.cpp index 512330b49e..292ed4619d 100644 --- a/paddle/utils/tests/test_CustomStackTrace.cpp +++ b/paddle/utils/tests/test_CustomStackTrace.cpp @@ -15,10 +15,10 @@ limitations under the License. */ #include #include -#include "paddle/utils/CustomStackTrace.h" #include "paddle/utils/CommandLineParser.h" -#include "paddle/utils/Util.h" +#include "paddle/utils/CustomStackTrace.h" #include "paddle/utils/Locks.h" +#include "paddle/utils/Util.h" P_DEFINE_int32(test_thread_num, 10, "testing thread number"); diff --git a/paddle/utils/tests/test_CustomStackTracePrint.cpp b/paddle/utils/tests/test_CustomStackTracePrint.cpp index 60ba210b70..611b16aa71 100644 --- a/paddle/utils/tests/test_CustomStackTracePrint.cpp +++ b/paddle/utils/tests/test_CustomStackTracePrint.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Util.h" #include "paddle/utils/CustomStackTrace.h" +#include "paddle/utils/Util.h" int main(int argc, char** argv) { paddle::initMain(argc, argv); diff --git a/paddle/utils/tests/test_Logging.cpp b/paddle/utils/tests/test_Logging.cpp index 667864aa75..fbfffcc65a 100644 --- a/paddle/utils/tests/test_Logging.cpp +++ b/paddle/utils/tests/test_Logging.cpp @@ -17,10 +17,10 @@ limitations under the License. */ * Used in embedded system where there is no glogs. */ +#include #include -#include #include -#include +#include #include "paddle/utils/Logging.h" #include "paddle/utils/Util.h" #ifndef PADDLE_USE_GLOG diff --git a/paddle/utils/tests/test_SIMDFlags.cpp b/paddle/utils/tests/test_SIMDFlags.cpp index a544901aa3..41532953a7 100644 --- a/paddle/utils/tests/test_SIMDFlags.cpp +++ b/paddle/utils/tests/test_SIMDFlags.cpp @@ -9,40 +9,39 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ - #include #include "paddle/utils/CpuId.h" #include "paddle/utils/Logging.h" #include "paddle/utils/Util.h" -using namespace paddle; // NOLINT +using namespace paddle; // NOLINT TEST(SIMDFlags, gccTest) { #if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__)) - CHECK(!__builtin_cpu_supports("sse") != HAS_SSE); - CHECK(!__builtin_cpu_supports("sse2") != HAS_SSE2); - CHECK(!__builtin_cpu_supports("sse3") != HAS_SSE3); - CHECK(!__builtin_cpu_supports("ssse3") != HAS_SSSE3); - CHECK(!__builtin_cpu_supports("sse4.1")!= HAS_SSE41); - CHECK(!__builtin_cpu_supports("sse4.2")!= HAS_SSE42); - CHECK(!__builtin_cpu_supports("avx") != HAS_AVX); - CHECK(!__builtin_cpu_supports("avx2") != HAS_AVX2); + CHECK(!__builtin_cpu_supports("sse") != HAS_SSE); + CHECK(!__builtin_cpu_supports("sse2") != HAS_SSE2); + CHECK(!__builtin_cpu_supports("sse3") != HAS_SSE3); + CHECK(!__builtin_cpu_supports("ssse3") != HAS_SSSE3); + CHECK(!__builtin_cpu_supports("sse4.1") != HAS_SSE41); + CHECK(!__builtin_cpu_supports("sse4.2") != HAS_SSE42); + CHECK(!__builtin_cpu_supports("avx") != HAS_AVX); + CHECK(!__builtin_cpu_supports("avx2") != HAS_AVX2); #endif } TEST(SIMDFlags, normalPrint) { - auto simd = SIMDFlags::instance(); - LOG(INFO) << "Has SSE2: " << std::boolalpha << simd->isSSE2(); - LOG(INFO) << "Has SSE3: " << std::boolalpha << simd->isSSE3(); - LOG(INFO) << "Has SSSE3: " << std::boolalpha << simd->isSSSE3(); - LOG(INFO) << "Has SSE4.1: " << std::boolalpha << simd->isSSE41(); - LOG(INFO) << "Has SSE4.2: " << std::boolalpha << simd->isSSE42(); - LOG(INFO) << "Has FMA3: " << std::boolalpha << simd->isFMA3(); - LOG(INFO) << "Has FMA4: " << std::boolalpha << simd->isFMA4(); - LOG(INFO) << "Has AVX: " << std::boolalpha << simd->isAVX(); - LOG(INFO) << "Has AVX2: " << std::boolalpha << simd->isAVX2(); - LOG(INFO) << "Has AVX512: " << std::boolalpha << simd->isAVX512(); + auto simd = SIMDFlags::instance(); + LOG(INFO) << "Has SSE2: " << std::boolalpha << simd->isSSE2(); + LOG(INFO) << "Has SSE3: " << std::boolalpha << simd->isSSE3(); + LOG(INFO) << "Has SSSE3: " << std::boolalpha << simd->isSSSE3(); + LOG(INFO) << "Has SSE4.1: " << std::boolalpha << simd->isSSE41(); + LOG(INFO) << "Has SSE4.2: " << std::boolalpha << simd->isSSE42(); + LOG(INFO) << "Has FMA3: " << std::boolalpha << simd->isFMA3(); + LOG(INFO) << "Has FMA4: " << std::boolalpha << simd->isFMA4(); + LOG(INFO) << "Has AVX: " << std::boolalpha << simd->isAVX(); + LOG(INFO) << "Has AVX2: " << std::boolalpha << simd->isAVX2(); + LOG(INFO) << "Has AVX512: " << std::boolalpha << simd->isAVX512(); } int main(int argc, char** argv) { diff --git a/paddle/utils/tests/test_SpinLock.cpp b/paddle/utils/tests/test_SpinLock.cpp index 9c7ad05b0b..22f8584ef5 100644 --- a/paddle/utils/tests/test_SpinLock.cpp +++ b/paddle/utils/tests/test_SpinLock.cpp @@ -14,10 +14,10 @@ limitations under the License. */ #include #include -#include "paddle/utils/Logging.h" #include "paddle/utils/CommandLineParser.h" -#include "paddle/utils/Util.h" #include "paddle/utils/Locks.h" +#include "paddle/utils/Logging.h" +#include "paddle/utils/Util.h" P_DEFINE_int32(test_thread_num, 100, "testing thread number"); diff --git a/paddle/utils/tests/test_Thread.cpp b/paddle/utils/tests/test_Thread.cpp index b069be1d7a..2f5c5bbce0 100644 --- a/paddle/utils/tests/test_Thread.cpp +++ b/paddle/utils/tests/test_Thread.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ -#include -#include #include +#include +#include using paddle::AsyncThreadPool; // NOLINT @@ -52,17 +52,13 @@ TEST(AsyncThreadPool, multiThreadAddBatchJob) { int counter = 0; const int numMonitors = 300; const int numSlaves = 300; - std::vector moniterJobs( - numMonitors, - [&] { - std::vector slaveJobs( - numSlaves, - [mut, &counter] { - std::lock_guard lk(*mut); - counter++; - }); - levelTwoPool.addBatchJobs(slaveJobs); - }); + std::vector moniterJobs(numMonitors, [&] { + std::vector slaveJobs(numSlaves, [mut, &counter] { + std::lock_guard lk(*mut); + counter++; + }); + levelTwoPool.addBatchJobs(slaveJobs); + }); levelOnePool.addBatchJobs(moniterJobs); ASSERT_EQ(counter, numMonitors * numSlaves); } diff --git a/paddle/utils/tests/test_ThreadBarrier.cpp b/paddle/utils/tests/test_ThreadBarrier.cpp index 997a393683..4a8af5b97e 100644 --- a/paddle/utils/tests/test_ThreadBarrier.cpp +++ b/paddle/utils/tests/test_ThreadBarrier.cpp @@ -15,10 +15,10 @@ limitations under the License. */ #include #include #include -#include "paddle/utils/Logging.h" #include "paddle/utils/CommandLineParser.h" -#include "paddle/utils/Util.h" #include "paddle/utils/Locks.h" +#include "paddle/utils/Logging.h" +#include "paddle/utils/Util.h" P_DEFINE_int32(test_thread_num, 100, "testing thread number"); diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 42a7a29403..5b7f4d85e2 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -3364,7 +3364,10 @@ def my_fatal(s): logger.critical(s) raise Exception() + _parse_config_hooks = set() + + def register_parse_config_hook(f): """ Register a hook function for parse_config. parse_config will invoke the hook @@ -3373,6 +3376,7 @@ def register_parse_config_hook(f): """ _parse_config_hooks.add(f) + def parse_config(config_file, config_arg_str): ''' @param config_arg_str: a string of the form var1=val1,var2=val2. It will be diff --git a/python/paddle/trainer_config_helpers/default_decorators.py b/python/paddle/trainer_config_helpers/default_decorators.py index 13712aad7b..ad3efcbf36 100644 --- a/python/paddle/trainer_config_helpers/default_decorators.py +++ b/python/paddle/trainer_config_helpers/default_decorators.py @@ -84,12 +84,15 @@ class DefaultNameFactory(object): _name_factories = [] + def reset_hook(): for factory in _name_factories: factory.reset() + register_parse_config_hook(reset_hook) + def wrap_name_default(name_prefix=None): """ Decorator to set "name" arguments default to "{name_prefix}_{invoke_count}". 
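The Util.cpp hunk above reformats a std::call_once body that drains a registry of prioritized init functions exactly once. A simplified, self-contained sketch of that pattern (not Paddle's actual implementation):

#include <algorithm>
#include <functional>
#include <mutex>
#include <utility>
#include <vector>

using PriorityFuncPair = std::pair<int, std::function<void()>>;

static std::once_flag g_onceFlag;
static std::vector<PriorityFuncPair> g_initFuncs;

void registerInitFunction(std::function<void()> func, int priority) {
  g_initFuncs.emplace_back(priority, std::move(func));
}

void runInitFunctions() {
  std::call_once(g_onceFlag, []() {
    // Higher priority runs first, matching the comparator in the diff.
    std::sort(g_initFuncs.begin(),
              g_initFuncs.end(),
              [](const PriorityFuncPair& x, const PriorityFuncPair& y) {
                return x.first > y.first;
              });
    for (auto& f : g_initFuncs) {
      f.second();
    }
  });
}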
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_config_parser_for_non_file_config.py b/python/paddle/trainer_config_helpers/tests/configs/test_config_parser_for_non_file_config.py
index 87a607acf4..9b791a0222 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_config_parser_for_non_file_config.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_config_parser_for_non_file_config.py
@@ -17,33 +17,35 @@ import sys
 import re
 import getopt
+
 def main(print_whole_config, globals, locals):
-  '''
+    '''
   this test will all test_config.py
   '''
-  cmdstr = """from paddle.trainer.config_parser import parse_config\n"""
-  importstr = ""
-  functionstr = ""
+    cmdstr = """from paddle.trainer.config_parser import parse_config\n"""
+    importstr = ""
+    functionstr = ""
+
+    for line in sys.stdin:
+        if re.match("^import", line) or re.match("^from.*import", line):
+            importstr = importstr + line
+        else:
+            functionstr = functionstr + "  " + line
-  for line in sys.stdin:
-    if re.match("^import", line) or re.match("^from.*import", line):
-      importstr = importstr + line
+    cmdstr = cmdstr + importstr + """def configs():\n""" + functionstr
+    #cmdstr = cmdstr + """def configs():\n""" + importstr + functionstr
+    if print_whole_config:
+        cmdstr = cmdstr + """print parse_config(configs, "")"""
     else:
-      functionstr = functionstr + "  " + line
+        cmdstr = cmdstr + """print parse_config(configs, "").model_config"""
-  cmdstr = cmdstr + importstr + """def configs():\n""" + functionstr
-  #cmdstr = cmdstr + """def configs():\n""" + importstr + functionstr
-  if print_whole_config:
-    cmdstr = cmdstr + """print parse_config(configs, "")"""
-  else:
-    cmdstr = cmdstr + """print parse_config(configs, "").model_config"""
+    exec (cmdstr, globals, locals)
-  exec(cmdstr, globals, locals)
 if __name__ == '__main__':
-  whole = False
-  opts, args = getopt.getopt(sys.argv[1:], "", ["whole"])
-  for op, value in opts:
-    if op == "--whole":
-      whole = True
-  main(whole, globals(), locals())
+    whole = False
+    opts, args = getopt.getopt(sys.argv[1:], "", ["whole"])
+    for op, value in opts:
+        if op == "--whole":
+            whole = True
+    main(whole, globals(), locals())
diff --git a/python/paddle/trainer_config_helpers/tests/test_reset_hook.py b/python/paddle/trainer_config_helpers/tests/test_reset_hook.py
index dc494d0eef..0423babdb7 100644
--- a/python/paddle/trainer_config_helpers/tests/test_reset_hook.py
+++ b/python/paddle/trainer_config_helpers/tests/test_reset_hook.py
@@ -14,13 +14,13 @@ import unittest
 from paddle.trainer.config_parser import parse_config
-class TestParse(unittest.TestCase):
+class TestParse(unittest.TestCase):
     def test_parse(self):
-        a = parse_config(
-            'trainer_config_helpers/tests/layers_test_config.py', '')
-        b = parse_config(
-            'trainer_config_helpers/tests/layers_test_config.py', '')
+        a = parse_config('trainer_config_helpers/tests/layers_test_config.py',
+                         '')
+        b = parse_config('trainer_config_helpers/tests/layers_test_config.py',
+                         '')
         self.assertEqual(a, b)
diff --git a/third_party/gtest.BUILD b/third_party/gtest.BUILD
index 3e68a1d879..71c74af513 100644
--- a/third_party/gtest.BUILD
+++ b/third_party/gtest.BUILD
@@ -1,14 +1,8 @@
 cc_library(
-    name = "main",
-    srcs = glob(
-        ["src/*.cc"],
-        exclude = ["src/gtest-all.cc"]
-    ),
-    hdrs = glob([
-        "include/**/*.h",
-        "src/*.h"
-    ]),
-    copts = ["-Iexternal/gtest/include"],
-    linkopts = ["-pthread"],
-    visibility = ["//visibility:public"],
-)
+    name="main",
+    srcs=glob(
+        ["src/*.cc"], exclude=["src/gtest-all.cc"]),
+    hdrs=glob(["include/**/*.h", "src/*.h"]),
+    copts=["-Iexternal/gtest/include"],
+    linkopts=["-pthread"],
+    visibility=["//visibility:public"], )
diff --git a/third_party/protobuf_test/BUILD b/third_party/protobuf_test/BUILD
index 46f769da5f..95a687a356 100644
--- a/third_party/protobuf_test/BUILD
+++ b/third_party/protobuf_test/BUILD
@@ -3,25 +3,22 @@ licenses(["notice"])  # Apache 2.0
 load("@protobuf//:protobuf.bzl", "cc_proto_library")
 cc_proto_library(
-    name = "example_proto",
-    srcs = ["example.proto"],
-    protoc = "@protobuf//:protoc",
-    default_runtime = "@protobuf//:protobuf",
-)
+    name="example_proto",
+    srcs=["example.proto"],
+    protoc="@protobuf//:protoc",
+    default_runtime="@protobuf//:protobuf", )
 cc_library(
-    name = "example_lib",
-    srcs = ["example_lib.cc"],
-    hdrs = ["example_lib.h"],
-    deps = [":example_proto"],
-)
+    name="example_lib",
+    srcs=["example_lib.cc"],
+    hdrs=["example_lib.h"],
+    deps=[":example_proto"], )
 cc_test(
-    name = "example_lib_test",
-    srcs = ["example_lib_test.cc"],
-    copts = ["-Iexternal/gtest/include"],
-    deps =[
+    name="example_lib_test",
+    srcs=["example_lib_test.cc"],
+    copts=["-Iexternal/gtest/include"],
+    deps=[
         "@gtest//:main",
         ":example_lib",
-    ],
-)
+    ], )
diff --git a/third_party/protobuf_test/example_lib.cc b/third_party/protobuf_test/example_lib.cc
index 56341a0124..ced377bc0a 100644
--- a/third_party/protobuf_test/example_lib.cc
+++ b/third_party/protobuf_test/example_lib.cc
@@ -3,9 +3,7 @@
 namespace third_party {
 namespace protobuf_test {
-std::string get_greet(const Greeting& who) {
-  return "Hello " + who.name();
-}
+std::string get_greet(const Greeting& who) { return "Hello " + who.name(); }
 }  // namespace protobuf_test
 }  // namespace thrid_party
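For context on the BUILD targets above, a hedged sketch of how example_lib_test might exercise get_greet under gtest; the test body is illustrative, not the repository's actual example_lib_test.cc (it assumes Greeting is a proto message with a string `name` field, as get_greet's use of who.name() suggests):

#include <gtest/gtest.h>
#include "third_party/protobuf_test/example_lib.h"

namespace third_party {
namespace protobuf_test {

TEST(ExampleLib, GetGreet) {
  Greeting who;
  who.set_name("Paddle");
  EXPECT_EQ("Hello Paddle", get_greet(who));
}

}  // namespace protobuf_test
}  // namespace third_party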