Upgrade string literals to raw string (#28989)

* upgrade comment string to raw string

* fix string in

* fix string with ' '

* revert update on comments

* upgrade only necessary

* fix sample code checker

* fix comments with '''
Leo Chen committed by GitHub
parent 767d0ba267
commit 3815d7aa40
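
For context (a sketch, not taken from the commit itself): string literals containing backslash sequences that Python does not define, such as \. or \s, trigger a DeprecationWarning on Python 3.6+ (a SyntaxWarning, and eventually an error, on newer versions). Prefixing the literal with r keeps every backslash verbatim and silences the warning without changing runtime behaviour. A minimal illustration with a made-up pattern:

import warnings

# A made-up pattern; '\.' is not an escape Python defines, so a plain literal
# containing it triggers a warning when the source is compiled.
plain_src = r'pattern = "aclImdb/.*\.txt$"'      # plain string literal inside
raw_src = r'pattern = r"aclImdb/.*\.txt$"'       # raw string literal inside

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    compile(plain_src, "<demo>", "exec")          # warns: invalid escape sequence '\.'
    compile(raw_src, "<demo>", "exec")            # raw form compiles silently

# Typically one DeprecationWarning (SyntaxWarning on Python 3.12+) is recorded.
print([str(w.message) for w in caught])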

@ -1,4 +1,19 @@
#!/bin/python
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import platform
from sys import argv
@ -120,7 +135,7 @@ python setup.py install
self.py_str = ["py27", "py35", "py36", "py37"]
self.pip_end = ".whl --no-deps"
self.pip_prefix_linux = "pip install /package/paddlepaddle"
self.pip_prefix_windows = "pip install C:\package\paddlepaddle"
self.pip_prefix_windows = r"pip install C:\package\paddlepaddle"
self.pip_gpu = "_gpu-"
self.pip_cpu = "-"
self.mac_pip = [
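
Worth spelling out for the Windows prefix above: in a plain literal, recognized escapes such as \t or \n are substituted silently, so a backslashed path can be corrupted without any warning, while unrecognized ones like \p only warn. The raw prefix avoids both issues. A small sketch with a hypothetical path (not the one in the diff):

# Hypothetical path, for illustration only.
unsafe = "C:\temp\new_dir"      # '\t' and '\n' are real escapes: the path is corrupted
safe = r"C:\temp\new_dir"       # raw string keeps every backslash literally

assert "\t" in unsafe and "\n" in unsafe        # tab and newline crept in silently
assert safe == "C:\\temp\\new_dir"              # backslashes preserved
print(repr(unsafe), repr(safe))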
@ -216,7 +231,7 @@ package:
- matplotlib"""
if not (cuda_str == None):
meta_str = meta_str + cuda_str
blt_str = var.blt_const + blt_var
if (python_str == var.python27):
blt_str = blt_str + """
@ -224,7 +239,7 @@ package:
else:
meta_str = meta_str + """
- opencv>=3.4.2"""
meta_str = meta_str + var.test + var.about
meta_filename = "meta.yaml"
build_filename = "bld.bat"

@ -116,8 +116,8 @@ def train(word_idx):
:rtype: callable
"""
return reader_creator(
re.compile("aclImdb/train/pos/.*\.txt$"),
re.compile("aclImdb/train/neg/.*\.txt$"), word_idx)
re.compile(r"aclImdb/train/pos/.*\.txt$"),
re.compile(r"aclImdb/train/neg/.*\.txt$"), word_idx)
@deprecated(
@ -137,8 +137,8 @@ def test(word_idx):
:rtype: callable
"""
return reader_creator(
re.compile("aclImdb/test/pos/.*\.txt$"),
re.compile("aclImdb/test/neg/.*\.txt$"), word_idx)
re.compile(r"aclImdb/test/pos/.*\.txt$"),
re.compile(r"aclImdb/test/neg/.*\.txt$"), word_idx)
@deprecated(
@ -153,7 +153,7 @@ def word_dict():
:rtype: dict
"""
return build_dict(
re.compile("aclImdb/((train)|(test))/((pos)|(neg))/.*\.txt$"), 150)
re.compile(r"aclImdb/((train)|(test))/((pos)|(neg))/.*\.txt$"), 150)
@deprecated(

@ -18,13 +18,13 @@ import paddle.dataset.imdb
import unittest
import re
TRAIN_POS_PATTERN = re.compile("aclImdb/train/pos/.*\.txt$")
TRAIN_NEG_PATTERN = re.compile("aclImdb/train/neg/.*\.txt$")
TRAIN_PATTERN = re.compile("aclImdb/train/.*\.txt$")
TRAIN_POS_PATTERN = re.compile(r"aclImdb/train/pos/.*\.txt$")
TRAIN_NEG_PATTERN = re.compile(r"aclImdb/train/neg/.*\.txt$")
TRAIN_PATTERN = re.compile(r"aclImdb/train/.*\.txt$")
TEST_POS_PATTERN = re.compile("aclImdb/test/pos/.*\.txt$")
TEST_NEG_PATTERN = re.compile("aclImdb/test/neg/.*\.txt$")
TEST_PATTERN = re.compile("aclImdb/test/.*\.txt$")
TEST_POS_PATTERN = re.compile(r"aclImdb/test/pos/.*\.txt$")
TEST_NEG_PATTERN = re.compile(r"aclImdb/test/neg/.*\.txt$")
TEST_PATTERN = re.compile(r"aclImdb/test/.*\.txt$")
class TestIMDB(unittest.TestCase):
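
A brief aside on the regex literals in these hunks: since \. is not an escape Python recognizes, the old and new spellings compile to the same pattern, so matching behaviour is unchanged; the raw form is simply warning-free and unambiguous to read. A sketch:

import re

# Both spellings denote the identical pattern string.
old_style = re.compile("aclImdb/train/pos/.*\\.txt$")   # backslash written out as '\\'
new_style = re.compile(r"aclImdb/train/pos/.*\.txt$")
assert old_style.pattern == new_style.pattern

# The escaped dot matches a literal '.', not "any character".
assert new_style.search("aclImdb/train/pos/123_4.txt")
assert not new_style.search("aclImdb/train/pos/123_4_txt")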

@ -862,7 +862,7 @@ class DistributedStrategy(object):
@property
def dgc_configs(self):
"""
r"""
Set Deep Gradient Compression training configurations. In general, dgc has several configurable
settings that can be configured through a dict.
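
The long run of """ → r""" changes that follows applies the same idea to docstrings: when a docstring embeds Sphinx math or other backslash notation (for example \nabla or \frac), a raw docstring keeps the backslashes exactly as written, so the rendered docs and the sample code checker see the intended text instead of a stray control character. A hedged sketch with made-up docstrings:

def plain_doc():
    """.. math:: y = \nabla f(x)"""

def raw_doc():
    r""".. math:: y = \nabla f(x)"""

# In the plain version '\n' has become a real newline and the backslash is gone;
# the raw version keeps '\nabla' intact for Sphinx to render.
assert "\\nabla" not in plain_doc.__doc__ and "\n" in plain_doc.__doc__
assert "\\nabla" in raw_doc.__doc__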

@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
r"""
fleetrun is a module that spawns multiple distributed
process on each training node for gpu training and cpu training.
Usage:

@ -158,13 +158,13 @@ class ParameterServerOptimizer(MetaOptimizerBase):
['vm_stat'], stdout=subprocess.PIPE).communicate()[0]
# Process vm_stat
vmLines = vm.split('\n')
sep = re.compile(':[\s]+')
sep = re.compile(r':[\s]+')
vmStats = {}
for row in range(1, len(vmLines) - 2):
rowText = vmLines[row].strip()
rowElements = sep.split(rowText)
vmStats[(rowElements[0]
)] = int(rowElements[1].strip('\.')) * 4096
)] = int(rowElements[1].strip(r'\.')) * 4096
return vmStats["Pages free"]
elif platform.system() == "Linux":
mems = {}
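
One subtlety in the strip(r'\.') change above: str.strip takes a set of characters, and because \. is not a recognized escape, '\.' and r'\.' already denote the same two-character set {'\\', '.'}; the runtime result is identical and only the warning goes away. For instance, with a made-up vm_stat value:

# Hypothetical "Pages free" count taken from vm_stat output.
value = "39772."
assert value.strip("\\.") == value.strip(r"\.") == "39772"
assert int(value.strip(r"\.")) * 4096 == 162906112   # pages * 4 KiB page size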

@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
r"""
paddle.distributed.launch is a module that spawns multiple distributed
process on each training node for gpu training.
Usage:

@ -166,7 +166,7 @@ class Distribution(object):
class Uniform(Distribution):
"""Uniform distribution with `low` and `high` parameters.
r"""Uniform distribution with `low` and `high` parameters.
Mathematical Details
@ -374,7 +374,7 @@ class Uniform(Distribution):
return elementwise_div((lb * ub), (self.high - self.low), name=name)
def entropy(self):
"""Shannon entropy in nats.
r"""Shannon entropy in nats.
The entropy is
@ -391,7 +391,7 @@ class Uniform(Distribution):
class Normal(Distribution):
"""The Normal distribution with location `loc` and `scale` parameters.
r"""The Normal distribution with location `loc` and `scale` parameters.
Mathematical details
@ -534,7 +534,7 @@ class Normal(Distribution):
return output
def entropy(self):
"""Shannon entropy in nats.
r"""Shannon entropy in nats.
The entropy is
@ -599,7 +599,7 @@ class Normal(Distribution):
name=name)
def kl_divergence(self, other):
"""The KL-divergence between two normal distributions.
r"""The KL-divergence between two normal distributions.
The probability density function (pdf) is
@ -644,7 +644,7 @@ class Normal(Distribution):
class Categorical(Distribution):
"""
r"""
Categorical distribution is a discrete probability distribution that
describes the possible results of a random variable that can take on
one of K possible categories, with the probability of each category

@ -40,7 +40,7 @@ class BaseErrorClipAttr(object):
class ErrorClipByValue(BaseErrorClipAttr):
"""
r"""
Clips tensor values to the range [min, max].
Given a tensor ``t`` (see Examples below), this operation clips its value \
@ -241,7 +241,7 @@ class ClipGradByValue(ClipGradBase):
class ClipGradByNorm(ClipGradBase):
"""
r"""
Limit the l2 norm of multi-dimensional Tensor :math:`X` to ``clip_norm`` .
- If the l2 norm of :math:`X` is greater than ``clip_norm`` , :math:`X` will be compressed by a ratio.
@ -343,7 +343,7 @@ class ClipGradByNorm(ClipGradBase):
class ClipGradByGlobalNorm(ClipGradBase):
"""
r"""
Given a list of Tensor :math:`t\_list` , calculate the global norm for the elements of all tensors in
:math:`t\_list` , and limit it to ``clip_norm`` .

@ -137,7 +137,7 @@ def var_conv_2d(input,
act=None,
dtype='float32',
name=None):
"""
r"""
The var_conv_2d layer calculates the output base on the :attr:`input` with variable length,
row, col, input channel, filter size and strides. Both :attr:`input`, :attr:`row`,
and :attr:`col` are 1-level LodTensor. The convolution operation is same as conv2d layer with
@ -477,7 +477,7 @@ def fused_embedding_seq_pool(input,
combiner='sum',
param_attr=None,
dtype='float32'):
"""
r"""
**Embedding Sequence pool**
This layer is the fusion of lookup table and sequence_pool.
@ -1442,7 +1442,7 @@ def batch_fc(input, param_size, param_attr, bias_size, bias_attr, act=None):
def _pull_box_extended_sparse(input, size, extend_size=64, dtype='float32'):
"""
r"""
**Pull Box Extended Sparse Layer**
This layer is used to lookup embeddings of IDs, provided by :attr:`input`, in
BoxPS lookup table. The result of this lookup is the embedding of each ID in the
@ -1640,7 +1640,7 @@ def fused_bn_add_act(x,
moving_variance_name=None,
act=None,
name=None):
"""
r"""
This Op performs batch norm on input x, and adds the result to input y. Then
it performs activation on the sum. The data format of inputs must be NHWC
`[batch, in_height, in_width, in_channels]`.

@ -175,7 +175,7 @@ def basic_gru(input,
activation=None,
dtype='float32',
name='basic_gru'):
"""
r"""
GRU implementation using basic operator, supports multiple layers and bidirectional gru.
.. math::
@ -418,7 +418,7 @@ def basic_lstm(input,
forget_bias=1.0,
dtype='float32',
name='basic_lstm'):
"""
r"""
LSTM implementation using basic operators, supports multiple layers and bidirectional LSTM.
.. math::
@ -697,7 +697,7 @@ def basic_lstm(input,
class BasicLSTMUnit(Layer):
"""
r"""
****
BasicLSTMUnit class, Using basic operator to build LSTM
The algorithm can be described as the code below.

@ -44,7 +44,7 @@ DEBUG = False
def memory_usage(program, batch_size):
"""
r"""
Get the estimate memory usage of program with input batch size.
Args:

@ -64,7 +64,7 @@ class ImperativeQuantAware(object):
act_preprocess_layer=None,
weight_quantize_layer=None,
act_quantize_layer=None):
"""
r"""
The constructor for ImperativeQuantAware.
Args:

@ -30,7 +30,7 @@ __all__ = [
class FakeQuantMovingAverage(layers.Layer):
"""
r"""
FakeQuantMovingAverage layer does the moving_average_abs_max quant and then dequant.
Its computational formula is described as below:
@ -128,7 +128,7 @@ class FakeQuantMovingAverage(layers.Layer):
class FakeQuantAbsMax(layers.Layer):
"""
r"""
FakeQuantAbsMax layer does the abs_max quant and then dequant.
Its computational formula is described as below:
@ -545,7 +545,7 @@ class QuantizedLinear(layers.Layer):
class MovingAverageAbsMaxScale(layers.Layer):
def __init__(self, name=None, moving_rate=0.9, dtype='float32'):
"""
r"""
MovingAverageMaxScale layer is used to calculating the output quantization scale of Layer.
Its computational formula is described as below:

@ -37,7 +37,7 @@ class QuantInt8MkldnnPass(object):
"""
def __init__(self, _scope=None, _place=None):
"""
r"""
Args:
scope(fluid.Scope): scope is used to initialize the new parameters.
place(fluid.CPUPlace): place is used to initialize the new parameters.

@ -239,7 +239,7 @@ class QuantizationTransformPass(object):
act_preprocess_func=None,
optimizer_func=None,
executor=None):
"""
r"""
Constructor.
Args:

@ -33,7 +33,7 @@ _logger = get_logger(
class HDFSClient(object):
"""
r"""
A tool of HDFS
Args:
@ -376,7 +376,7 @@ class HDFSClient(object):
_logger.info("HDFS list path: {} successfully".format(hdfs_path))
ret_lines = []
regex = re.compile('\s+')
regex = re.compile(r'\s+')
out_lines = output.strip().split("\n")
for line in out_lines:
re_line = regex.split(line)
@ -418,7 +418,7 @@ class HDFSClient(object):
_logger.info("HDFS list all files: {} successfully".format(
hdfs_path))
lines = []
regex = re.compile('\s+')
regex = re.compile(r'\s+')
out_lines = output.strip().split("\n")
for line in out_lines:
re_line = regex.split(line)
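
In the listing code above, the raw r'\s+' pattern splits each output line on runs of whitespace of any length, which is what makes the column extraction robust. A sketch with a hypothetical hdfs -ls line (the real column layout may differ):

import re

# Hypothetical 'hdfs dfs -ls' output line.
line = "-rw-r--r--   3 user supergroup   1048576 2020-11-27 10:30 /data/part-00000"
fields = re.compile(r"\s+").split(line.strip())

assert fields[4] == "1048576"            # file size column
assert fields[-1] == "/data/part-00000"  # path column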

@ -224,7 +224,7 @@ def less_than_ver(a, b):
import operator
def to_list(s):
s = re.sub('(\.0+)+$', '', s)
s = re.sub(r'(\.0+)+$', '', s)
return [int(x) for x in s.split('.')]
return operator.lt(to_list(a), to_list(b))
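
The helper above normalises version strings by dropping trailing '.0' groups and then compares the dot-separated components numerically. A self-contained sketch of its behaviour (the version numbers are made up):

import re
import operator

def less_than_ver(a, b):
    # Drop trailing '.0' groups, then compare dot-separated integers element-wise.
    def to_list(s):
        s = re.sub(r'(\.0+)+$', '', s)
        return [int(x) for x in s.split('.')]
    return operator.lt(to_list(a), to_list(b))

assert less_than_ver("10.1", "10.2")
assert not less_than_ver("10.2.0", "10.2")   # '10.2.0' normalises to '10.2'
assert less_than_ver("9.2", "10.1")          # numeric, not lexicographic, comparison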

@ -101,10 +101,11 @@ class _DatasetKind(object):
ITER = 1
@staticmethod
def create_fetcher(kind, dataset, auto_collate_batch, collate_fn, drop_last):
def create_fetcher(kind, dataset, auto_collate_batch, collate_fn,
drop_last):
if kind == _DatasetKind.MAP:
return _MapDatasetFetcher(dataset, auto_collate_batch,
collate_fn, drop_last)
return _MapDatasetFetcher(dataset, auto_collate_batch, collate_fn,
drop_last)
elif kind == _DatasetKind.ITER:
return _IterableDatasetFetcher(dataset, auto_collate_batch,
collate_fn, drop_last)
@ -240,7 +241,8 @@ class _DataLoaderIterBase(object):
if self._dataset_kind == _DatasetKind.MAP:
self._sampler_iter = iter(list(range(len(self._dataset))))
else:
self._sampler_iter = iter(_InfiniteIterableSampler(self._dataset, 1))
self._sampler_iter = iter(
_InfiniteIterableSampler(self._dataset, 1))
self._collate_fn = loader.collate_fn
# LoDTensorBlockingQueue instance for create_py_reader and a thread
@ -380,8 +382,8 @@ class _DataLoaderIterSingleProcess(_DataLoaderIterBase):
# NOTE(chenweihang): _worker_loop must be top level method to be pickled
def _worker_loop(dataset, dataset_kind, indices_queue, out_queue, done_event,
auto_collate_batch, collate_fn, init_fn, worker_id, num_workers,
use_shared_memory):
auto_collate_batch, collate_fn, init_fn, worker_id,
num_workers, use_shared_memory):
try:
# NOTE: [ mmap files clear ] When the child process exits unexpectedly,
# some shared memory objects may have been applied for but have not yet
@ -400,8 +402,8 @@ def _worker_loop(dataset, dataset_kind, indices_queue, out_queue, done_event,
try:
if init_fn is not None:
init_fn(worker_id)
fetcher = _DatasetKind.create_fetcher(dataset_kind, dataset,
auto_collate_batch, collate_fn, True)
fetcher = _DatasetKind.create_fetcher(
dataset_kind, dataset, auto_collate_batch, collate_fn, True)
except:
init_exception = Exception("init_fn failed in worker {}: " \
"{}".format(worker_id, sys.exc_info()))

@ -22,7 +22,7 @@ from google.protobuf import text_format
class DownpourSGD(object):
"""
r"""
Distributed optimizer of downpour stochastic gradient descent
Standard implementation of Google's Downpour SGD
in Large Scale Distributed Deep Networks

@ -52,7 +52,7 @@ class DownpourServer(Server):
def add_sparse_table(self, table_id, learning_rate, slot_key_vars,
slot_value_var):
"""
r"""
Args:
table_id(int): id of sparse params table
learning_rate(float): the learning rate used to update parameters. \
@ -84,7 +84,7 @@ class DownpourServer(Server):
table.accessor.downpour_accessor_param.delete_threshold = 0.8
def add_dense_table(self, table_id, learning_rate, param_var, grad_var):
"""
r"""
Args:
table_id(int): id of sparse params table
learning_rate(float): the learning rate used to update parameters. \
@ -135,7 +135,7 @@ class DownpourWorker(Worker):
def add_sparse_table(self, table_id, learning_rate, slot_key_vars,
slot_value_vars):
"""
r"""
Args:
table_id(int): id of sparse params table
learning_rate(float): the learning rate used to update parameters. \
@ -153,7 +153,7 @@ class DownpourWorker(Worker):
[var.name + "@GRAD" for var in slot_value_vars])
def add_dense_table(self, table_id, learning_rate, param_vars, grad_vars):
"""
r"""
Args:
table_id(int): id of sparse params table
learning_rate(float): the learning rate used to update parameters. \

@ -593,7 +593,7 @@ def grad(outputs,
@framework.dygraph_only
def to_variable(value, name=None, zero_copy=None, dtype=None):
"""
r"""
:api_attr: imperative
The API will create a ``Variable`` or ``ComplexVariable`` object from

@ -183,7 +183,7 @@ class PiecewiseDecay(LearningRateDecay):
class NaturalExpDecay(LearningRateDecay):
"""
r"""
:api_attr: imperative
Applies natural exponential decay to the initial learning rate.
@ -266,7 +266,7 @@ class NaturalExpDecay(LearningRateDecay):
class ExponentialDecay(LearningRateDecay):
"""
r"""
:api_attr: imperative
Applies exponential decay to the learning rate.
@ -348,7 +348,7 @@ class ExponentialDecay(LearningRateDecay):
class InverseTimeDecay(LearningRateDecay):
"""
r"""
:api_attr: imperative
Applies inverse time decay to the initial learning rate.
@ -426,7 +426,7 @@ class InverseTimeDecay(LearningRateDecay):
class PolynomialDecay(LearningRateDecay):
"""
r"""
:api_attr: imperative
Applies polynomial decay to the initial learning rate.
@ -520,7 +520,7 @@ class PolynomialDecay(LearningRateDecay):
class CosineDecay(LearningRateDecay):
"""
r"""
:api_attr: imperative
Applies cosine decay to the learning rate.
@ -578,7 +578,7 @@ class CosineDecay(LearningRateDecay):
class NoamDecay(LearningRateDecay):
"""
r"""
:api_attr: imperative
Applies Noam decay to the initial learning rate.

@ -42,7 +42,7 @@ __all__ = [
class Conv2D(layers.Layer):
"""
r"""
This interface is used to construct a callable object of the ``Conv2D`` class.
For more details, refer to code examples.
The convolution2D layer calculates the output based on the input, filter
@ -282,7 +282,7 @@ class Conv2D(layers.Layer):
class Conv3D(layers.Layer):
"""
r"""
**Convolution3D Layer**
The convolution3D layer calculates the output based on the input, filter
@ -484,7 +484,7 @@ class Conv3D(layers.Layer):
class Conv3DTranspose(layers.Layer):
"""
r"""
**Convolution3D transpose layer**
The convolution3D transpose layer calculates the output based on the input,
@ -701,7 +701,7 @@ class Conv3DTranspose(layers.Layer):
class Pool2D(layers.Layer):
"""
r"""
This interface is used to construct a callable object of the ``Pool2D`` class.
For more details, refer to code examples.
@ -1009,7 +1009,7 @@ class Linear(layers.Layer):
class InstanceNorm(layers.Layer):
"""
r"""
This interface is used to construct a callable object of the ``InstanceNorm`` class.
For more details, refer to code examples.
@ -1143,7 +1143,7 @@ class InstanceNorm(layers.Layer):
class BatchNorm(layers.Layer):
"""
r"""
:alias_main: paddle.nn.BatchNorm
:alias: paddle.nn.BatchNorm,paddle.nn.layer.BatchNorm,paddle.nn.layer.norm.BatchNorm
:old_api: paddle.fluid.dygraph.BatchNorm
@ -1492,7 +1492,7 @@ class Dropout(layers.Layer):
class Embedding(layers.Layer):
"""
r"""
:alias_main: paddle.nn.Embedding
:alias: paddle.nn.Embedding,paddle.nn.layer.Embedding,paddle.nn.layer.common.Embedding
:old_api: paddle.fluid.dygraph.Embedding
@ -1652,7 +1652,7 @@ class Embedding(layers.Layer):
class LayerNorm(layers.Layer):
"""
r"""
:alias_main: paddle.nn.LayerNorm
:alias: paddle.nn.LayerNorm,paddle.nn.layer.LayerNorm,paddle.nn.layer.norm.LayerNorm
:old_api: paddle.fluid.dygraph.LayerNorm
@ -2242,7 +2242,7 @@ class NCE(layers.Layer):
class PRelu(layers.Layer):
"""
r"""
This interface is used to construct a callable object of the ``PRelu`` class.
For more details, refer to code examples.
It implements three activation methods of the ``PRelu`` activation function.
@ -2350,7 +2350,7 @@ class PRelu(layers.Layer):
class BilinearTensorProduct(layers.Layer):
"""
r"""
**Add Bilinear Tensor Product Layer**
@ -2467,7 +2467,7 @@ class BilinearTensorProduct(layers.Layer):
class Conv2DTranspose(layers.Layer):
"""
r"""
This interface is used to construct a callable object of the ``Conv2DTranspose`` class.
For more details, refer to code examples.
The convolution2D transpose layer calculates the output based on the input,
@ -2979,7 +2979,7 @@ class GroupNorm(layers.Layer):
class SpectralNorm(layers.Layer):
"""
r"""
:alias_main: paddle.nn.SpectralNorm
:alias: paddle.nn.SpectralNorm,paddle.nn.layer.SpectralNorm,paddle.nn.layer.norm.SpectralNorm
:old_api: paddle.fluid.dygraph.SpectralNorm

@ -20,7 +20,7 @@ __all__ = ['LSTMCell', 'GRUCell']
class LSTMCell(Layer):
"""
r"""
LSTMCell implementation using basic operators.
There are two LSTMCell version, the default one is compatible with CUDNN LSTM implementation.
The algorithm can be described as the equations below.
@ -236,7 +236,7 @@ class LSTMCell(Layer):
class GRUCell(Layer):
"""
r"""
GRU implementation using basic operators.
There are two GRUCell version, the default one is compatible with CUDNN GRU implementation.
The algorithm can be described as the equations below.

Some files were not shown because too many files have changed in this diff.
