2.0rc api rename (#28088) (#28179)

* rename manual_seed to seed

* rename xxx1d-->xxx1D, xxx2d-->xxx2D, xxx3d-->xxx3D

* rename manual_seed --> seed

* do not rename .cc, .cu and .h files

* rename manual_seed --> seed

* rename manual_seed --> seed

* rename manual_seed --> seed

* rename manual_seed --> seed

* disable_static on doc example code

* do not change manual_seed on generator

* add enable_static on sample code

* convert python/paddle/fluid/layers/nn.py to bak

* fix typo

* fix code style

* fix seed to manual_seed when calling functions of Generator()

* fix bug
Branch: release/2.0-rc · authored by cnn, committed via GitHub 5 years ago · parent 7232f1ed44 · commit b04c55ef0f

@ -221,7 +221,7 @@ from .tensor.search import sort #DEFINE_ALIAS
from .tensor.to_string import set_printoptions
from .framework.random import manual_seed #DEFINE_ALIAS
from .framework.random import seed #DEFINE_ALIAS
from .framework.random import get_cuda_rng_state #DEFINE_ALIAS
from .framework.random import set_cuda_rng_state #DEFINE_ALIAS
from .framework import ParamAttr #DEFINE_ALIAS
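
For reference, the user-facing effect of the rename (not part of the diff; a minimal before/after sketch using only the renamed public APIs):

```python
import paddle

# old API (pre-2.0rc):  paddle.manual_seed(100)
paddle.seed(100)                       # global RNG seed, renamed from manual_seed

# old API:  conv = paddle.nn.Conv2d(3, 2, 3)
conv = paddle.nn.Conv2D(3, 2, 3)       # layer names renamed xxx2d -> xxx2D
out = conv(paddle.rand([1, 3, 8, 8]))  # dygraph forward pass
print(out.shape)                       # [1, 2, 6, 6]
```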

@ -37,7 +37,7 @@ def auto_cast(enable=True, custom_white_list=None, custom_black_list=None):
import paddle
conv2d = paddle.nn.Conv2d(3, 2, 3, bias_attr=False)
conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
data = paddle.rand([10, 3, 32, 32])
with paddle.amp.auto_cast():

@ -50,7 +50,7 @@ class GradScaler(AmpScaler):
import paddle
model = paddle.nn.Conv2d(3, 2, 3, bias_attr=True)
model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True)
optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
data = paddle.rand([10, 3, 32, 32])
@ -90,7 +90,7 @@ class GradScaler(AmpScaler):
import paddle
model = paddle.nn.Conv2d(3, 2, 3, bias_attr=True)
model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True)
optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
data = paddle.rand([10, 3, 32, 32])
@ -122,7 +122,7 @@ class GradScaler(AmpScaler):
import paddle
model = paddle.nn.Conv2d(3, 2, 3, bias_attr=True)
model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True)
optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
data = paddle.rand([10, 3, 32, 32])
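
The hunks above only show the setup lines touched by the rename. For context, a minimal sketch of the full AMP step they belong to, assuming the standard GradScaler.scale / scaler.minimize flow on a CUDA device:

```python
import paddle

model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True)
optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
data = paddle.rand([10, 3, 32, 32])

with paddle.amp.auto_cast():
    conv = model(data)
    loss = paddle.mean(conv)

scaled = scaler.scale(loss)          # scale the loss to avoid fp16 underflow
scaled.backward()                    # backward on the scaled loss
scaler.minimize(optimizer, scaled)   # unscale gradients and update parameters
```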

@ -670,13 +670,13 @@ class Categorical(Distribution):
import paddle
from paddle.distribution import Categorical
paddle.manual_seed(100) # on CPU device
paddle.seed(100) # on CPU device
x = paddle.rand([6])
print(x.numpy())
# [0.5535528 0.20714243 0.01162981
# 0.51577556 0.36369765 0.2609165 ]
paddle.manual_seed(200) # on CPU device
paddle.seed(200) # on CPU device
y = paddle.rand([6])
print(y.numpy())
# [0.77663314 0.90824795 0.15685187
@ -685,7 +685,7 @@ class Categorical(Distribution):
cat = Categorical(x)
cat2 = Categorical(y)
paddle.manual_seed(1000) # on CPU device
paddle.seed(1000) # on CPU device
cat.sample([2,3])
# [[0, 0, 5],
# [3, 4, 5]]
@ -744,7 +744,7 @@ class Categorical(Distribution):
import paddle
from paddle.distribution import Categorical
paddle.manual_seed(100) # on CPU device
paddle.seed(100) # on CPU device
x = paddle.rand([6])
print(x.numpy())
# [0.5535528 0.20714243 0.01162981
@ -752,7 +752,7 @@ class Categorical(Distribution):
cat = Categorical(x)
paddle.manual_seed(1000) # on CPU device
paddle.seed(1000) # on CPU device
cat.sample([2,3])
# [[0, 0, 5],
# [3, 4, 5]]
@ -791,13 +791,13 @@ class Categorical(Distribution):
import paddle
from paddle.distribution import Categorical
paddle.manual_seed(100) # on CPU device
paddle.seed(100) # on CPU device
x = paddle.rand([6])
print(x.numpy())
# [0.5535528 0.20714243 0.01162981
# 0.51577556 0.36369765 0.2609165 ]
paddle.manual_seed(200) # on CPU device
paddle.seed(200) # on CPU device
y = paddle.rand([6])
print(y.numpy())
# [0.77663314 0.90824795 0.15685187
@ -842,7 +842,7 @@ class Categorical(Distribution):
import paddle
from paddle.distribution import Categorical
paddle.manual_seed(100) # on CPU device
paddle.seed(100) # on CPU device
x = paddle.rand([6])
print(x.numpy())
# [0.5535528 0.20714243 0.01162981
@ -887,7 +887,7 @@ class Categorical(Distribution):
import paddle
from paddle.distribution import Categorical
paddle.manual_seed(100) # on CPU device
paddle.seed(100) # on CPU device
x = paddle.rand([6])
print(x.numpy())
# [0.5535528 0.20714243 0.01162981
@ -953,7 +953,7 @@ class Categorical(Distribution):
import paddle
from paddle.distribution import Categorical
paddle.manual_seed(100) # on CPU device
paddle.seed(100) # on CPU device
x = paddle.rand([6])
print(x.numpy())
# [0.5535528 0.20714243 0.01162981

@ -114,7 +114,7 @@ class TestWeightDecay(unittest.TestCase):
return param_sum
def check_weight_decay(self, place, model):
paddle.manual_seed(1)
paddle.seed(1)
paddle.framework.random._manual_program_seed(1)
main_prog = fluid.framework.Program()
startup_prog = fluid.framework.Program()
@ -137,7 +137,7 @@ class TestWeightDecay(unittest.TestCase):
return param_sum
def check_weight_decay2(self, place, model):
paddle.manual_seed(1)
paddle.seed(1)
paddle.framework.random._manual_program_seed(1)
main_prog = fluid.framework.Program()
startup_prog = fluid.framework.Program()
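
The seeding pattern repeated throughout the updated tests, for reference (a sketch; `_manual_program_seed` is the private helper the tests use to seed newly created static Programs):

```python
import paddle
import paddle.fluid as fluid

paddle.seed(1)                                   # renamed from paddle.manual_seed
paddle.framework.random._manual_program_seed(1)  # private test helper, seeds new Programs

main_prog = fluid.framework.Program()
startup_prog = fluid.framework.Program()
```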

@ -1058,7 +1058,7 @@ class Layer(core.Layer):
super(Mylayer, self).__init__()
self.linear1 = paddle.nn.Linear(10, 10)
self.linear2 = paddle.nn.Linear(5, 5)
self.conv2d = paddle.nn.Conv2d(3, 2, 3)
self.conv2d = paddle.nn.Conv2D(3, 2, 3)
self.embedding = paddle.nn.Embedding(128, 16)
self.h_0 = paddle.to_tensor(np.zeros([10, 10]).astype('float32'))

@ -110,7 +110,7 @@ class Conv2D(layers.Layer):
dilation (int or tuple, optional): The dilation size. If dilation is a tuple, it must
contain two integers, (dilation_H, dilation_W). Otherwise, the
dilation_H = dilation_W = dilation. Default: 1.
groups (int, optional): The groups number of the Conv2d Layer. According to grouped
groups (int, optional): The groups number of the Conv2D Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
@ -345,7 +345,7 @@ class Conv3D(layers.Layer):
dilation (int|tuple, optional): The dilation size. If dilation is a tuple, it must
contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the
dilation_D = dilation_H = dilation_W = dilation. The default value is 1.
groups (int, optional): The groups number of the Conv3d Layer. According to grouped
groups (int, optional): The groups number of the Conv3D Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
@ -574,7 +574,7 @@ class Conv3DTranspose(layers.Layer):
dilation(int|tuple, optional): The dilation size. If dilation is a tuple, it must
contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the
dilation_D = dilation_H = dilation_W = dilation. The default value is 1.
groups(int, optional): The groups number of the Conv3d transpose layer. Inspired by
groups(int, optional): The groups number of the Conv3D transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the
@ -2541,7 +2541,7 @@ class Conv2DTranspose(layers.Layer):
dilation(int or tuple, optional): The dilation size. If dilation is a tuple, it must
contain two integers, (dilation_H, dilation_W). Otherwise, the
dilation_H = dilation_W = dilation. Default: 1.
groups(int, optional): The groups number of the Conv2d transpose layer. Inspired by
groups(int, optional): The groups number of the Conv2D transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the
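
The `groups` behaviour described in these docstrings, illustrated with the renamed layer (a hypothetical example, not part of the diff):

```python
import paddle

x = paddle.rand([8, 4, 32, 32])                   # NCHW input with 4 channels
conv = paddle.nn.Conv2D(in_channels=4, out_channels=6,
                        kernel_size=3, groups=2)  # each group sees half the input channels
y = conv(x)
print(y.shape)                                    # [8, 6, 30, 30]
```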

@ -749,7 +749,7 @@ class BilinearInitializer(Initializer):
regularizer=L2Decay(0.),
initializer=nn.initializer.Bilinear())
data = paddle.rand([B, 3, H, W], dtype='float32')
conv_up = nn.ConvTranspose2d(3,
conv_up = nn.Conv2DTranspose(3,
out_channels=C,
kernel_size=2 * factor - factor % 2,
padding=int(

@ -43,7 +43,7 @@ def simple_img_conv_pool(input,
act=None,
use_cudnn=True):
"""
:api_attr: Static Graph
:api_attr: Static Graph
The simple_img_conv_pool api is composed of :ref:`api_fluid_layers_conv2d` and :ref:`api_fluid_layers_pool2d` .
@ -106,6 +106,8 @@ def simple_img_conv_pool(input,
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
img = fluid.data(name='img', shape=[100, 1, 28, 28], dtype='float32')
conv_pool = fluid.nets.simple_img_conv_pool(input=img,
filter_size=5,
@ -151,37 +153,37 @@ def img_conv_group(input,
pool_type="max",
use_cudnn=True):
"""
:api_attr: Static Graph
:api_attr: Static Graph
The Image Convolution Group is composed of Convolution2d, BatchNorm, DropOut,
and Pool2d. According to the input arguments, img_conv_group will do serials of
and Pool2D. According to the input arguments, img_conv_group will do serials of
computation for Input using Convolution2d, BatchNorm, DropOut, and pass the last
result to Pool2d.
result to Pool2D.
Args:
input (Variable): The input is 4-D Tensor with shape [N, C, H, W], the data type of input is float32 or float64.
conv_num_filter(list|tuple): Indicates the numbers of filter of this group.
pool_size (int|list|tuple): The pooling size of Pool2d Layer. If pool_size
pool_size (int|list|tuple): The pooling size of Pool2D Layer. If pool_size
is a list or tuple, it must contain two integers, (pool_size_height, pool_size_width).
Otherwise, the pool_size_height = pool_size_width = pool_size.
conv_padding (int|list|tuple): The padding size of the Conv2d Layer. If padding is
conv_padding (int|list|tuple): The padding size of the Conv2D Layer. If padding is
a list or tuple, its length must be equal to the length of conv_num_filter.
Otherwise the conv_padding of all Conv2d Layers are the same. Default 1.
Otherwise the conv_padding of all Conv2D Layers are the same. Default 1.
conv_filter_size (int|list|tuple): The filter size. If filter_size is a list or
tuple, its length must be equal to the length of conv_num_filter.
Otherwise the conv_filter_size of all Conv2d Layers are the same. Default 3.
conv_act (str): Activation type for Conv2d Layer that is not followed by BatchNorm.
Otherwise the conv_filter_size of all Conv2D Layers are the same. Default 3.
conv_act (str): Activation type for Conv2D Layer that is not followed by BatchNorm.
Default: None.
param_attr (ParamAttr): The parameters to the Conv2d Layer. Default: None
conv_with_batchnorm (bool|list): Indicates whether to use BatchNorm after Conv2d Layer.
param_attr (ParamAttr): The parameters to the Conv2D Layer. Default: None
conv_with_batchnorm (bool|list): Indicates whether to use BatchNorm after Conv2D Layer.
If conv_with_batchnorm is a list, its length must be equal to the length of
conv_num_filter. Otherwise, conv_with_batchnorm indicates whether all the
Conv2d Layer follows a BatchNorm. Default False.
Conv2D Layer follows a BatchNorm. Default False.
conv_batchnorm_drop_rate (float|list): Indicates the drop_rate of Dropout Layer
after BatchNorm. If conv_batchnorm_drop_rate is a list, its length must be
equal to the length of conv_num_filter. Otherwise, drop_rate of all Dropout
Layers is conv_batchnorm_drop_rate. Default 0.0.
pool_stride (int|list|tuple): The pooling stride of Pool2d layer. If pool_stride
pool_stride (int|list|tuple): The pooling stride of Pool2D layer. If pool_stride
is a list or tuple, it must contain two integers, (pooling_stride_H,
pooling_stride_W). Otherwise, the pooling_stride_H = pooling_stride_W = pool_stride.
Default 1.
@ -192,12 +194,15 @@ def img_conv_group(input,
Return:
A Variable holding Tensor representing the final result after serial computation using Convolution2d,
BatchNorm, DropOut, and Pool2d, whose data type is the same with input.
BatchNorm, DropOut, and Pool2D, whose data type is the same with input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
conv_pool = fluid.nets.img_conv_group(input=img,
conv_padding=1,
@ -261,7 +266,7 @@ def sequence_conv_pool(input,
pool_type="max",
bias_attr=None):
"""
:api_attr: Static Graph
:api_attr: Static Graph
**This api takes input as an LoDTensor. If input is a Tensor, please use**
:ref:`api_fluid_nets_simple_img_conv_pool` **instead**
@ -300,6 +305,8 @@ def sequence_conv_pool(input,
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
input_dim = 100 #len(word_dict)
emb_dim = 128
hid_dim = 512
@ -327,7 +334,7 @@ def sequence_conv_pool(input,
def glu(input, dim=-1):
"""
:api_attr: Static Graph
:api_attr: Static Graph
The Gated Linear Units(GLU) composed by :ref:`api_fluid_layers_split` ,
:ref:`api_fluid_layers_sigmoid` and :ref:`api_fluid_layers_elementwise_mul` .
@ -356,6 +363,9 @@ def glu(input, dim=-1):
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
data = fluid.data(
name="words", shape=[-1, 6, 3, 9], dtype="float32")
# shape of output: [-1, 3, 3, 9]
@ -375,7 +385,7 @@ def scaled_dot_product_attention(queries,
num_heads=1,
dropout_rate=0.):
"""
:api_attr: Static Graph
:api_attr: Static Graph
This interface Multi-Head Attention using scaled dot product.
Attention mechanism can be seen as mapping a query and a set of key-value
@ -435,7 +445,9 @@ def scaled_dot_product_attention(queries,
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
queries = fluid.data(name="queries", shape=[3, 5, 9], dtype="float32")
keys = fluid.data(name="keys", shape=[3, 6, 9], dtype="float32")
values = fluid.data(name="values", shape=[3, 6, 10], dtype="float32")
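
These `fluid.nets` helpers are static-graph only, which is why the examples above gain a `paddle.enable_static()` call. A minimal runnable sketch of the first example under that convention:

```python
import paddle
import paddle.fluid as fluid

paddle.enable_static()   # switch to static-graph mode before using fluid.nets

img = fluid.data(name='img', shape=[100, 1, 28, 28], dtype='float32')
conv_pool = fluid.nets.simple_img_conv_pool(input=img,
                                            filter_size=5,
                                            num_filters=20,
                                            pool_size=2,
                                            pool_stride=2,
                                            act="relu")
```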

@ -564,7 +564,7 @@ def train_bmn(args, place, to_static):
loss_data = []
with fluid.dygraph.guard(place):
paddle.manual_seed(SEED)
paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
global local_random
local_random = np.random.RandomState(SEED)

@ -450,7 +450,7 @@ def do_train(args, to_static):
place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.dygraph.guard(place):
paddle.manual_seed(SEED)
paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
reader = get_random_input_data(args.batch_size, args.vocab_size,

@ -451,7 +451,7 @@ def train_mobilenet(args, to_static):
with fluid.dygraph.guard(args.place):
np.random.seed(SEED)
paddle.manual_seed(SEED)
paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
if args.model == "MobileNetV1":

@ -218,7 +218,7 @@ def train(place):
batch_num = 200
with fluid.dygraph.guard(place):
paddle.manual_seed(SEED)
paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
ptb_model = PtbModel(
hidden_size=hidden_size,

@ -210,7 +210,7 @@ def train(place):
batch_num = 200
paddle.disable_static(place)
paddle.manual_seed(SEED)
paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
ptb_model = PtbModel(
hidden_size=hidden_size,

@ -65,7 +65,7 @@ def train(args, place, to_static):
env.seed(SEED)
with fluid.dygraph.guard(place):
paddle.manual_seed(SEED)
paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
local_random = np.random.RandomState(SEED)

@ -219,7 +219,7 @@ def train(to_static):
"""
with fluid.dygraph.guard(place):
np.random.seed(SEED)
paddle.manual_seed(SEED)
paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
train_reader = paddle.batch(

@ -66,7 +66,7 @@ class ConvBNLayer(paddle.nn.Layer):
act=None):
super(ConvBNLayer, self).__init__()
self._conv = paddle.nn.Conv2d(
self._conv = paddle.nn.Conv2D(
in_channels=num_channels,
out_channels=num_filters,
kernel_size=filter_size,
@ -214,7 +214,7 @@ def train(to_static):
"""
paddle.disable_static(place)
np.random.seed(SEED)
paddle.manual_seed(SEED)
paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
train_reader = paddle.batch(

@ -334,7 +334,7 @@ def train(train_reader, to_static):
np.random.seed(SEED)
with fluid.dygraph.guard(place):
paddle.manual_seed(SEED)
paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
se_resnext = SeResNeXt()
optimizer = optimizer_setting(train_parameters, se_resnext.parameters())

@ -286,7 +286,7 @@ def train(args, to_static):
with fluid.dygraph.guard(place):
np.random.seed(SEED)
paddle.manual_seed(SEED)
paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
train_reader = fake_data_reader(args.class_num, args.vocab_size,

@ -108,7 +108,7 @@ def train(conf_dict, to_static):
place = fluid.CPUPlace()
with fluid.dygraph.guard(place):
paddle.manual_seed(SEED)
paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
conf_dict['dict_size'] = len(vocab)

@ -106,7 +106,7 @@ def train(conf_dict, to_static):
place = paddle.CPUPlace()
paddle.disable_static(place)
paddle.manual_seed(SEED)
paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
conf_dict['dict_size'] = len(vocab)

@ -33,7 +33,7 @@ STEP_NUM = 10
def train_static(args, batch_generator):
paddle.enable_static()
paddle.manual_seed(SEED)
paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
train_prog = fluid.Program()
startup_prog = fluid.Program()
@ -131,7 +131,7 @@ def train_static(args, batch_generator):
def train_dygraph(args, batch_generator):
with fluid.dygraph.guard(place):
if SEED is not None:
paddle.manual_seed(SEED)
paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
# define data loader
train_loader = fluid.io.DataLoader.from_generator(capacity=10)
@ -223,7 +223,7 @@ def train_dygraph(args, batch_generator):
def predict_dygraph(args, batch_generator):
with fluid.dygraph.guard(place):
paddle.manual_seed(SEED)
paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
# define data loader
@ -295,7 +295,7 @@ def predict_dygraph(args, batch_generator):
def predict_static(args, batch_generator):
test_prog = fluid.Program()
with fluid.program_guard(test_prog):
paddle.manual_seed(SEED)
paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
# define input and reader

@ -272,7 +272,7 @@ def train(args, fake_data_reader, to_static):
random.seed(0)
np.random.seed(0)
with fluid.dygraph.guard(place):
paddle.manual_seed(1000)
paddle.seed(1000)
paddle.framework.random._manual_program_seed(1000)
video_model = TSM_ResNet("TSM", train_config, 'Train')

@ -20,7 +20,7 @@ import struct
import paddle.fluid.core as core
from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
from paddle.fluid.tests.unittests.test_conv2d_op import conv2d_forward_naive, TestConv2dOp
from paddle.fluid.tests.unittests.test_conv2d_op import conv2d_forward_naive, TestConv2DOp
def conv2d_residual_naive(out, residual):
@ -31,7 +31,7 @@ def conv2d_residual_naive(out, residual):
@unittest.skipIf(not core.supports_bfloat16(),
"place does not support BF16 evaluation")
class TestConv2dBf16Op(TestConv2dOp):
class TestConv2DBf16Op(TestConv2DOp):
def setUp(self):
self.op_type = "conv2d"
self.use_cudnn = False
@ -110,7 +110,7 @@ class TestConv2dBf16Op(TestConv2dOp):
pass
def init_test_case(self):
TestConv2dOp.init_test_case(self)
TestConv2DOp.init_test_case(self)
self.input_size = [1, 1, 5, 5] # NCHW
f_c = self.input_size[1] // self.groups
self.input_residual_size = [1, 2, 3, 3]
@ -130,7 +130,7 @@ class TestConv2dBf16Op(TestConv2dOp):
self.fuse_residual = True
class TestConv2d(TestConv2dBf16Op):
class TestConv2D(TestConv2DBf16Op):
def init_test_case(self):
self.pad = [0, 0]
self.stride = [1, 1]
@ -144,19 +144,19 @@ class TestConv2d(TestConv2dBf16Op):
self.input_type = np.uint16
class TestWithPad(TestConv2d):
class TestWithPad(TestConv2D):
def init_test_case(self):
TestConv2d.init_test_case(self)
TestConv2D.init_test_case(self)
self.pad = [1, 1]
self.input_residual_size = [2, 6, 5, 5]
class TestWithGroup(TestConv2d):
class TestWithGroup(TestConv2D):
def init_group(self):
self.groups = 3
class TestWithStride(TestConv2dBf16Op):
class TestWithStride(TestConv2DBf16Op):
def init_test_case(self):
self.pad = [1, 1]
self.stride = [2, 2]
@ -170,7 +170,7 @@ class TestWithStride(TestConv2dBf16Op):
self.input_type = np.uint16
class TestWithDilations(TestConv2dBf16Op):
class TestWithDilations(TestConv2DBf16Op):
def init_test_case(self):
self.pad = [1, 1]
self.stride = [1, 1]
@ -185,7 +185,7 @@ class TestWithDilations(TestConv2dBf16Op):
self.input_type = np.uint16
class TestWith1x1ForceFP32Output(TestConv2dBf16Op):
class TestWith1x1ForceFP32Output(TestConv2DBf16Op):
def init_test_case(self):
self.pad = [0, 0]
self.stride = [1, 1]
@ -201,7 +201,7 @@ class TestWith1x1ForceFP32Output(TestConv2dBf16Op):
self.fuse_residual = False
class TestWithInput1x1Filter1x1(TestConv2dBf16Op):
class TestWithInput1x1Filter1x1(TestConv2DBf16Op):
def init_test_case(self):
self.pad = [0, 0]
self.stride = [1, 1]

@ -19,7 +19,7 @@ import numpy as np
import paddle.fluid.core as core
from paddle.fluid.tests.unittests.op_test import OpTest
from paddle.fluid.tests.unittests.test_conv2d_op import conv2d_forward_naive, TestConv2dOp
from paddle.fluid.tests.unittests.test_conv2d_op import conv2d_forward_naive, TestConv2DOp
def conv2d_forward_refer(input, filter, group, conv_param):
@ -28,7 +28,7 @@ def conv2d_forward_refer(input, filter, group, conv_param):
return out
class TestConv2dInt8Op(TestConv2dOp):
class TestConv2DInt8Op(TestConv2DOp):
def setUp(self):
self.op_type = "conv2d"
self.use_cudnn = False
@ -162,7 +162,7 @@ class TestConv2dInt8Op(TestConv2dOp):
pass
def init_test_case(self):
TestConv2dOp.init_test_case(self)
TestConv2DOp.init_test_case(self)
self.input_size = [1, 1, 5, 5] # NCHW
f_c = self.input_size[1] // self.groups
self.input_residual_size = [1, 2, 3, 3]
@ -186,7 +186,7 @@ class TestConv2dInt8Op(TestConv2dOp):
#--------------------test conv2d u8 in and u8 out with residual fuse--------------------
class TestConv2d(TestConv2dInt8Op):
class TestConv2D(TestConv2DInt8Op):
def init_test_case(self):
self.pad = [0, 0]
self.stride = [1, 1]
@ -201,19 +201,19 @@ class TestConv2d(TestConv2dInt8Op):
self.scale_in_eltwise = 0.6
class TestWithPad(TestConv2d):
class TestWithPad(TestConv2D):
def init_test_case(self):
TestConv2d.init_test_case(self)
TestConv2D.init_test_case(self)
self.pad = [1, 1]
self.input_residual_size = [2, 6, 5, 5]
class TestWithGroup(TestConv2d):
class TestWithGroup(TestConv2D):
def init_group(self):
self.groups = 3
class TestWithStride(TestConv2dInt8Op):
class TestWithStride(TestConv2DInt8Op):
def init_test_case(self):
self.pad = [1, 1]
self.stride = [2, 2]
@ -228,7 +228,7 @@ class TestWithStride(TestConv2dInt8Op):
self.scale_in_eltwise = 0.5
class TestWithDilations(TestConv2dInt8Op):
class TestWithDilations(TestConv2DInt8Op):
def init_test_case(self):
self.pad = [1, 1]
self.stride = [1, 1]
@ -244,7 +244,7 @@ class TestWithDilations(TestConv2dInt8Op):
self.scale_in_eltwise = 0.5
class TestWith1x1(TestConv2dInt8Op):
class TestWith1x1(TestConv2DInt8Op):
def init_test_case(self):
self.pad = [0, 0]
self.stride = [1, 1]
@ -259,7 +259,7 @@ class TestWith1x1(TestConv2dInt8Op):
self.scale_in_eltwise = 0.5
class TestWithInput1x1Filter1x1(TestConv2dInt8Op):
class TestWithInput1x1Filter1x1(TestConv2DInt8Op):
def init_test_case(self):
self.pad = [0, 0]
self.stride = [1, 1]
@ -356,7 +356,7 @@ def create_test_int8_class(parent):
globals()[cls_name_u8s8_re_1] = TestU8S8ResCase
create_test_int8_class(TestConv2dInt8Op)
create_test_int8_class(TestConv2DInt8Op)
create_test_int8_class(TestWithPad)
create_test_int8_class(TestWithStride)
create_test_int8_class(TestWithDilations)
@ -365,7 +365,7 @@ create_test_int8_class(TestWith1x1)
create_test_int8_class(TestWithInput1x1Filter1x1)
class TestConv2dOp_AsyPadding_INT_MKLDNN(TestConv2dInt8Op):
class TestConv2DOp_AsyPadding_INT_MKLDNN(TestConv2DInt8Op):
def init_kernel_type(self):
self.use_mkldnn = True
@ -374,13 +374,13 @@ class TestConv2dOp_AsyPadding_INT_MKLDNN(TestConv2dInt8Op):
self.padding_algorithm = "EXPLICIT"
class TestConv2dOp_Same_INT_MKLDNN(TestConv2dOp_AsyPadding_INT_MKLDNN):
class TestConv2DOp_Same_INT_MKLDNN(TestConv2DOp_AsyPadding_INT_MKLDNN):
def init_paddings(self):
self.pad = [0, 0]
self.padding_algorithm = "SAME"
class TestConv2dOp_Valid_INT_MKLDNN(TestConv2dOp_AsyPadding_INT_MKLDNN):
class TestConv2DOp_Valid_INT_MKLDNN(TestConv2DOp_AsyPadding_INT_MKLDNN):
def init_paddings(self):
self.pad = [1, 1]
self.padding_algorithm = "VALID"

Some files were not shown because too many files have changed in this diff.
