Release 2.0rc cherry-pick API rename #28108 (#28184)

* rename count_include_pad --> exclusive, return_indices --> return_mask

* remove track_running_stats

* fix typo.

* rename xxxd --> xxxD (capitalize the trailing dimension suffix, e.g. AvgPool1d --> AvgPool1D)

* resolve merge conflicts
Branch: release/2.0-rc
Author: cnn, committed by GitHub
Parent: b04c55ef0f
Commit: 7bfd799d00
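In short: the pooling APIs rename count_include_pad to exclusive and return_indices to return_mask, and the norm layers drop track_running_stats entirely. A minimal sketch of the renamed arguments as the tests below exercise them (Paddle 2.0rc-style imports assumed):

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([2, 3, 32, 32])

    # count_include_pad --> exclusive: exclusive=True excludes the padded
    # zeros from each averaging window.
    avg = F.avg_pool2d(x, kernel_size=2, stride=2, padding=1, exclusive=True)

    # return_indices --> return_mask: when True, the op also returns the
    # argmax mask alongside the pooled output.
    out, mask = F.max_pool2d(x, kernel_size=2, stride=2, padding=0,
                             return_mask=True)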

@@ -63,10 +63,7 @@ class TestLayer(fluid.dygraph.Layer):
             bias_attr=False)
         self._sync_batch_norm2 = SyncBatchNorm(
-            num_filters,
-            weight_attr=False,
-            bias_attr=False,
-            track_running_stats=False)
+            num_filters, weight_attr=False, bias_attr=False)

     def forward(self, inputs):
         y = self._conv(inputs)

@@ -150,7 +150,7 @@ class TestAdaptiveMaxPool2DAPI(unittest.TestCase):
             x = paddle.to_tensor(self.x_np)
             out_1 = paddle.nn.functional.adaptive_max_pool2d(
-                x=x, return_indices=False, output_size=[3, 3])
+                x=x, return_mask=False, output_size=[3, 3])
             out_2 = paddle.nn.functional.adaptive_max_pool2d(x=x, output_size=5)

@@ -148,11 +148,7 @@ class TestPool1D_API(unittest.TestCase):
             input_np = np.random.random([2, 3, 32]).astype("float32")
             input = fluid.dygraph.to_variable(input_np)
             result = F.avg_pool1d(
-                input,
-                kernel_size=2,
-                stride=2,
-                padding=[1],
-                count_include_pad=True)
+                input, kernel_size=2, stride=2, padding=[1], exclusive=True)

             result_np = avg_pool1D_forward_naive(
                 input_np, ksize=[2], strides=[2], paddings=[1], exclusive=False)
@@ -160,7 +156,8 @@ class TestPool1D_API(unittest.TestCase):
             self.assertTrue(np.allclose(result.numpy(), result_np))

             avg_pool1d_dg = paddle.nn.AvgPool1D(
-                kernel_size=2, stride=None, padding=1, count_include_pad=True)
+                kernel_size=2, stride=None, padding=1, exclusive=True)
             result = avg_pool1d_dg(input)
             self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -200,7 +197,7 @@ class TestPool1D_API(unittest.TestCase):
             input_np = np.random.random([2, 3, 32]).astype("float32")
             input = fluid.dygraph.to_variable(input_np)
             result, index = F.max_pool1d(
-                input, kernel_size=2, stride=2, padding=0, return_indices=True)
+                input, kernel_size=2, stride=2, padding=0, return_mask=True)
             result_np = max_pool1D_forward_naive(
                 input_np, ksize=[2], strides=[2], paddings=[0])

@@ -134,7 +134,7 @@ class TestPool2D_API(unittest.TestCase):
             input_np = np.random.random([2, 3, 32, 32]).astype("float32")
             input = fluid.dygraph.to_variable(input_np)
             result = max_pool2d(
-                input, kernel_size=2, stride=2, padding=0, return_indices=False)
+                input, kernel_size=2, stride=2, padding=0, return_mask=False)
             result_np = pool2D_forward_naive(
                 input_np,
@@ -159,7 +159,7 @@ class TestPool2D_API(unittest.TestCase):
                 kernel_size=2,
                 stride=2,
                 padding=0,
-                return_indices=False,
+                return_mask=False,
                 data_format="NHWC")
             result_np = pool2D_forward_naive(
@@ -222,7 +222,7 @@ class TestPool2D_API(unittest.TestCase):
                 kernel_size=2,
                 stride=None,
                 padding="SAME",
-                return_indices=True)
+                return_mask=True)
             result_np = pool2D_forward_naive(
                 input_np,
@@ -269,7 +269,7 @@ class TestPool2D_API(unittest.TestCase):
                 kernel_size=2,
                 stride=2,
                 padding=padding,
-                return_indices=False)
+                return_mask=False)
             result_np = pool2D_forward_naive(
                 input_np,
@@ -490,7 +490,7 @@ class TestPool2DError_API(unittest.TestCase):
                     padding=0,
                     ceil_mode=False,
                     data_format='NHWC',
-                    return_indices=True)
+                    return_mask=True)

         self.assertRaises(ValueError, run9)

@@ -83,7 +83,7 @@ class TestPool3D_API(unittest.TestCase):
                 stride=2,
                 padding=1,
                 ceil_mode=False,
-                count_include_pad=True)
+                exclusive=True)
             result_np = avg_pool3D_forward_naive(
                 input_np,
@@ -100,7 +100,7 @@ class TestPool3D_API(unittest.TestCase):
                 stride=None,
                 padding=1,
                 ceil_mode=False,
-                count_include_pad=True)
+                exclusive=True)
             result = avg_pool3d_dg(input)
             self.assertTrue(np.allclose(result.numpy(), result_np))
@@ -175,7 +175,7 @@ class TestPool3D_API(unittest.TestCase):
                 stride=2,
                 padding=0,
                 data_format="NDHWC",
-                return_indices=False)
+                return_mask=False)
             result_np = pool3D_forward_naive(
                 input_np,
@@ -239,7 +239,7 @@ class TestPool3D_API(unittest.TestCase):
                 kernel_size=2,
                 stride=None,
                 padding="SAME",
-                return_indices=True)
+                return_mask=True)
             result_np = pool3D_forward_naive(
                 input_np,
@@ -467,7 +467,7 @@ class TestPool3DError_API(unittest.TestCase):
                     stride=2,
                     padding=0,
                     data_format='NDHWC',
-                    return_indices=True)
+                    return_mask=True)

         self.assertRaises(ValueError, run10)
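As the two error tests above encode, requesting the mask together with a channels-last layout is still rejected after the rename; a short sketch of the guarded call (input shape chosen arbitrarily):

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([2, 32, 32, 32, 3])  # NDHWC layout
    try:
        F.max_pool3d(x, kernel_size=2, stride=2, padding=0,
                     data_format='NDHWC', return_mask=True)
    except ValueError:
        # mask output is only supported for channels-first data formats,
        # exactly as with the old return_indices flag
        pass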

(File diff suppressed because it is too large.)

@@ -73,7 +73,6 @@ class _InstanceNormBase(layers.Layer):
                  momentum=0.9,
                  weight_attr=None,
                  bias_attr=None,
-                 track_running_stats=False,
                  data_format="NCHW",
                  name=None):
         super(_InstanceNormBase, self).__init__()
@@ -135,9 +134,6 @@ class InstanceNorm1D(_InstanceNormBase):
         epsilon(float, optional): A value added to the denominator for
             numerical stability. Default is 1e-5.
         momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
-        track_running_stats(bool, optional): Whether to use global mean and
-            variance. In train mode, when setting track_running_stats True, the global mean
-            and variance are also used during train period. Default: False.
         weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
             of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
             will create ParamAttr as weight_attr, the name of scale can be set in ParamAttr.
@@ -159,9 +155,6 @@ class InstanceNorm1D(_InstanceNormBase):
     Returns:
         None.

-    **Note**:
-        Momentum and track_running_stats is not effective. The next version will fix the problem .
-
     Examples:
@@ -214,9 +207,6 @@ class InstanceNorm2D(_InstanceNormBase):
         epsilon(float, optional): A value added to the denominator for
             numerical stability. Default is 1e-5.
         momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
-        track_running_stats(bool, optional): Whether to use global mean and
-            variance. In train mode, when setting track_running_stats True, the global mean
-            and variance are also used during train period. Default: False.
         weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
             of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
             will create ParamAttr as weight_attr, the name of scale can be set in ParamAttr.
@@ -237,8 +227,6 @@ class InstanceNorm2D(_InstanceNormBase):
     Returns:
         None.

-    **Note**:
-        Momentum and track_running_stats is not effective. The next version will fix the problem .
-
     Examples:
@@ -290,9 +278,6 @@ class InstanceNorm3D(_InstanceNormBase):
         epsilon(float, optional): A value added to the denominator for
             numerical stability. Default is 1e-5.
         momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
-        track_running_stats(bool, optional): Whether to use global mean and
-            variance. In train mode, when setting track_running_stats True, the global mean
-            and variance are also used during train period. Default: False.
         weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
             of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
             will create ParamAttr as weight_attr, the name of scale can be set in ParamAttr.
@@ -313,8 +298,6 @@ class InstanceNorm3D(_InstanceNormBase):
     Returns:
         None.

-    **Note**:
-        Momentum and track_running_stats is not effective. The next version will fix the problem .
-
     Examples:
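With the three docstring blocks above removed, the InstanceNorm layers simply no longer accept track_running_stats; a minimal sketch of the resulting constructor surface:

    import paddle

    # The keyword is gone; instance norm always normalizes with statistics
    # computed from the current input.
    inorm = paddle.nn.InstanceNorm1D(num_features=3)
    y = inorm(paddle.rand([2, 3, 32]))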
@@ -570,7 +553,6 @@ class _BatchNormBase(layers.Layer):
                  weight_attr=None,
                  bias_attr=None,
                  data_format='NCHW',
-                 track_running_stats=True,
                  name=None):
         super(_BatchNormBase, self).__init__()
         self._num_features = num_features
@@ -636,7 +618,6 @@ class _BatchNormBase(layers.Layer):
         self._momentum = momentum
         self._epsilon = epsilon
         self._fuse_with_relu = False
-        self._track_running_stats = track_running_stats
         self._name = name

     def _check_input_dim(self, input):
@@ -651,11 +632,7 @@ class _BatchNormBase(layers.Layer):
         self._check_input_dim(input)

-        if not self.training and not self._track_running_stats:
-            raise ValueError(
-                'When inference, expected track_running_stats is True.')
-
-        if self.training and not self._track_running_stats:
+        if self.training:
             warnings.warn(
                 "When training, we now always track global mean and variance.")
@@ -720,9 +697,6 @@ class BatchNorm1D(_BatchNormBase):
             will create ParamAttr as bias_attr. If it is set to Fasle, the weight is not learnable.
             If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None.
         data_format(str, optional): Specify the input data format, may be "NC", "NCL" or "NLC". Defalut "NCL".
-        track_running_stats(bool, optional): Whether to use global mean and variance. In train period,
-            True will track global mean and variance used for inference. When inference, track_running_stats must be
-            True. Default: True.
         name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`..

     Shape:
@@ -733,9 +707,6 @@ class BatchNorm1D(_BatchNormBase):
     Returns:
         None.

-    **Note**:
-        Now track_running_stats is actucal always true. The next version will fix the problem .
-
     Examples:
         .. code-block:: python
@@ -817,9 +788,6 @@ class BatchNorm2D(_BatchNormBase):
             will create ParamAttr as bias_attr. If it is set to Fasle, the weight is not learnable.
             If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None.
         data_format(str, optional): Specify the input data format, the data format can be "NCHW" or "NHWC". Default: NCHW.
-        track_running_stats(bool, optional): Whether to use global mean and variance. In train period,
-            True will track global mean and variance used for inference. When inference, track_running_stats must be
-            True. Default: True.
         name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`..

     Shape:
@@ -830,9 +798,6 @@ class BatchNorm2D(_BatchNormBase):
     Returns:
         None

-    **Note**:
-        Now track_running_stats is actucal always true. The next version will fix the problem .
-
     Examples:
         .. code-block:: python
@@ -912,9 +877,6 @@ class BatchNorm3D(_BatchNormBase):
             will create ParamAttr as bias_attr. If it is set to Fasle, the weight is not learnable.
             If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None.
         data_format(str, optional): Specify the input data format, the data format can be "NCDHW" or "NDHWC. Default: NCDHW.
-        track_running_stats(bool, optional): Whether to use global mean and variance. In train period,
-            True will track global mean and variance used for inference. When inference, track_running_stats must be
-            True. Default: True.
         name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`..

     Shape:
@@ -925,9 +887,6 @@ class BatchNorm3D(_BatchNormBase):
     Returns:
         None

-    **Note**:
-        Now track_running_stats is actucal always true. The next version will fix the problem .
-
     Examples:
         .. code-block:: python
@@ -1024,8 +983,6 @@ class SyncBatchNorm(_BatchNormBase):
             will create ParamAttr as bias_attr. If the Initializer of the bias_attr
             is not set, the bias is initialized zero. If it is set to False, this layer will not
             have trainable bias parameter. Default: None.
-        track_running_stats(bool, optional): Whether to compute global stats, which including running mean and
-            running variance. Default: True.

     Shapes:
         input: Tensor that the dimension from 2 to 5.
@@ -1055,11 +1012,10 @@ class SyncBatchNorm(_BatchNormBase):
                  weight_attr=None,
                  bias_attr=None,
                  data_format='NCHW',
-                 track_running_stats=True,
                  name=None):
         super(SyncBatchNorm,
               self).__init__(num_features, momentum, epsilon, weight_attr,
-                             bias_attr, data_format, track_running_stats, name)
+                             bias_attr, data_format, name)

     def forward(self, x):
         # create output
@@ -1147,10 +1103,10 @@ class SyncBatchNorm(_BatchNormBase):
         """
         layer_output = layer
         if isinstance(layer, _BatchNormBase):
-            layer_output = SyncBatchNorm(
-                layer._num_features, layer._momentum, layer._epsilon,
-                layer._weight_attr, layer._bias_attr, layer._data_format,
-                layer._track_running_stats, layer._name)
+            layer_output = SyncBatchNorm(layer._num_features, layer._momentum,
+                                         layer._epsilon, layer._weight_attr,
+                                         layer._bias_attr, layer._data_format,
+                                         layer._name)

             if layer._weight_attr != False and layer._bias_attr != False:
                 with no_grad():
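The helper keeps its public shape and just stops forwarding the removed argument; typical usage is unchanged (the model definition here is illustrative):

    import paddle.nn as nn

    model = nn.Sequential(nn.Conv2D(3, 8, 3), nn.BatchNorm2D(8), nn.ReLU())
    # Recursively swaps every _BatchNormBase layer for SyncBatchNorm;
    # track_running_stats is no longer part of the constructor call.
    sync_model = nn.SyncBatchNorm.convert_sync_batchnorm(model)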

(File diff suppressed because it is too large.)