clean redundant API alias in 2.0 - part 1 (#29928)
* rm check_import_scipy, rm chunk_eval and mean_iou in paddle.metric.__init__.py
* Revert "rm check_import_scipy, rm chunk_eval and mean_iou in paddle.metric.__init__.py"
  This reverts commit 179ba8c2b22bc31fe8d8a126e31820792cbd0f4e.
* delete paddle.metric.chunk_eval and paddle.metric.mean_iou
* delete paddle.nn.clip and paddle.nn.clip_by_norm
* delete paddle.nn.functional.activation.hard_sigmoid and paddle.nn.functional.activation.hard_swish
* delete paddle.nn.Pool2D, paddle.nn.BilinearTensorProduct, paddle.nn.RowConv, paddle.nn.functional.row_conv
* fix extension import error
* fix unittest for row_conv and Pool2D
parent 181ea1870b
commit 726c78f293
@@ -1,131 +0,0 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np

from paddle import fluid, nn
import paddle.fluid.dygraph as dg
import paddle.fluid.initializer as I
import paddle.nn.functional as F
import unittest


class RowConvTestCase(unittest.TestCase):
    def __init__(self,
                 methodName='runTest',
                 batch_size=4,
                 num_channels=8,
                 time_steps=12,
                 context_size=3,
                 act=None,
                 dtype="float32"):
        super(RowConvTestCase, self).__init__(methodName=methodName)
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.time_steps = time_steps
        self.context_size = context_size
        self.act = act
        self.dtype = dtype

    def setUp(self):
        input_shape = (self.batch_size, self.time_steps, self.num_channels)
        self.input = np.random.uniform(size=input_shape).astype(self.dtype)
        self.weight_shape = weight_shape = (self.context_size + 1,
                                            self.num_channels)
        self.weight = np.random.uniform(size=weight_shape).astype(self.dtype)

    def fluid_layer(self, place):
        main = fluid.Program()
        start = fluid.Program()
        with fluid.unique_name.guard():
            with fluid.program_guard(main, start):
                x = fluid.data(
                    "input", [-1, -1, self.num_channels], dtype=self.dtype)
                y = fluid.layers.row_conv(
                    x,
                    self.context_size,
                    param_attr=I.NumpyArrayInitializer(self.weight),
                    act=self.act)
        exe = fluid.Executor(place)
        exe.run(start)
        y_np, = exe.run(main, feed={"input": self.input}, fetch_list=[y])
        return y_np

    def functional_declarative(self, place):
        main = fluid.Program()
        start = fluid.Program()
        with fluid.unique_name.guard():
            with fluid.program_guard(main, start):
                x = fluid.data(
                    "input", [-1, -1, self.num_channels], dtype=self.dtype)
                w = fluid.data("weight", self.weight_shape, dtype=self.dtype)
                y = F.extension.row_conv(x, w, act=self.act)
        exe = fluid.Executor(place)
        exe.run(start)
        y_np, = exe.run(main,
                        feed={"input": self.input,
                              "weight": self.weight},
                        fetch_list=[y])
        return y_np

    def functional_imperative(self, place):
        with dg.guard(place):
            x_var = dg.to_variable(self.input)
            w_var = dg.to_variable(self.weight)
            y_var = F.extension.row_conv(x_var, w_var, act=self.act)
            y_np = y_var.numpy()
        return y_np

    def nn_layer(self, place):
        with dg.guard(place):
            x_var = dg.to_variable(self.input)
            conv = nn.RowConv(
                self.num_channels,
                self.context_size,
                param_attr=I.NumpyArrayInitializer(self.weight),
                act=self.act,
                dtype=self.dtype)
            y_var = conv(x_var)
            y_np = y_var.numpy()
        return y_np

    def _test_equivalence(self, place):
        result1 = self.fluid_layer(place)
        result2 = self.functional_declarative(place)
        result3 = self.functional_imperative(place)
        result4 = self.nn_layer(place)
        np.testing.assert_array_almost_equal(result1, result2)
        np.testing.assert_array_almost_equal(result2, result3)
        np.testing.assert_array_almost_equal(result3, result4)

    def runTest(self):
        place = fluid.CPUPlace()
        self._test_equivalence(place)

        if fluid.core.is_compiled_with_cuda():
            place = fluid.CUDAPlace(0)
            self._test_equivalence(place)


def load_tests(loader, standard_tests, pattern):
    suite = unittest.TestSuite()
    suite.addTest(RowConvTestCase(methodName="runTest"))
    suite.addTest(RowConvTestCase(methodName="runTest", act="sigmoid"))
    suite.addTest(
        RowConvTestCase(
            methodName="runTest", context_size=5, act="sigmoid"))
    return suite


if __name__ == "__main__":
    unittest.main()
@@ -1,99 +0,0 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__all__ = ['RowConv']

from ...fluid.dygraph import layers
from .. import functional as F


class RowConv(layers.Layer):
    """
    **Row-convolution operator**

    Row convolution is also called lookahead convolution. This operator was
    introduced in the following paper for
    `DeepSpeech2 <http://www.cs.cmu.edu/~dyogatam/papers/wang+etal.iclrworkshop2016.pdf>`_.

    The main motivation is that a bidirectional RNN, useful in DeepSpeech-like
    speech models, learns a representation for a sequence by performing a
    forward and a backward pass through the entire sequence. However, unlike
    unidirectional RNNs, bidirectional RNNs are challenging to deploy in an
    online, low-latency setting. The lookahead convolution incorporates
    information from future subsequences in a computationally efficient manner
    to improve unidirectional recurrent neural networks. The row convolution
    operator is different from the 1D sequence convolution, and is computed
    as follows:

    Given an input sequence X of length t and input dimension D, and a filter
    W with shape [context + 1, D] (where context is the future context size),
    the output at time step i is

        out(i, d) = sum_{j=0}^{context} W(j, d) * X(i + j, d)

    where rows of X beyond the last time step are treated as zero.

    For more details about row_conv, please refer to the design document
    `<https://github.com/PaddlePaddle/Paddle/issues/2228#issuecomment-303903645>`_ .

    Parameters:
        num_channels (int): Feature size of the input data.
        future_context_size (int): Future context size. Please note that the
            shape of the convolution kernel is [future_context_size + 1, D].
        param_attr (ParamAttr): Attributes of the parameters, including
            name, initializer, etc. Default: None.
        act (str): Non-linear activation to be applied to the output tensor.
            Default: None.
        dtype (str, optional): Data type; it can be "float32". Default: "float32".

    Attributes:
        weight (Parameter): The learnable weight (convolution kernel) of this
            layer, with shape [future_context_size + 1, D].

    Returns:
        None

    Examples:
        .. code-block:: python

            import numpy as np

            import paddle
            from paddle import nn

            batch_size = 4
            time_steps = 8
            feature_size = 6
            context_size = 4

            x = np.random.randn(batch_size, time_steps, feature_size).astype(np.float32)
            x = paddle.to_tensor(x)

            conv = nn.RowConv(feature_size, context_size)
            y = conv(x)
            print(y.shape)
            # [4, 8, 6]
    """

    def __init__(self,
                 num_channels,
                 future_context_size,
                 param_attr=None,
                 act=None,
                 dtype="float32"):
        super(RowConv, self).__init__()
        self._dtype = dtype
        self._param_attr = param_attr
        self._act = act

        filter_shape = [future_context_size + 1, num_channels]
        self.weight = self.create_parameter(
            filter_shape, attr=param_attr, dtype=dtype)

    def forward(self, input):
        out = F.extension.row_conv(input, self.weight, act=self._act)
        return out
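For readers tracing what the removed operator computed, here is a minimal NumPy sketch of the lookahead (row) convolution, mirroring the semantics documented above: a weight of shape [context + 1, D] and zero padding past the last time step. `row_conv_ref` is a hypothetical helper written for illustration only; it is not part of the Paddle API and not the deleted operator's kernel.

    import numpy as np

    def row_conv_ref(x, w):
        # x: [batch, time, channels]; w: [context + 1, channels].
        # out[b, t, d] = sum over c in 0..context of w[c, d] * x[b, t + c, d],
        # with time steps past the end of the sequence treated as zero.
        batch, time_steps, channels = x.shape
        context = w.shape[0] - 1
        # Zero-pad the "future" so every lookahead window is full.
        padded = np.concatenate(
            [x, np.zeros((batch, context, channels), dtype=x.dtype)], axis=1)
        out = np.zeros_like(x)
        for c in range(context + 1):
            out += w[c] * padded[:, c:c + time_steps, :]
        return out

    # Example with the shapes from the docstring above (context_size = 4).
    x = np.random.randn(4, 8, 6).astype(np.float32)
    w = np.random.randn(5, 6).astype(np.float32)
    print(row_conv_ref(x, w).shape)  # (4, 8, 6)

Note how the output keeps the input's shape: each time step only mixes in up to `context` future steps of the same channel, which is what makes the operator cheap enough for online, low-latency decoding.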