Merge pull request #15470 from JiabinYang/feature/imperative

Add simple RNN in imperative
Jiabin Yang 6 years ago committed by GitHub
commit 075df09f86

@@ -31,6 +31,7 @@ void CreateGradOp(const framework::OpDesc& op_desc,
framework::OpInfoMap::Instance()
.Get(op_desc.Type())
.GradOpMaker()(op_desc, no_grad_set, grad_to_var, grad_sub_block);
for (auto& desc : descs) {
grad_op_descs->emplace_back(desc.release());
}

@@ -13,7 +13,9 @@
// limitations under the License.
#pragma once
#include <string>
#include <utility>
#include <vector>
#include "paddle/fluid/inference/analysis/analysis_pass.h"
#include "paddle/fluid/platform/port.h"

@@ -1,4 +1,4 @@
cc_library(benchmark SRCS benchmark.cc DEPS enforce)
cc_test(test_benchmark SRCS benchmark_tester.cc DEPS benchmark)
cc_binary(visualizer SRCS visualizer.cc DEPS analysis
paddle_pass_builder ir_pass_manager pass graph_viz_pass analysis_passes)
#cc_binary(visualizer SRCS visualizer.cc DEPS analysis
# paddle_pass_builder ir_pass_manager pass graph_viz_pass analysis_passes)

@@ -22,13 +22,7 @@ from . import layers
from ..framework import Variable, OpProtoHolder
from ..param_attr import ParamAttr
from ..initializer import Normal, Constant
__all__ = [
'Conv2D',
'Pool2D',
'FC',
'BatchNorm',
]
__all__ = ['Conv2D', 'Pool2D', 'FC', 'BatchNorm', 'Embedding']
class Conv2D(layers.Layer):
@@ -414,3 +408,91 @@ class BatchNorm(layers.Layer):
# Currently, we don't support inplace in imperative mode
return self._helper.append_activation(batch_norm_out)
class Embedding(layers.Layer):
"""
**Embedding Layer**
    This layer is used to look up embeddings of IDs, provided by :attr:`input`, in
    a lookup table. The result of this lookup is the embedding of each ID in
    :attr:`input`.
All the input variables are passed in as local variables to the LayerHelper
constructor.
Args:
size(tuple|list): The shape of the look up table parameter. It should
have two elements which indicate the size of the dictionary of
embeddings and the size of each embedding vector respectively.
is_sparse(bool): The flag indicating whether to use sparse update.
        is_distributed(bool): Whether to run the lookup table on a remote parameter server.
        padding_idx(int|long|None): If :attr:`None`, it has no effect on the lookup.
            Otherwise, whenever :attr:`padding_idx` is encountered in :attr:`input`, the
            corresponding output is padded with zeros. If :math:`padding_idx < 0`, the
            :attr:`padding_idx` used in the lookup is :math:`size[0] + padding_idx`.
        param_attr(ParamAttr): Parameter attributes for the learnable lookup table.
        dtype(np.dtype|core.VarDesc.VarType|str): The data type of the lookup table,
            e.g. float32, float16, int8.
Returns:
Variable: The tensor variable storing the embeddings of the \
supplied inputs.
Examples:
.. code-block:: python
          dict_size = len(dataset.ids)
          ids = fluid.layers.data(name='ids', shape=[32, 1], dtype='int64')
          embedding = fluid.imperative.Embedding(size=[dict_size, 16])
          emb = embedding(ids)
"""
def __init__(self,
size,
is_sparse=False,
is_distributed=False,
padding_idx=None,
param_attr=None,
dtype='float32'):
super(Embedding, self).__init__()
self._size = size
self._is_sparse = is_sparse
self._is_distributed = is_distributed
self._padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else (
size[0] + padding_idx)
self._param_attr = param_attr
self._dtype = dtype
self._remote_prefetch = self._is_sparse and (not self._is_distributed)
if self._remote_prefetch:
assert self._is_sparse is True and self._is_distributed is False
from ..layer_helper import LayerHelper
self._helper = LayerHelper('embedding', param_attr=param_attr)
self._w = self._helper.create_parameter(
attr=self._param_attr,
shape=self._size,
dtype=self._dtype,
is_bias=False)
def parameters(self):
return [self._w]
def forward(self, input):
out = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type='lookup_table',
inputs={'Ids': input,
'W': self._w},
outputs={'Out': out},
attrs={
'is_sparse': self._is_sparse,
'is_distributed': self._is_distributed,
'remote_prefetch': self._remote_prefetch,
'padding_idx': self._padding_idx
})
return out
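A minimal usage sketch of the new imperative Embedding (not part of the diff): the table size, ids, and padding_idx below are illustrative, and the guard/to_variable/_numpy calls follow the imperative API used in the tests of this PR.

import numpy as np
import paddle.fluid as fluid

# Four ids looked up in an illustrative 10 x 16 table; id 0 is treated as padding.
ids = np.array([[3], [0], [7], [1]]).astype('int64')

with fluid.imperative.guard():
    embedding = fluid.imperative.Embedding(
        size=[10, 16],     # [dict_size, embedding_dim]
        padding_idx=0,     # lookups of id 0 return all-zero rows
        param_attr=fluid.ParamAttr(
            initializer=fluid.initializer.Constant(value=0.1)))
    emb = embedding(fluid.imperative.base.to_variable(ids))
    print(emb._numpy().shape)  # one embedding row per input id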

@@ -66,6 +66,128 @@ class MLP(fluid.imperative.Layer):
return x
class SimpleRNNCell(fluid.imperative.Layer):
def __init__(self, step_input_size, hidden_size, output_size, param_attr):
super(SimpleRNNCell, self).__init__()
self.step_input_size = step_input_size
self.hidden_size = hidden_size
self.output_size = output_size
        self._dtype = core.VarDesc.VarType.FP32
from paddle.fluid.layer_helper import LayerHelper
self._helper = LayerHelper(
'SimpleRNNCell', act="tanh", param_attr=param_attr)
def _build_once(self, inputs, pre_hidden):
i2h_param_shape = [self.step_input_size, self.hidden_size]
h2h_param_shape = [self.hidden_size, self.hidden_size]
        h2o_param_shape = [self.hidden_size, self.output_size]
self._i2h_w = self._helper.create_parameter(
attr=self._helper.param_attr,
shape=i2h_param_shape,
dtype=self._dtype,
is_bias=False)
self._h2h_w = self._helper.create_parameter(
attr=self._helper.param_attr,
shape=h2h_param_shape,
dtype=self._dtype,
is_bias=False)
self._h2o_w = self._helper.create_parameter(
attr=self._helper.param_attr,
shape=h2o_param_shape,
dtype=self._dtype,
is_bias=False)
def forward(self, input, pre_hidden):
tmp_i2h = self._helper.create_variable_for_type_inference(self._dtype)
tmp_h2h = self._helper.create_variable_for_type_inference(self._dtype)
        hidden = self._helper.create_variable_for_type_inference(self._dtype)
        out = self._helper.create_variable_for_type_inference(self._dtype)
softmax_out = self._helper.create_variable_for_type_inference(
self._dtype)
reduce_out = self._helper.create_variable_for_type_inference(
self._dtype)
self._helper.append_op(
type="mul",
inputs={"X": input,
"Y": self._i2h_w},
outputs={"Out": tmp_i2h},
attrs={"x_num_col_dims": 1,
"y_num_col_dims": 1})
self._helper.append_op(
type="mul",
inputs={"X": pre_hidden,
"Y": self._h2h_w},
outputs={"Out": tmp_h2h},
attrs={"x_num_col_dims": 1,
"y_num_col_dims": 1})
self._helper.append_op(
type="elementwise_add",
inputs={'X': tmp_h2h,
'Y': tmp_i2h},
outputs={'Out': hidden},
attrs={'axis': -1,
'use_mkldnn': False})
hidden = self._helper.append_activation(hidden)
self._helper.append_op(
type="mul",
inputs={"X": hidden,
"Y": self._h2o_w},
outputs={"Out": out},
attrs={"x_num_col_dims": 1,
"y_num_col_dims": 1})
self._helper.append_op(
type="softmax",
inputs={"X": out},
outputs={"Out": softmax_out},
attrs={"use_cudnn": False})
self._helper.append_op(
type='reduce_sum',
inputs={'X': softmax_out},
outputs={'Out': reduce_out},
attrs={'dim': None,
'keep_dim': False,
'reduce_all': True})
return reduce_out, hidden
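For readers tracing the appended ops above, here is a hedged NumPy sketch (not part of the PR) of what a single SimpleRNNCell.forward step computes; the weight shapes follow the parameters created in _build_once, and the helper name simple_rnn_step is purely illustrative.

import numpy as np

def simple_rnn_step(x, pre_hidden, w_i2h, w_h2h, w_h2o):
    # x: [1, step_input_size], pre_hidden: [1, hidden_size]
    # mul(x, w_i2h) + mul(pre_hidden, w_h2h), then the tanh activation
    hidden = np.tanh(x.dot(w_i2h) + pre_hidden.dot(w_h2h))
    # hidden-to-output projection followed by softmax over the last axis
    out = hidden.dot(w_h2o)
    softmax_out = np.exp(out) / np.exp(out).sum(axis=-1, keepdims=True)
    # reduce_sum with reduce_all=True collapses the softmax output to a scalar
    reduce_out = softmax_out.sum()
    return reduce_out, hidden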
class SimpleRNN(fluid.imperative.Layer):
def __init__(self):
super(SimpleRNN, self).__init__()
self.seq_len = 4
self._cell = SimpleRNNCell(
3,
3,
3,
fluid.ParamAttr(initializer=fluid.initializer.Constant(value=0.1)))
def forward(self, inputs):
outs = list()
pre_hiddens = list()
init_hidden = fluid.layers.tensor.create_parameter(
attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.1)),
shape=[1, 3],
dtype='float32',
is_bias=False)
pre_hidden = init_hidden
        for i in range(self.seq_len):
            step_input = fluid.layers.slice(
                inputs, axes=[1], starts=[i], ends=[i + 1])
            step_input = fluid.layers.reshape(step_input, shape=[1, 3])
            out_softmax, pre_hidden = self._cell(step_input, pre_hidden)
            outs.append(out_softmax)
            pre_hiddens.append(pre_hidden)
        return outs, pre_hiddens
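Continuing that sketch, SimpleRNN simply unrolls the cell over seq_len steps by slicing the [1, 4, 3] input along the time axis. A hedged NumPy equivalent (again illustrative, reusing simple_rnn_step from the block above; the constant-0.1 initial hidden state mirrors init_hidden in the test):

import numpy as np

def simple_rnn_unroll(inputs, w_i2h, w_h2h, w_h2o, seq_len=4):
    # inputs: [1, seq_len, 3]
    pre_hidden = np.full((1, 3), 0.1, dtype=np.float32)
    outs, pre_hiddens = [], []
    for i in range(seq_len):
        step_input = inputs[:, i, :].reshape(1, 3)   # slice + reshape, as in forward()
        out, pre_hidden = simple_rnn_step(step_input, pre_hidden,
                                          w_i2h, w_h2h, w_h2o)
        outs.append(out)
        pre_hiddens.append(pre_hidden)
    return outs, pre_hiddens   # test_rnn below backpropagates from outs[3]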
class TestImperative(unittest.TestCase):
def test_sum_op(self):
x = np.ones([2, 2], np.float32)
@@ -211,6 +333,41 @@ class TestImperative(unittest.TestCase):
self.assertTrue(np.allclose(dy_out, static_out))
self.assertTrue(np.allclose(dy_grad, static_grad))
def test_rnn(self):
np_inp = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0],
[10.0, 11.0, 12.0]])
np_inp = np_inp.reshape((1, 4, 3))
np_inp = np_inp.astype(np.float32)
with fluid.imperative.guard():
var_inp = fluid.imperative.base.to_variable(np_inp)
var_inp = fluid.layers.reshape(var_inp, shape=[1, 4, 3])
simple_rnn = SimpleRNN()
outs, pre_hiddens = simple_rnn.forward(var_inp)
dy_out = outs[3]._numpy()
outs[3]._backward()
dy_grad_h2o = simple_rnn._cell._h2o_w._gradient()
dy_grad_h2h = simple_rnn._cell._h2h_w._gradient()
dy_grad_i2h = simple_rnn._cell._i2h_w._gradient()
with new_program_scope():
inp = fluid.layers.data(
name="inp", shape=[1, 4, 3], append_batch_size=False)
simple_rnn = SimpleRNN()
outs, pre_hiddens = simple_rnn(inp)
param_grads = fluid.backward.append_backward(outs[3])
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
static_out, static_grad_h2o, static_grad_h2h, static_grad_i2h = exe.run(
feed={inp.name: np_inp},
fetch_list=[
outs[3].name, param_grads[0][1].name,
param_grads[1][1].name, param_grads[2][1].name
])
self.assertTrue(np.allclose(dy_out, static_out))
self.assertTrue(np.allclose(dy_grad_h2o, static_grad_h2o))
self.assertTrue(np.allclose(dy_grad_h2h, static_grad_h2h))
self.assertTrue(np.allclose(dy_grad_i2h, static_grad_i2h))
if __name__ == '__main__':
unittest.main()
