revert-15470-feature/imperative
JiabinYang, 6 years ago
parent af1cee5a35, commit e686818aed

@@ -28,6 +28,8 @@ void CreateGradOp(const framework::OpDesc& op_desc,
.GradOpMaker()(op_desc, no_grad_set, grad_to_var, grad_sub_block);
PADDLE_ENFORCE(grad_op_descs.size() == 1, "Only support 1 grad op now.");
// TODO(panyx0718): Leak?
// TODO(marsyang1993): Change grad_op_desc pointer to
// vector<framework::OpDesc*> to allow multi grad_op
*grad_op_desc = grad_op_descs[0].release();
}

@@ -23,11 +23,7 @@ from ..framework import Variable, OpProtoHolder
from ..param_attr import ParamAttr
from ..initializer import Normal, Constant
__all__ = [
'Conv2D',
'Pool2D',
'FC',
]
__all__ = ['Conv2D', 'Pool2D', 'FC', 'SimpleRNNCell']
class Conv2D(layers.Layer):
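For reference, widening __all__ like this is what lets the test file further down import the new cell next to the existing imperative layers; a minimal import sketch, using the same module path as that test:

from paddle.fluid.imperative.nn import Conv2D, Pool2D, FC, SimpleRNNCell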
@@ -251,14 +247,9 @@ class FC(layers.Layer):
class SimpleRNNCell(layers.Layer):
def __init__(self,
step_input_size,
hidden_size,
output_size,
param_attr,
dtype=core.VarDesc.VarType.FP32):
def __init__(self, step_input_size, hidden_size, output_size, param_attr):
super(SimpleRNNCell, self).__init__()
self.input_size = step_input_size
self.step_input_size = step_input_size
self.hidden_size = hidden_size
self.output_size = output_size
self._dype = core.VarDesc.VarType.FP32
@@ -266,7 +257,7 @@ class SimpleRNNCell(layers.Layer):
self._helper = LayerHelper(
'SimpleRNNCell', act="tanh", param_attr=param_attr)
def _build_once(self, inputs):
def _build_once(self, inputs, pre_hidden):
i2h_param_shape = [self.step_input_size, self.hidden_size]
h2h_param_shape = [self.hidden_size, self.hidden_size]
h2o_param_shape = [self.output_size, self.hidden_size]
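The rest of _build_once is not shown in this hunk. As a hedged sketch only, the three shapes above presumably become the weight parameters _i2h_w, _h2h_w and _h2o_w (those names are taken from the test file below; the exact LayerHelper.create_parameter call is an assumption, not part of this diff):

# Hedged sketch: one plausible way _build_once could turn the shapes above
# into parameters via the LayerHelper created in __init__.
self._i2h_w = self._helper.create_parameter(
    attr=self._helper.param_attr,
    shape=i2h_param_shape,
    dtype=self._dtype,
    is_bias=False)
self._h2h_w = self._helper.create_parameter(
    attr=self._helper.param_attr,
    shape=h2h_param_shape,
    dtype=self._dtype,
    is_bias=False)
self._h2o_w = self._helper.create_parameter(
    attr=self._helper.param_attr,
    shape=h2o_param_shape,
    dtype=self._dtype,
    is_bias=False)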
@@ -294,6 +285,7 @@ class SimpleRNNCell(layers.Layer):
out = self._helper.create_variable_for_type_inference(self._dype)
softmax_out = self._helper.create_variable_for_type_inference(
self._dtype)
self._helper.append_op(
type="mul",
inputs={"X": input,
@@ -301,7 +293,7 @@ class SimpleRNNCell(layers.Layer):
outputs={"Out": tmp_i2h},
attrs={"x_num_col_dims": 1,
"y_num_col_dims": 1})
print("mul op 1")
self._helper.append_op(
type="mul",
inputs={"X": pre_hidden,
@@ -309,15 +301,45 @@ class SimpleRNNCell(layers.Layer):
outputs={"Out": tmp_h2h},
attrs={"x_num_col_dims": 1,
"y_num_col_dims": 1})
print("mul op 2")
self._helper.append_op(
type='sum',
inputs={'X': [tmp_i2h, tmp_h2h]},
type="elementwise_add",
inputs={'X': tmp_h2h,
'Y': tmp_i2h},
outputs={'Out': hidden},
attrs={'use_mkldnn': False})
attrs={'axis': -1,
'use_mkldnn': False})
print("elementwise op 1")
self._helper.append_op(
type='print',
inputs={'In': hidden},
attrs={
'first_n': -1,
'summarize': -1,
'message': None or "",
'print_tensor_name': True,
'print_tensor_type': True,
'print_tensor_shape': True,
'print_tensor_lod': True,
'print_phase': 'BOTH'
})
hidden = self._helper.append_activation(hidden)
self._helper.append_op(
type='print',
inputs={'In': hidden},
attrs={
'first_n': -1,
'summarize': -1,
'message': None or "",
'print_tensor_name': True,
'print_tensor_type': True,
'print_tensor_shape': True,
'print_tensor_lod': True,
'print_phase': 'BOTH'
})
self._helper.append_op(
type="mul",
inputs={"X": hidden,
@@ -325,11 +347,13 @@ class SimpleRNNCell(layers.Layer):
outputs={"Out": out},
attrs={"x_num_col_dims": 1,
"y_num_col_dims": 1})
print("mul op 3")
self._helper.append_op(
type="softmax",
inputs={"X": out},
outputs={"Out": softmax_out},
attrs={"use_cudnn": False})
print("softmax op 1")
return softmax_out, hidden
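Taken together, the ops appended in forward() implement one vanilla RNN step followed by an output projection and softmax (the two Print ops appear to be debug output only). A self-contained NumPy sketch of the same math, using the 3/3/3 sizes the test below uses:

import numpy as np

def simple_rnn_step(x, pre_hidden, w_i2h, w_h2h, w_h2o):
    # mul op 1: project the step input          -> tmp_i2h
    tmp_i2h = x.dot(w_i2h)
    # mul op 2: project the previous hidden     -> tmp_h2h
    tmp_h2h = pre_hidden.dot(w_h2h)
    # elementwise_add + tanh activation         -> hidden
    hidden = np.tanh(tmp_h2h + tmp_i2h)
    # mul op 3 + softmax                        -> softmax_out
    out = hidden.dot(w_h2o)
    softmax_out = np.exp(out) / np.exp(out).sum(axis=-1, keepdims=True)
    return softmax_out, hidden

# With step_input_size = hidden_size = output_size = 3, as in the test below:
x = np.ones((1, 3), dtype=np.float32)
h = np.zeros((1, 3), dtype=np.float32)
w = np.full((3, 3), 0.1, dtype=np.float32)
print(simple_rnn_step(x, h, w, w, w))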

@@ -19,7 +19,10 @@ import sys
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.imperative.nn import FC, SimpleRNNCell
from paddle.fluid.imperative.nn import FC
from paddle.fluid.imperative.nn import SimpleRNNCell
from typing import List, Any, Tuple
from test_imperative_base import new_program_scope
@@ -67,14 +70,34 @@ class MLP(fluid.imperative.Layer):
class SimpleRNN(fluid.imperative.Layer):
def __init__(self, inputs):
def __init__(self):
super(SimpleRNN, self).__init__()
self.seq_len = input.shape[0]
self.cell = SimpleRNNCell(input.shape[1], out)
self.seq_len = 4
self._cell = SimpleRNNCell(
3,
3,
3,
fluid.ParamAttr(initializer=fluid.initializer.Constant(value=0.1)))
def forward(self, inputs):
out = list()
pre_hiddens = list()
init_hidden = fluid.layers.tensor.create_parameter(
attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.1)),
shape=[1, 3],
dtype='float32',
is_bias=False)
pre_hidden = init_hidden
for i in range(self.seq_len):
x = self._fc1(inputs[i])
input = fluid.layers.slice(
inputs, axes=[1], starts=[i], ends=[i + 1])
input = fluid.layers.reshape(input, shape=[1, 3])
pre_hidden, out_softmax = self._cell(input, pre_hidden)
out.append(out_softmax)
return out, pre_hiddens
class TestImperative(unittest.TestCase):
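For context on how SimpleRNN is meant to be exercised, here is a minimal imperative-mode sketch; it is essentially the block that is still commented out inside test_rnn below, so treat it as illustrative rather than part of this change:

import numpy as np
import paddle.fluid as fluid

# SimpleRNN is the class defined just above; values 1..12, as in test_rnn.
np_inp = np.arange(1.0, 13.0, dtype=np.float32).reshape(1, 4, 3)
with fluid.imperative.guard():
    var_inp = fluid.imperative.base.to_variable(np_inp)
    simple_rnn = SimpleRNN()
    outs, pre_hiddens = simple_rnn.forward(var_inp)
    dy_out = outs[3]._numpy()                       # forward value of the last step
    outs[3]._backward()                             # backprop from the last step's output
    dy_grad = simple_rnn._cell._i2h_w._gradient()   # gradient of the input-to-hidden weight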
@@ -207,8 +230,41 @@ class TestImperative(unittest.TestCase):
self.assertTrue(np.allclose(dy_out, static_out))
self.assertTrue(np.allclose(dy_grad, static_grad))
def test_rnn_ptb(self):
np_inp = np.arrary([])
def test_rnn(self):
np_inp = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0],
[10.0, 11.0, 12.0]])
np_inp = np_inp.reshape((1, 4, 3))
np_inp = np_inp.astype(np.float32)
# with fluid.imperative.guard():
# var_inp = fluid.imperative.base.to_variable(np_inp)
# var_inp = fluid.layers.reshape(var_inp, shape=[1, 4, 3])
# simple_rnn = SimpleRNN()
# outs, pre_hiddens = simple_rnn.forward(var_inp)
# dy_out = outs[3]._numpy()
# outs[3]._backward()
# dy_grad = simple_rnn._cell._i2h_w._gradient()
# print("dy_grad is {}".format(dy_grad))
with new_program_scope():
print("im here")
inp = fluid.layers.data(
name="inp", shape=[1, 4, 3], append_batch_size=False)
simple_rnn = SimpleRNN()
outs, pre_hiddens = simple_rnn(inp)
param_grads = fluid.backward.append_backward(
outs[3],
parameter_list=[
simple_rnn._cell._i2h_w.name, simple_rnn._cell._h2h_w.name,
simple_rnn._cell._h2o_w.name
])
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
# print("param_grads is : {} ".format(param_grads))
static_out, static_grad = exe.run(
feed={inp.name: np_inp},
fetch_list=[outs[3].name, param_grads[2][1].name])
# self.assertTrue(np.allclose(dy_out, static_out))
# self.assertTrue(np.allclose(dy_grad, static_grad))
if __name__ == '__main__':
