@@ -3115,9 +3115,7 @@ def recurrent_group(step,
                     reverse=False,
                     name=None,
                     targetInlink=None,
-                    is_generating=False,
-                    in_args_converter=None,
-                    boot_layer=None):
+                    is_generating=False):
     """
     Recurrent layer group is an extremely flexible recurrent unit in
     PaddlePaddle. As long as the user defines the calculation done within a
@@ -3260,13 +3258,7 @@ def recurrent_group(step,
 
     assert (is_generating != has_LayerOutput)
 
-    if in_args_converter is None:
-        layer_outs = step(*in_args)
-    else:
-        # append boot_layer to the last of in_args
-        if boot_layer is not None:
-            in_args.append(boot_layer)
-        layer_outs = step(*in_args_converter(*in_args)).to_proto(dict())
+    layer_outs = step(*in_args)
 
     if isinstance(layer_outs, LayerOutput):
         layer_outs = [layer_outs]
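After this change, recurrent_group no longer accepts in_args_converter or
boot_layer; the step callback is always invoked directly on the unpacked
inputs (layer_outs = step(*in_args)). Below is a minimal call sketch under
the simplified signature — a sketch assuming the legacy
paddle.trainer_config_helpers API; the layer names and sizes are
illustrative, not taken from this patch:

    from paddle.trainer_config_helpers import *

    def step(y):
        # Each time step sees one element of the input sequence. Recurrent
        # state is carried explicitly via memory(), not via the removed
        # boot_layer argument; memory() itself can be given a boot layer
        # when an initial state is needed.
        mem = memory(name="rnn_state", size=128)
        # Naming the output layer "rnn_state" links it back to the memory.
        return fc_layer(input=[y, mem], size=128, name="rnn_state")

    seq = data_layer(name="sequence", size=64)
    rnn = recurrent_group(step=step, input=seq, reverse=False)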