@@ -1,13 +1,17 @@
 import sys
 
 import paddle.v2 as paddle
 
 
-def seqToseq_net(source_dict_dim, target_dict_dim):
+def seqToseq_net(source_dict_dim, target_dict_dim, is_generating=False):
     ### Network Architecture
     word_vector_dim = 512  # dimension of word vector
     decoder_size = 512  # dimension of hidden unit in GRU Decoder network
     encoder_size = 512  # dimension of hidden unit in GRU Encoder network
+
+    beam_size = 3
+    max_length = 250
+
     #### Encoder
     src_word_id = paddle.layer.data(
         name='source_language_word',
@@ -67,30 +71,57 @@ def seqToseq_net(source_dict_dim, target_dict_dim):
     group_input2 = paddle.layer.StaticInputV2(input=encoded_proj, is_seq=True)
     group_inputs = [group_input1, group_input2]
 
-    trg_embedding = paddle.layer.embedding(
-        input=paddle.layer.data(
-            name='target_language_word',
-            type=paddle.data_type.integer_value_sequence(target_dict_dim)),
-        size=word_vector_dim,
-        param_attr=paddle.attr.ParamAttr(name='_target_language_embedding'))
-    group_inputs.append(trg_embedding)
-
-    # For a decoder equipped with an attention mechanism, in training,
-    # the target embedding (the ground truth) is the data input,
-    # while the encoded source sequence is accessed as an unbounded memory.
-    # Here, the StaticInput defines a read-only memory
-    # for the recurrent_group.
-    decoder = paddle.layer.recurrent_group(
-        name=decoder_group_name,
-        step=gru_decoder_with_attention,
-        input=group_inputs)
-
-    lbl = paddle.layer.data(
-        name='target_language_next_word',
-        type=paddle.data_type.integer_value_sequence(target_dict_dim))
-    cost = paddle.layer.classification_cost(input=decoder, label=lbl)
-
-    return cost
+    if not is_generating:
+        trg_embedding = paddle.layer.embedding(
+            input=paddle.layer.data(
+                name='target_language_word',
+                type=paddle.data_type.integer_value_sequence(target_dict_dim)),
+            size=word_vector_dim,
+            param_attr=paddle.attr.ParamAttr(name='_target_language_embedding'))
+        group_inputs.append(trg_embedding)
+
+        # For a decoder equipped with an attention mechanism, in training,
+        # the target embedding (the ground truth) is the data input,
+        # while the encoded source sequence is accessed as an unbounded memory.
+        # Here, the StaticInput defines a read-only memory
+        # for the recurrent_group.
+        decoder = paddle.layer.recurrent_group(
+            name=decoder_group_name,
+            step=gru_decoder_with_attention,
+            input=group_inputs)
+
+        lbl = paddle.layer.data(
+            name='target_language_next_word',
+            type=paddle.data_type.integer_value_sequence(target_dict_dim))
+        cost = paddle.layer.classification_cost(input=decoder, label=lbl)
+
+        return cost
+    else:
+        # In generation, the decoder predicts the next target word based on
+        # the encoded source sequence and the last generated target word.
+
+        # The encoded source sequence (the encoder's output) must be specified by
+        # StaticInput, which is a read-only memory.
+        # The embedding of the last generated word is automatically retrieved by
+        # GeneratedInput, which is initialized by a start mark, such as <s>,
+        # and must be included in generation.
+
+        trg_embedding = paddle.layer.GeneratedInputV2(
+            size=target_dict_dim,
+            embedding_name='_target_language_embedding',
+            embedding_size=word_vector_dim)
+        group_inputs.append(trg_embedding)
+
+        beam_gen = paddle.layer.beam_search(
+            name=decoder_group_name,
+            step=gru_decoder_with_attention,
+            input=group_inputs,
+            bos_id=0,
+            eos_id=1,
+            beam_size=beam_size,
+            max_length=max_length)
+
+        return beam_gen
 
 
 def main():
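
Taken together, the change makes seqToseq_net a dual-mode function: is_generating=False yields the training cost, is_generating=True yields a beam-search decoder over the same attention model. Below is a minimal usage sketch; it assumes the rest of this file (the encoder, gru_decoder_with_attention, and a data pipeline) and a working paddle.v2 installation, and the vocabulary sizes are placeholders.

import paddle.v2 as paddle

paddle.init(use_gpu=False, trainer_count=1)
source_dict_dim = target_dict_dim = 30000  # placeholder vocabulary sizes

# Training mode: returns the classification cost over ground-truth next words.
cost = seqToseq_net(source_dict_dim, target_dict_dim, is_generating=False)
parameters = paddle.parameters.create(cost)

# Generation mode: returns the beam_search layer; paddle.infer can decode
# from it with field=['prob', 'id'] to obtain beam scores and token ids.
beam_gen = seqToseq_net(source_dict_dim, target_dict_dim, is_generating=True)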
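
For intuition about the four decoding hyperparameters passed to beam_search (bos_id, eos_id, beam_size, max_length), here is a self-contained toy decoder. It only illustrates the roles of those parameters and is not PaddlePaddle's implementation; the step_log_probs callback and its fixed toy distribution are invented for the example.

import math

def toy_beam_search(step_log_probs, bos_id=0, eos_id=1, beam_size=3,
                    max_length=250):
    # Each hypothesis is (token sequence, accumulated log probability);
    # decoding starts from the start mark bos_id, as GeneratedInputV2 does.
    beams = [([bos_id], 0.0)]
    finished = []
    for _ in range(max_length):  # max_length caps the generated length
        candidates = []
        for seq, score in beams:
            for tok, logp in step_log_probs(seq).items():
                candidates.append((seq + [tok], score + logp))
        candidates.sort(key=lambda c: c[1], reverse=True)
        beams = []
        # beam_size bounds how many hypotheses survive each step
        for seq, score in candidates[:beam_size]:
            (finished if seq[-1] == eos_id else beams).append((seq, score))
        if not beams:  # every surviving hypothesis ended with eos_id
            break
    return sorted(finished + beams, key=lambda c: c[1], reverse=True)

# Toy next-token distribution: token 1 plays the role of eos_id.
dist = {1: math.log(0.4), 2: math.log(0.6)}
print(toy_beam_search(lambda seq: dist, max_length=4))

A larger beam_size keeps more alternative prefixes alive at each step at a linear cost in computation, while max_length guarantees termination even if no hypothesis ever emits eos_id.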