add book04.word2vec train test (#5002)
* init
* ensure ids in lookup table op must be a column vector
* add book4 configuration in test_layers
* debug test_book4
* add test_word2vec
* follow comments
* follow comments

revert-4814-Add_sequence_project_op
parent 40e7caf667
commit fcd74e06b8
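One point from the commit message worth calling out: the ids fed to the lookup-table (embedding) op must be a column vector, which is why the test below calls np.expand_dims(..., axis=1) on every batch of word ids before feeding them. A minimal sketch of that shape requirement in plain NumPy (illustrative only, not the Paddle API):

    import numpy as np

    # A batch of word ids, one id per example.
    ids = np.array([7, 42, 3], dtype="int32")
    # Reshape to a column vector of shape (batch, 1), the layout the
    # lookup-table op expects for its ids input.
    ids_column = np.expand_dims(ids, axis=1)
    assert ids_column.shape == (3, 1)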
@@ -0,0 +1,165 @@
import paddle.v2 as paddle
import paddle.v2.framework.layers as layers
import paddle.v2.framework.core as core
import paddle.v2.framework.optimizer as optimizer

from paddle.v2.framework.framework import Program, g_program
from paddle.v2.framework.executor import Executor

import numpy as np

init_program = Program()
program = Program()

embed_size = 32
hidden_size = 256
N = 5
batch_size = 32

word_dict = paddle.dataset.imikolov.build_dict()
dict_size = len(word_dict)
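
# Data layers for the four context words of an N-gram window plus the word to
# predict ('nextw'); each carries one int32 word id per example.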
first_word = layers.data(
    name='firstw',
    shape=[1],
    data_type='int32',
    program=program,
    init_program=init_program)
second_word = layers.data(
    name='secondw',
    shape=[1],
    data_type='int32',
    program=program,
    init_program=init_program)
third_word = layers.data(
    name='thirdw',
    shape=[1],
    data_type='int32',
    program=program,
    init_program=init_program)
forth_word = layers.data(
    name='forthw',
    shape=[1],
    data_type='int32',
    program=program,
    init_program=init_program)
next_word = layers.data(
    name='nextw',
    shape=[1],
    data_type='int32',
    program=program,
    init_program=init_program)
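
# All four context words share a single embedding table via the parameter
# name 'shared_w'; only the first declaration specifies the uniform(-1, 1)
# initializer, the others just reference the same parameter.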
embed_param_attr_1 = {
    'name': 'shared_w',
    'init_attr': {
        'max': 1.0,
        'type': 'uniform_random',
        'min': -1.0
    }
}
embed_param_attr_2 = {'name': 'shared_w'}

embed_first = layers.embedding(
    input=first_word,
    size=[dict_size, embed_size],
    data_type='float32',
    param_attr=embed_param_attr_1,
    program=program,
    init_program=init_program)
embed_second = layers.embedding(
    input=second_word,
    size=[dict_size, embed_size],
    data_type='float32',
    param_attr=embed_param_attr_2,
    program=program,
    init_program=init_program)

embed_third = layers.embedding(
    input=third_word,
    size=[dict_size, embed_size],
    data_type='float32',
    param_attr=embed_param_attr_2,
    program=program,
    init_program=init_program)
embed_forth = layers.embedding(
    input=forth_word,
    size=[dict_size, embed_size],
    data_type='float32',
    param_attr=embed_param_attr_2,
    program=program,
    init_program=init_program)
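
# Concatenate the four (batch, embed_size) embeddings along axis 1, then a
# sigmoid hidden layer and a softmax over the whole dictionary predict the
# next word; the cost is the average cross-entropy against 'nextw'.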
concat_embed = layers.concat(
    input=[embed_first, embed_second, embed_third, embed_forth],
    axis=1,
    program=program,
    init_program=init_program)

hidden1 = layers.fc(input=concat_embed,
                    size=hidden_size,
                    act='sigmoid',
                    program=program,
                    init_program=init_program)
predict_word = layers.fc(input=hidden1,
                         size=dict_size,
                         act='softmax',
                         program=program,
                         init_program=init_program)
cost = layers.cross_entropy(
    input=predict_word,
    label=next_word,
    program=program,
    init_program=init_program)
avg_cost = layers.mean(x=cost, program=program, init_program=init_program)
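
# Plain SGD on the averaged cross-entropy cost.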
sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001)
opts = sgd_optimizer.minimize(avg_cost)

train_reader = paddle.batch(
    paddle.dataset.imikolov.train(word_dict, N), batch_size)

place = core.CPUPlace()
exe = Executor(place)
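
# Run init_program once to initialize the parameters, then train for at most
# PASS_NUM passes over the imikolov N-gram reader.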
exe.run(init_program, feed={}, fetch_list=[])
PASS_NUM = 100
for pass_id in range(PASS_NUM):
    for data in train_reader():
        input_data = [[data_idx[idx] for data_idx in data] for idx in xrange(5)]
        input_data = map(lambda x: np.array(x).astype("int32"), input_data)
        input_data = map(lambda x: np.expand_dims(x, axis=1), input_data)
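
        # Wrap each (batch, 1) int32 column as a LoDTensor on the chosen place
        # so it can be fed to the executor by data-layer name.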
        first_data = input_data[0]
        first_tensor = core.LoDTensor()
        first_tensor.set(first_data, place)

        second_data = input_data[1]
        second_tensor = core.LoDTensor()
        second_tensor.set(second_data, place)

        third_data = input_data[2]
        third_tensor = core.LoDTensor()
        third_tensor.set(third_data, place)

        forth_data = input_data[3]
        forth_tensor = core.LoDTensor()
        forth_tensor.set(forth_data, place)

        next_data = input_data[4]
        next_tensor = core.LoDTensor()
        next_tensor.set(next_data, place)

        outs = exe.run(program,
                       feed={
                           'firstw': first_tensor,
                           'secondw': second_tensor,
                           'thirdw': third_tensor,
                           'forthw': forth_tensor,
                           'nextw': next_tensor
                       },
                       fetch_list=[avg_cost])
        out = np.array(outs[0])
        if out[0] < 10.0:
            exit(0)  # if the avg cost is less than 10.0, we think our code is good.
exit(1)