Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into develop
commit
047f3a766c
@ -0,0 +1,3 @@
|
||||
[submodule "book"]
|
||||
path = book
|
||||
url = https://github.com/PaddlePaddle/book.git
|
@ -0,0 +1 @@
|
||||
Subproject commit 22ed2a01aee872f055b5f5f212428f481cefc10d
|
@ -0,0 +1,125 @@
|
||||
import paddle.v2 as paddle
|
||||
import cPickle
|
||||
import copy
|
||||
|
||||
|
||||
def main():
    """Train a MovieLens rating model and run a single inference.

    Two feature towers are built — one for the user, one for the movie —
    each ending in a 200-wide tanh FC layer.  Their scaled cosine
    similarity is regressed against the observed rating, trained for one
    pass with Adam, and finally used to score one fixed (user, movie)
    pair.
    """
    paddle.init(use_gpu=False)
    movie_title_dict = paddle.dataset.movielens.get_movie_title_dict()

    # ---------------- user tower ----------------
    uid = paddle.layer.data(
        name='user_id',
        type=paddle.data_type.integer_value(
            paddle.dataset.movielens.max_user_id() + 1))
    usr_emb = paddle.layer.embedding(input=uid, size=32)

    usr_gender_id = paddle.layer.data(
        name='gender_id', type=paddle.data_type.integer_value(2))
    usr_gender_emb = paddle.layer.embedding(input=usr_gender_id, size=16)

    usr_age_id = paddle.layer.data(
        name='age_id',
        type=paddle.data_type.integer_value(
            len(paddle.dataset.movielens.age_table)))
    usr_age_emb = paddle.layer.embedding(input=usr_age_id, size=16)

    usr_job_id = paddle.layer.data(
        name='job_id',
        type=paddle.data_type.integer_value(
            paddle.dataset.movielens.max_job_id() + 1))
    usr_job_emb = paddle.layer.embedding(input=usr_job_id, size=16)

    usr_combined_features = paddle.layer.fc(
        input=[usr_emb, usr_gender_emb, usr_age_emb, usr_job_emb],
        size=200,
        act=paddle.activation.Tanh())

    # ---------------- movie tower ----------------
    mov_id = paddle.layer.data(
        name='movie_id',
        type=paddle.data_type.integer_value(
            paddle.dataset.movielens.max_movie_id() + 1))
    mov_emb = paddle.layer.embedding(input=mov_id, size=32)

    # Genres arrive as a multi-hot (sparse binary) vector.
    mov_categories = paddle.layer.data(
        name='category_id',
        type=paddle.data_type.sparse_binary_vector(
            len(paddle.dataset.movielens.movie_categories())))
    mov_categories_hidden = paddle.layer.fc(input=mov_categories, size=32)

    # Title words go through an embedding + conv-pool sequence encoder.
    mov_title_id = paddle.layer.data(
        name='movie_title',
        type=paddle.data_type.integer_value_sequence(len(movie_title_dict)))
    mov_title_emb = paddle.layer.embedding(input=mov_title_id, size=32)
    mov_title_conv = paddle.networks.sequence_conv_pool(
        input=mov_title_emb, hidden_size=32, context_len=3)

    mov_combined_features = paddle.layer.fc(
        input=[mov_emb, mov_categories_hidden, mov_title_conv],
        size=200,
        act=paddle.activation.Tanh())

    # Scaled cosine similarity in [-5, 5] is the raw score.
    inference = paddle.layer.cos_sim(
        a=usr_combined_features, b=mov_combined_features, size=1, scale=5)
    cost = paddle.layer.regression_cost(
        input=inference,
        label=paddle.layer.data(
            name='score', type=paddle.data_type.dense_vector(1)))

    parameters = paddle.parameters.create(cost)
    trainer = paddle.trainer.SGD(cost=cost,
                                 parameters=parameters,
                                 update_equation=paddle.optimizer.Adam(
                                     learning_rate=1e-4))

    # Column index of each named input in the dataset tuples.
    feeding = {
        'user_id': 0,
        'gender_id': 1,
        'age_id': 2,
        'job_id': 3,
        'movie_id': 4,
        'category_id': 5,
        'movie_title': 6,
        'score': 7
    }

    def event_handler(event):
        # Log the running cost every 100 mini-batches.
        if isinstance(event, paddle.event.EndIteration):
            if event.batch_id % 100 == 0:
                print("Pass %d Batch %d Cost %.2f" % (
                    event.pass_id, event.batch_id, event.cost))

    trainer.train(
        reader=paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.movielens.train(), buf_size=8192),
            batch_size=256),
        event_handler=event_handler,
        feeding=feeding,
        num_passes=1)

    # ---------------- inference on one fixed pair ----------------
    user_id = 234
    movie_id = 345
    user = paddle.dataset.movielens.user_info()[user_id]
    movie = paddle.dataset.movielens.movie_info()[movie_id]
    feature = user.value() + movie.value()

    def reader():
        # Single-example reader feeding the concatenated features.
        yield feature

    # Inference takes the same feeding map minus the label column.
    infer_feeding = copy.copy(feeding)
    del infer_feeding['score']

    prediction = paddle.infer(
        output=inference,
        parameters=parameters,
        reader=paddle.batch(
            reader, batch_size=32),
        feeding=infer_feeding)
    # Map the cosine score from [-5, 5] back onto the [0, 5] rating scale.
    print((prediction + 5) / 2)


if __name__ == '__main__':
    main()
|
@ -1,90 +0,0 @@
|
||||
import paddle.v2.activation as activation
|
||||
import paddle.v2.attr as attr
|
||||
import paddle.v2.data_type as data_type
|
||||
import paddle.v2.layer as layer
|
||||
import paddle.v2.networks as networks
|
||||
|
||||
|
||||
def seqToseq_net_v2(source_dict_dim, target_dict_dim):
    """Build a GRU encoder-decoder network with attention for translation.

    Args:
        source_dict_dim: size of the source-language vocabulary.
        target_dict_dim: size of the target-language vocabulary.

    Returns:
        The classification cost layer over the next target word.
    """
    ### Network Architecture
    word_vector_dim = 512  # dimension of word vector
    decoder_size = 512  # dimension of hidden unit in GRU Decoder network
    encoder_size = 512  # dimension of hidden unit in GRU Encoder network

    #### Encoder: bidirectional GRU over the embedded source sentence.
    src_word_id = layer.data(
        name='source_language_word',
        type=data_type.integer_value_sequence(source_dict_dim))
    src_embedding = layer.embedding(
        input=src_word_id,
        size=word_vector_dim,
        param_attr=attr.ParamAttr(name='_source_language_embedding'))
    src_forward = networks.simple_gru(input=src_embedding, size=encoder_size)
    src_backward = networks.simple_gru(
        input=src_embedding, size=encoder_size, reverse=True)
    encoded_vector = layer.concat(input=[src_forward, src_backward])

    #### Decoder
    # Project the encoder states once; attention reuses this projection.
    with layer.mixed(size=decoder_size) as encoded_proj:
        encoded_proj += layer.full_matrix_projection(input=encoded_vector)

    # The decoder is bootstrapped from the first backward-encoder state.
    backward_first = layer.first_seq(input=src_backward)
    with layer.mixed(size=decoder_size, act=activation.Tanh()) as decoder_boot:
        decoder_boot += layer.full_matrix_projection(input=backward_first)

    def gru_decoder_with_attention(enc_vec, enc_proj, current_word):
        # One decoder step: attend over the source, then advance the GRU.
        decoder_mem = layer.memory(
            name='gru_decoder', size=decoder_size, boot_layer=decoder_boot)

        context = networks.simple_attention(
            encoded_sequence=enc_vec,
            encoded_proj=enc_proj,
            decoder_state=decoder_mem)

        with layer.mixed(size=decoder_size * 3) as decoder_inputs:
            decoder_inputs += layer.full_matrix_projection(input=context)
            decoder_inputs += layer.full_matrix_projection(input=current_word)

        gru_step = layer.gru_step(
            name='gru_decoder',
            input=decoder_inputs,
            output_mem=decoder_mem,
            size=decoder_size)

        # Softmax over the target vocabulary for this step.
        with layer.mixed(
                size=target_dict_dim, bias_attr=True,
                act=activation.Softmax()) as out:
            out += layer.full_matrix_projection(input=gru_step)
        return out

    decoder_group_name = "decoder_group"
    group_input1 = layer.StaticInputV2(input=encoded_vector, is_seq=True)
    group_input2 = layer.StaticInputV2(input=encoded_proj, is_seq=True)
    group_inputs = [group_input1, group_input2]

    trg_embedding = layer.embedding(
        input=layer.data(
            name='target_language_word',
            type=data_type.integer_value_sequence(target_dict_dim)),
        size=word_vector_dim,
        param_attr=attr.ParamAttr(name='_target_language_embedding'))
    group_inputs.append(trg_embedding)

    # For decoder equipped with attention mechanism, in training,
    # target embeding (the groudtruth) is the data input,
    # while encoded source sequence is accessed to as an unbounded memory.
    # Here, the StaticInput defines a read-only memory
    # for the recurrent_group.
    decoder = layer.recurrent_group(
        name=decoder_group_name,
        step=gru_decoder_with_attention,
        input=group_inputs)

    lbl = layer.data(
        name='target_language_next_word',
        type=data_type.integer_value_sequence(target_dict_dim))
    cost = layer.classification_cost(input=decoder, label=lbl)

    return cost
|
@ -0,0 +1,80 @@
|
||||
import math
|
||||
|
||||
import paddle.v2 as paddle
|
||||
|
||||
dictsize = 1953
|
||||
embsize = 32
|
||||
hiddensize = 256
|
||||
N = 5
|
||||
|
||||
|
||||
def wordemb(inlayer):
    """Project a word-id layer into the shared embedding table.

    Every call reuses the single parameter named "_proj", so all five
    positions of the n-gram window share one embedding matrix.
    """
    return paddle.layer.table_projection(
        input=inlayer,
        size=embsize,
        param_attr=paddle.attr.Param(
            name="_proj",
            initial_std=0.001,
            learning_rate=1,
            l2_rate=0))
|
||||
|
||||
|
||||
def main():
    """Train a 4-gram neural language model on the imikolov corpus.

    Four context words are embedded (shared table), concatenated, pushed
    through a sigmoid hidden layer with dropout, and classified against
    the fifth word with softmax + cross-entropy.
    """
    paddle.init(use_gpu=False, trainer_count=1)
    word_dict = paddle.dataset.imikolov.build_dict()
    dict_size = len(word_dict)

    # One integer-id input per position of the 5-gram window; the first
    # four are context, the fifth is the prediction target.
    firstword = paddle.layer.data(
        name="firstw", type=paddle.data_type.integer_value(dict_size))
    secondword = paddle.layer.data(
        name="secondw", type=paddle.data_type.integer_value(dict_size))
    thirdword = paddle.layer.data(
        name="thirdw", type=paddle.data_type.integer_value(dict_size))
    fourthword = paddle.layer.data(
        name="fourthw", type=paddle.data_type.integer_value(dict_size))
    nextword = paddle.layer.data(
        name="fifthw", type=paddle.data_type.integer_value(dict_size))

    contextemb = paddle.layer.concat(input=[
        wordemb(firstword),
        wordemb(secondword),
        wordemb(thirdword),
        wordemb(fourthword),
    ])
    hidden1 = paddle.layer.fc(
        input=contextemb,
        size=hiddensize,
        act=paddle.activation.Sigmoid(),
        layer_attr=paddle.attr.Extra(drop_rate=0.5),
        bias_attr=paddle.attr.Param(learning_rate=2),
        param_attr=paddle.attr.Param(
            # Fan-in of the concatenated context is embsize * 8 units.
            initial_std=1. / math.sqrt(embsize * 8),
            learning_rate=1))
    predictword = paddle.layer.fc(
        input=hidden1,
        size=dict_size,
        bias_attr=paddle.attr.Param(learning_rate=2),
        act=paddle.activation.Softmax())

    cost = paddle.layer.classification_cost(input=predictword, label=nextword)
    parameters = paddle.parameters.create(cost)
    adam_optimizer = paddle.optimizer.Adam(
        learning_rate=3e-3,
        regularization=paddle.optimizer.L2Regularization(8e-4))
    trainer = paddle.trainer.SGD(cost, parameters, adam_optimizer)

    def event_handler(event):
        # Every 100 batches, evaluate on the held-out set and log both
        # training and test metrics.  `trainer` is captured by closure.
        if isinstance(event, paddle.event.EndIteration):
            if event.batch_id % 100 == 0:
                result = trainer.test(
                    paddle.batch(
                        paddle.dataset.imikolov.test(word_dict, N), 32))
                print("Pass %d, Batch %d, Cost %f, %s, Testing metrics %s" % (
                    event.pass_id, event.batch_id, event.cost, event.metrics,
                    result.metrics))

    trainer.train(
        paddle.batch(paddle.dataset.imikolov.train(word_dict, N), 32),
        num_passes=30,
        event_handler=event_handler)


if __name__ == '__main__':
    main()
|
@ -1,2 +1,26 @@
|
||||
API
|
||||
===
|
||||
|
||||
模型配置 API
|
||||
------------
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
v2/model_configs.rst
|
||||
|
||||
数据 API
|
||||
--------
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
v2/data.rst
|
||||
|
||||
训练 API
|
||||
--------
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
v2/run_logic.rst
|
@ -0,0 +1,93 @@
|
||||
================
|
||||
Data Related API
|
||||
================
|
||||
|
||||
|
||||
#########
|
||||
DataTypes
|
||||
#########
|
||||
|
||||
.. automodule:: paddle.v2.data_type
|
||||
:members:
|
||||
|
||||
##########
|
||||
DataFeeder
|
||||
##########
|
||||
|
||||
.. automodule:: paddle.v2.data_feeder
|
||||
:members:
|
||||
|
||||
######
|
||||
Reader
|
||||
######
|
||||
|
||||
.. automodule:: paddle.v2.reader
|
||||
:members:
|
||||
|
||||
.. automodule:: paddle.v2.reader.creator
|
||||
:members:
|
||||
|
||||
#########
|
||||
minibatch
|
||||
#########
|
||||
|
||||
.. automodule:: paddle.v2.minibatch
|
||||
:members:
|
||||
|
||||
#######
|
||||
Dataset
|
||||
#######
|
||||
|
||||
.. automodule:: paddle.v2.dataset
|
||||
:members:
|
||||
|
||||
|
||||
mnist
|
||||
+++++
|
||||
|
||||
.. automodule:: paddle.v2.dataset.mnist
|
||||
:members:
|
||||
|
||||
|
||||
cifar
|
||||
+++++
|
||||
|
||||
.. automodule:: paddle.v2.dataset.cifar
|
||||
:members:
|
||||
|
||||
conll05
|
||||
+++++++
|
||||
|
||||
.. automodule:: paddle.v2.dataset.conll05
|
||||
:members:
|
||||
|
||||
imdb
|
||||
++++
|
||||
|
||||
.. automodule:: paddle.v2.dataset.imdb
|
||||
:members:
|
||||
|
||||
imikolov
|
||||
++++++++
|
||||
|
||||
.. automodule:: paddle.v2.dataset.imikolov
|
||||
:members:
|
||||
|
||||
movielens
|
||||
+++++++++
|
||||
|
||||
.. automodule:: paddle.v2.dataset.movielens
|
||||
:members:
|
||||
|
||||
sentiment
|
||||
+++++++++
|
||||
|
||||
.. automodule:: paddle.v2.dataset.sentiment
|
||||
:members:
|
||||
|
||||
uci_housing
|
||||
+++++++++++
|
||||
|
||||
.. automodule:: paddle.v2.dataset.uci_housing
|
||||
:members:
|
||||
|
@ -1,6 +1,46 @@
|
||||
#########################
|
||||
Configuration Related API
|
||||
#########################
|
||||
|
||||
======
|
||||
Layers
|
||||
======
|
||||
|
||||
.. automodule:: paddle.v2.layer
|
||||
:members:
|
||||
|
||||
|
||||
==========
|
||||
Attributes
|
||||
==========
|
||||
|
||||
.. automodule:: paddle.v2.attr
|
||||
:members:
|
||||
|
||||
===========
|
||||
Activations
|
||||
===========
|
||||
|
||||
.. automodule:: paddle.v2.activation
|
||||
:members:
|
||||
|
||||
========
|
||||
Poolings
|
||||
========
|
||||
|
||||
.. automodule:: paddle.v2.pooling
|
||||
:members:
|
||||
|
||||
========
|
||||
Networks
|
||||
========
|
||||
|
||||
.. automodule:: paddle.v2.networks
|
||||
:members:
|
||||
|
||||
==========
|
||||
Optimizers
|
||||
==========
|
||||
|
||||
.. automodule:: paddle.v2.optimizer
|
||||
:members:
|
||||
|
@ -0,0 +1,34 @@
|
||||
###########
|
||||
Trainer API
|
||||
###########
|
||||
|
||||
|
||||
==========
|
||||
Parameters
|
||||
==========
|
||||
|
||||
.. automodule:: paddle.v2.parameters
|
||||
:members:
|
||||
|
||||
|
||||
=======
|
||||
Trainer
|
||||
=======
|
||||
|
||||
.. automodule:: paddle.v2.trainer
|
||||
:members:
|
||||
|
||||
|
||||
=====
|
||||
Event
|
||||
=====
|
||||
|
||||
.. automodule:: paddle.v2.event
|
||||
:members:
|
||||
|
||||
|
||||
=========
|
||||
Inference
|
||||
=========
|
||||
|
||||
.. autofunction:: paddle.v2.infer
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue