From 55b96287aeb08305b90d3351c602c207d9c38290 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Thu, 23 Feb 2017 22:45:51 +0800 Subject: [PATCH 01/87] support rnn --- python/paddle/v2/layer.py | 87 +++++++++++++++++++++++++++++++++++---- 1 file changed, 79 insertions(+), 8 deletions(-) diff --git a/python/paddle/v2/layer.py b/python/paddle/v2/layer.py index 507725ee4f..bebe7c6690 100644 --- a/python/paddle/v2/layer.py +++ b/python/paddle/v2/layer.py @@ -73,6 +73,7 @@ from paddle.trainer_config_helpers.config_parser_utils import \ parse_network_config as __parse__ from paddle.trainer_config_helpers.default_decorators import wrap_name_default +import activation import data_type __all__ = [ @@ -97,10 +98,11 @@ def parse_network(*outputs): class Layer(object): - def __init__(self, name, parent_layers): + def __init__(self, name, parent_layers, step_input=None): assert isinstance(parent_layers, dict) assert isinstance(name, basestring) self.name = name + self.step_input = step_input self.__parent_layers__ = parent_layers def to_proto(self, context): @@ -116,8 +118,14 @@ class Layer(object): else: v1_layer = map(lambda x: x.to_proto(context=context), self.__parent_layers__[layer_name]) + if layer_name == "input" and self.step_input is not None: + v1_layer.insert(0, self.step_input) kwargs[layer_name] = v1_layer + # memory may have the same name with some layer + if isinstance(self, MemoryV2): + return self.to_proto_impl(**kwargs) + if self.name not in context: context[self.name] = self.to_proto_impl(**kwargs) return context[self.name] @@ -133,7 +141,7 @@ def __convert_to_v2__(method_name, name_prefix, parent_names): wrapper = None class V2LayerImpl(Layer): - def __init__(self, name=None, **kwargs): + def __init__(self, name=None, step_input=None, **kwargs): parent_layers = dict() other_kwargs = dict() for pname in parent_names: @@ -143,7 +151,7 @@ def __convert_to_v2__(method_name, name_prefix, parent_names): if key not in parent_names: other_kwargs[key] = kwargs[key] - super(V2LayerImpl, self).__init__(name, parent_layers) + super(V2LayerImpl, self).__init__(name, parent_layers, step_input) self.__other_kwargs__ = other_kwargs if wrapper is not None: @@ -186,6 +194,22 @@ class DataLayerV2(Layer): return getattr(conf_helps, self.__method_name__)(name=self.name, **args) +class MemoryV2(Layer): + def __init__(self, name, size, **kwargs): + self.name = name + self.size = size + self.__kwargs__ = kwargs + super(MemoryV2, self).__init__(name=name, parent_layers=dict()) + + def to_proto_impl(self, **kwargs): + args = dict() + for each in kwargs: + args[each] = kwargs[each] + for each in self.__kwargs__: + args[each] = self.__kwargs__[each] + return conf_helps.memory(name=self.name, size=self.size, **args) + + data = DataLayerV2 fc = __convert_to_v2__('fc_layer', name_prefix='fc', parent_names=['input']) max_id = __convert_to_v2__( @@ -198,6 +222,13 @@ cross_entropy_cost = __convert_to_v2__( 'cross_entropy', name_prefix='cross_entropy', parent_names=['input', 'label']) +embedding = __convert_to_v2__( + 'embedding_layer', name_prefix='embedding', parent_names=['input']) +last_seq = __convert_to_v2__( + 'last_seq', name_prefix='last_seq', parent_names=['input']) +recurrent_group = __convert_to_v2__( + 'recurrent_group', name_prefix='recurrent_layer', parent_names=['input']) +memory = MemoryV2 if __name__ == '__main__': pixel = data(name='pixel', type=data_type.dense_vector(784)) @@ -208,8 +239,48 @@ if __name__ == '__main__': cost1 = classification_cost(input=inference, label=label) cost2 = 
cross_entropy_cost(input=inference, label=label) - print parse_network(cost1) - print parse_network(cost2) - print parse_network(cost1, cost2) - print parse_network(cost2) - print parse_network(inference, maxid) + mem = memory(name="rnn_state", size=10) + + # print parse_network(cost1) + # print parse_network(cost2) + # print parse_network(cost1, cost2) + # print parse_network(cost2) + # print parse_network(inference, maxid) + + dict_dim = 10 + word_dim = 8 + hidden_dim = 8 + label_dim = 3 + + def step(y): + mem = conf_helps.memory(name="rnn_state", size=hidden_dim) + out = conf_helps.fc_layer( + input=[y, mem], + size=hidden_dim, + act=activation.Tanh(), + bias_attr=True, + name="rnn_state") + return out + + def test(): + data1 = conf_helps.data_layer(name="word", size=dict_dim) + embd = conf_helps.embedding_layer(input=data1, size=word_dim) + conf_helps.recurrent_group(name="rnn", step=step, input=embd) + + # print __parse__(test) + + # yyyyyyyy + def new_step(y): + mem = memory(name="rnn_state", size=hidden_dim) + out = fc(input=[mem], + step_input=y, + size=hidden_dim, + act=activation.Tanh(), + bias_attr=True, + name="rnn_state") + return out.to_proto(dict()) + + data1 = data(name="word", type=data_type.integer_value(dict_dim)) + embd = embedding(input=data1, size=word_dim) + aaa = recurrent_group(name="rnn", step=new_step, input=embd) + print parse_network(aaa) From 92f52e3bb7a1a203a01d3641887c6bdfd03dce67 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Fri, 24 Feb 2017 13:46:59 +0800 Subject: [PATCH 02/87] add rnn test --- demo/mnist/api_train_v2.py | 3 ++ python/paddle/v2/layer.py | 43 +---------------- python/paddle/v2/tests/layer_test.py | 72 ++++++++++++++++++++++++++++ 3 files changed, 77 insertions(+), 41 deletions(-) create mode 100644 python/paddle/v2/tests/layer_test.py diff --git a/demo/mnist/api_train_v2.py b/demo/mnist/api_train_v2.py index 6fc01ce58b..5e66b7399a 100644 --- a/demo/mnist/api_train_v2.py +++ b/demo/mnist/api_train_v2.py @@ -3,6 +3,9 @@ import paddle.v2 as paddle import mnist_util +import pudb +pudb.set_trace() + def train_reader(): train_file = './data/raw_data/train' diff --git a/python/paddle/v2/layer.py b/python/paddle/v2/layer.py index 44ebebcaea..e1952ce747 100644 --- a/python/paddle/v2/layer.py +++ b/python/paddle/v2/layer.py @@ -73,16 +73,15 @@ from paddle.trainer_config_helpers.config_parser_utils import \ parse_network_config as __parse__ from paddle.trainer_config_helpers.default_decorators import wrap_name_default -import activation -import data_type import activation import attr +import data_type __all__ = [ 'parse_network', 'data', 'fc', 'max_id', 'classification_cost', 'cross_entropy_cost', 'cross_entropy_with_selfnorm_cost', 'regression_cost', 'multi_binary_label_cross_entropy_cost', 'rank_cost', 'lambda_cost', - 'sum_cost', 'huber_cost' + 'sum_cost', 'huber_cost', 'memory', 'embedding', 'recurrent_group' ] @@ -294,41 +293,3 @@ if __name__ == '__main__': print parse_network(cost5, cost6) print parse_network(cost7, cost8, cost9, cost10, cost11) print parse_network(inference, maxid) - - dict_dim = 10 - word_dim = 8 - hidden_dim = 8 - label_dim = 3 - - def step(y): - mem = conf_helps.memory(name="rnn_state", size=hidden_dim) - out = conf_helps.fc_layer( - input=[y, mem], - size=hidden_dim, - act=activation.Tanh(), - bias_attr=True, - name="rnn_state") - return out - - def test(): - data1 = conf_helps.data_layer(name="word", size=dict_dim) - embd = conf_helps.embedding_layer(input=data1, size=word_dim) - conf_helps.recurrent_group(name="rnn", 
step=step, input=embd) - - # print __parse__(test) - - # yyyyyyyy - def new_step(y): - mem = memory(name="rnn_state", size=hidden_dim) - out = fc(input=[mem], - step_input=y, - size=hidden_dim, - act=activation.Tanh(), - bias_attr=True, - name="rnn_state") - return out.to_proto(dict()) - - data1 = data(name="word", type=data_type.integer_value(dict_dim)) - embd = embedding(input=data1, size=word_dim) - aaa = recurrent_group(name="rnn", step=new_step, input=embd) - print parse_network(aaa) diff --git a/python/paddle/v2/tests/layer_test.py b/python/paddle/v2/tests/layer_test.py new file mode 100644 index 0000000000..87e601a60a --- /dev/null +++ b/python/paddle/v2/tests/layer_test.py @@ -0,0 +1,72 @@ +# Copyright PaddlePaddle contributors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import difflib +import unittest + +import paddle.trainer_config_helpers as conf_helps +import paddle.v2.activation as activation +import paddle.v2.data_type as data_type +import paddle.v2.layer as layer +from paddle.trainer_config_helpers.config_parser_utils import \ + parse_network_config as parse_network + + +class RNNTest(unittest.TestCase): + def test_simple_rnn(self): + dict_dim = 10 + word_dim = 8 + hidden_dim = 8 + + def test_old_rnn(): + def step(y): + mem = conf_helps.memory(name="rnn_state", size=hidden_dim) + out = conf_helps.fc_layer( + input=[y, mem], + size=hidden_dim, + act=activation.Tanh(), + bias_attr=True, + name="rnn_state") + return out + + def test(): + data1 = conf_helps.data_layer(name="word", size=dict_dim) + embd = conf_helps.embedding_layer(input=data1, size=word_dim) + conf_helps.recurrent_group(name="rnn", step=step, input=embd) + + return str(parse_network(test)) + + def test_new_rnn(): + def new_step(y): + mem = layer.memory(name="rnn_state", size=hidden_dim) + out = layer.fc(input=[mem], + step_input=y, + size=hidden_dim, + act=activation.Tanh(), + bias_attr=True, + name="rnn_state") + return out.to_proto(dict()) + + data1 = layer.data( + name="word", type=data_type.integer_value(dict_dim)) + embd = layer.embedding(input=data1, size=word_dim) + aaa = layer.recurrent_group(name="rnn", step=new_step, input=embd) + return str(layer.parse_network(aaa)) + + diff = difflib.unified_diff(test_old_rnn().splitlines(1), + test_new_rnn().splitlines(1)) + print ''.join(diff) + + +if __name__ == '__main__': + unittest.main() From 6b80c2b4f9a626efa911f715dcb45bee99d80729 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Fri, 24 Feb 2017 15:29:11 +0800 Subject: [PATCH 03/87] add cost test --- python/paddle/v2/layer.py | 2 -- python/paddle/v2/tests/layer_test.py | 35 ++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 2 deletions(-) diff --git a/python/paddle/v2/layer.py b/python/paddle/v2/layer.py index e1952ce747..f333c0af96 100644 --- a/python/paddle/v2/layer.py +++ b/python/paddle/v2/layer.py @@ -281,8 +281,6 @@ if __name__ == '__main__': cost10 = sum_cost(input=inference) cost11 = huber_cost(input=score, label=label) - mem = memory(name="rnn_state", size=10) - # 
print parse_network(cost1) # print parse_network(cost2) # print parse_network(cost1, cost2) diff --git a/python/paddle/v2/tests/layer_test.py b/python/paddle/v2/tests/layer_test.py index 87e601a60a..6c4b458914 100644 --- a/python/paddle/v2/tests/layer_test.py +++ b/python/paddle/v2/tests/layer_test.py @@ -18,10 +18,45 @@ import paddle.trainer_config_helpers as conf_helps import paddle.v2.activation as activation import paddle.v2.data_type as data_type import paddle.v2.layer as layer +import paddle.v2.attr as attr from paddle.trainer_config_helpers.config_parser_utils import \ parse_network_config as parse_network +class CostLyaerTest(unittest.TestCase): + def test_cost_layer(self): + pixel = layer.data(name='pixel', type=data_type.dense_vector(784)) + label = layer.data(name='label', type=data_type.integer_value(10)) + weight = layer.data(name='weight', type=data_type.dense_vector(10)) + score = layer.data(name='score', type=data_type.dense_vector(1)) + hidden = layer.fc(input=pixel, + size=100, + act=activation.Sigmoid(), + param_attr=attr.Param(name='hidden')) + inference = layer.fc(input=hidden, size=10, act=activation.Softmax()) + + cost1 = layer.classification_cost(input=inference, label=label) + cost2 = layer.classification_cost( + input=inference, label=label, weight=weight) + cost3 = layer.cross_entropy_cost(input=inference, label=label) + cost4 = layer.cross_entropy_with_selfnorm_cost( + input=inference, label=label) + cost5 = layer.regression_cost(input=inference, label=label) + cost6 = layer.regression_cost( + input=inference, label=label, weight=weight) + cost7 = layer.multi_binary_label_cross_entropy_cost( + input=inference, label=label) + cost8 = layer.rank_cost(left=score, right=score, label=score) + cost9 = layer.lambda_cost(input=inference, score=score) + cost10 = layer.sum_cost(input=inference) + cost11 = layer.huber_cost(input=score, label=label) + + print layer.parse_network(cost1, cost2) + print layer.parse_network(cost3, cost4) + print layer.parse_network(cost5, cost6) + print layer.parse_network(cost7, cost8, cost9, cost10, cost11) + + class RNNTest(unittest.TestCase): def test_simple_rnn(self): dict_dim = 10 From db92e3c884a586d0f28dcc7c7e3be99c1e6203f6 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Fri, 24 Feb 2017 16:04:59 +0800 Subject: [PATCH 04/87] refine code --- python/paddle/v2/layer.py | 35 ---------------------------- python/paddle/v2/tests/layer_test.py | 7 +++--- 2 files changed, 4 insertions(+), 38 deletions(-) diff --git a/python/paddle/v2/layer.py b/python/paddle/v2/layer.py index f333c0af96..5ecc96c685 100644 --- a/python/paddle/v2/layer.py +++ b/python/paddle/v2/layer.py @@ -256,38 +256,3 @@ sum_cost = __convert_to_v2__( 'sum_cost', name_prefix='sum_cost', parent_names=['input']) huber_cost = __convert_to_v2__( 'huber_cost', name_prefix='huber_cost', parent_names=['input', 'label']) - -if __name__ == '__main__': - pixel = data(name='pixel', type=data_type.dense_vector(784)) - label = data(name='label', type=data_type.integer_value(10)) - weight = data(name='weight', type=data_type.dense_vector(10)) - score = data(name='score', type=data_type.dense_vector(1)) - - hidden = fc(input=pixel, - size=100, - act=activation.Sigmoid(), - param_attr=attr.Param(name='hidden')) - inference = fc(input=hidden, size=10, act=activation.Softmax()) - maxid = max_id(input=inference) - cost1 = classification_cost(input=inference, label=label) - cost2 = classification_cost(input=inference, label=label, weight=weight) - cost3 = cross_entropy_cost(input=inference, 
label=label) - cost4 = cross_entropy_with_selfnorm_cost(input=inference, label=label) - cost5 = regression_cost(input=inference, label=label) - cost6 = regression_cost(input=inference, label=label, weight=weight) - cost7 = multi_binary_label_cross_entropy_cost(input=inference, label=label) - cost8 = rank_cost(left=score, right=score, label=score) - cost9 = lambda_cost(input=inference, score=score) - cost10 = sum_cost(input=inference) - cost11 = huber_cost(input=score, label=label) - - # print parse_network(cost1) - # print parse_network(cost2) - # print parse_network(cost1, cost2) - # print parse_network(cost2) - # print parse_network(inference, maxid) - print parse_network(cost1, cost2) - print parse_network(cost3, cost4) - print parse_network(cost5, cost6) - print parse_network(cost7, cost8, cost9, cost10, cost11) - print parse_network(inference, maxid) diff --git a/python/paddle/v2/tests/layer_test.py b/python/paddle/v2/tests/layer_test.py index 6c4b458914..2958cbd9eb 100644 --- a/python/paddle/v2/tests/layer_test.py +++ b/python/paddle/v2/tests/layer_test.py @@ -16,9 +16,9 @@ import unittest import paddle.trainer_config_helpers as conf_helps import paddle.v2.activation as activation +import paddle.v2.attr as attr import paddle.v2.data_type as data_type import paddle.v2.layer as layer -import paddle.v2.attr as attr from paddle.trainer_config_helpers.config_parser_utils import \ parse_network_config as parse_network @@ -95,8 +95,9 @@ class RNNTest(unittest.TestCase): data1 = layer.data( name="word", type=data_type.integer_value(dict_dim)) embd = layer.embedding(input=data1, size=word_dim) - aaa = layer.recurrent_group(name="rnn", step=new_step, input=embd) - return str(layer.parse_network(aaa)) + rnn_layer = layer.recurrent_group( + name="rnn", step=new_step, input=embd) + return str(layer.parse_network(rnn_layer)) diff = difflib.unified_diff(test_old_rnn().splitlines(1), test_new_rnn().splitlines(1)) From e4327a7cd9408839900c0f82b4aedf2ce6672cbd Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Fri, 24 Feb 2017 16:11:02 +0800 Subject: [PATCH 05/87] add CMakeLists.txt --- python/paddle/v2/tests/CMakeLists.txt | 4 ++++ python/paddle/v2/tests/layer_test.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 python/paddle/v2/tests/CMakeLists.txt diff --git a/python/paddle/v2/tests/CMakeLists.txt b/python/paddle/v2/tests/CMakeLists.txt new file mode 100644 index 0000000000..dc5efdab6a --- /dev/null +++ b/python/paddle/v2/tests/CMakeLists.txt @@ -0,0 +1,4 @@ +add_test(NAME layer_test + COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/ + ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/python/paddle/v2/tests/layer_test.py + WORKING_DIRECTORY ${PROJ_ROOT}/python/paddle) diff --git a/python/paddle/v2/tests/layer_test.py b/python/paddle/v2/tests/layer_test.py index 2958cbd9eb..83c8c26d6b 100644 --- a/python/paddle/v2/tests/layer_test.py +++ b/python/paddle/v2/tests/layer_test.py @@ -23,7 +23,7 @@ from paddle.trainer_config_helpers.config_parser_utils import \ parse_network_config as parse_network -class CostLyaerTest(unittest.TestCase): +class CostLayerTest(unittest.TestCase): def test_cost_layer(self): pixel = layer.data(name='pixel', type=data_type.dense_vector(784)) label = layer.data(name='label', type=data_type.integer_value(10)) From f13f1f1ce5cfe428c272e90f85dc9a9c1ed55f6b Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Fri, 24 Feb 2017 17:37:38 +0800 Subject: [PATCH 06/87] use test_layer instead of layer_test --- python/paddle/v2/tests/test_layer.py | 57 
+++++++++++++++++++++++++--- 1 file changed, 51 insertions(+), 6 deletions(-) diff --git a/python/paddle/v2/tests/test_layer.py b/python/paddle/v2/tests/test_layer.py index b600e8cf76..73d769a358 100644 --- a/python/paddle/v2/tests/test_layer.py +++ b/python/paddle/v2/tests/test_layer.py @@ -51,12 +51,57 @@ class CostLayerTest(unittest.TestCase): cost10 = layer.sum_cost(input=inference) cost11 = layer.huber_cost(input=score, label=label) - print dir(layer) - layer.parse_network(cost1, cost2) - print dir(layer) - #print layer.parse_network(cost3, cost4) - #print layer.parse_network(cost5, cost6) - #print layer.parse_network(cost7, cost8, cost9, cost10, cost11) + print layer.parse_network(cost1, cost2) + print layer.parse_network(cost3, cost4) + print layer.parse_network(cost5, cost6) + print layer.parse_network(cost7, cost8, cost9, cost10, cost11) + + +class RNNTest(unittest.TestCase): + def test_simple_rnn(self): + dict_dim = 10 + word_dim = 8 + hidden_dim = 8 + + def test_old_rnn(): + def step(y): + mem = conf_helps.memory(name="rnn_state", size=hidden_dim) + out = conf_helps.fc_layer( + input=[y, mem], + size=hidden_dim, + act=activation.Tanh(), + bias_attr=True, + name="rnn_state") + return out + + def test(): + data1 = conf_helps.data_layer(name="word", size=dict_dim) + embd = conf_helps.embedding_layer(input=data1, size=word_dim) + conf_helps.recurrent_group(name="rnn", step=step, input=embd) + + return str(parse_network(test)) + + def test_new_rnn(): + def new_step(y): + mem = layer.memory(name="rnn_state", size=hidden_dim) + out = layer.fc(input=[mem], + step_input=y, + size=hidden_dim, + act=activation.Tanh(), + bias_attr=True, + name="rnn_state") + return out.to_proto(dict()) + + data1 = layer.data( + name="word", type=data_type.integer_value(dict_dim)) + embd = layer.embedding(input=data1, size=word_dim) + rnn_layer = layer.recurrent_group( + name="rnn", step=new_step, input=embd) + return str(layer.parse_network(rnn_layer)) + + diff = difflib.unified_diff(test_old_rnn().splitlines(1), + test_new_rnn().splitlines(1)) + print ''.join(diff) if __name__ == '__main__': From ad4ab5ac811d90dd2bbb661ad34ba5ee3aa510a1 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sun, 26 Feb 2017 16:29:02 +0800 Subject: [PATCH 07/87] remove step_input in recurrent_group step_input --- .../paddle/trainer_config_helpers/layers.py | 8 ++- python/paddle/v2/layer.py | 61 +++++++++++++++---- python/paddle/v2/tests/test_layer.py | 13 ++-- 3 files changed, 62 insertions(+), 20 deletions(-) diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 00aef80691..4e200517fc 100755 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -3042,7 +3042,8 @@ def recurrent_group(step, reverse=False, name=None, targetInlink=None, - is_generating=False): + is_generating=False, + in_args_converter=None): """ Recurrent layer group is an extremely flexible recurrent unit in PaddlePaddle. 
As long as the user defines the calculation done within a @@ -3185,7 +3186,10 @@ def recurrent_group(step, assert (is_generating != has_LayerOutput) - layer_outs = step(*in_args) + if in_args_converter is None: + layer_outs = step(*in_args) + else: + layer_outs = step(*in_args_converter(*in_args)).to_proto(dict()) if isinstance(layer_outs, LayerOutput): layer_outs = [layer_outs] diff --git a/python/paddle/v2/layer.py b/python/paddle/v2/layer.py index 5ecc96c685..44c7661b24 100644 --- a/python/paddle/v2/layer.py +++ b/python/paddle/v2/layer.py @@ -73,8 +73,6 @@ from paddle.trainer_config_helpers.config_parser_utils import \ parse_network_config as __parse__ from paddle.trainer_config_helpers.default_decorators import wrap_name_default -import activation -import attr import data_type __all__ = [ @@ -101,11 +99,10 @@ def parse_network(*outputs): class Layer(object): - def __init__(self, name, parent_layers, step_input=None): + def __init__(self, name, parent_layers): assert isinstance(parent_layers, dict) assert isinstance(name, basestring) self.name = name - self.step_input = step_input self.__parent_layers__ = parent_layers def to_proto(self, context): @@ -121,12 +118,13 @@ class Layer(object): else: v1_layer = map(lambda x: x.to_proto(context=context), self.__parent_layers__[layer_name]) - if layer_name == "input" and self.step_input is not None: - v1_layer.insert(0, self.step_input) kwargs[layer_name] = v1_layer + if self.name is None: + return self.to_proto_impl(**kwargs) + # memory may have the same name with some layer - if isinstance(self, MemoryV2): + if isinstance(self, MemoryV2) or isinstance(self, LayerOutputV2): return self.to_proto_impl(**kwargs) if self.name not in context: @@ -144,7 +142,7 @@ def __convert_to_v2__(method_name, name_prefix, parent_names): wrapper = None class V2LayerImpl(Layer): - def __init__(self, name=None, step_input=None, **kwargs): + def __init__(self, name=None, **kwargs): parent_layers = dict() other_kwargs = dict() for pname in parent_names: @@ -155,7 +153,7 @@ def __convert_to_v2__(method_name, name_prefix, parent_names): if key not in parent_names: other_kwargs[key] = kwargs[key] - super(V2LayerImpl, self).__init__(name, parent_layers, step_input) + super(V2LayerImpl, self).__init__(name, parent_layers) self.__other_kwargs__ = other_kwargs if wrapper is not None: @@ -214,6 +212,48 @@ class MemoryV2(Layer): return conf_helps.memory(name=self.name, size=self.size, **args) +class LayerOutputV2(Layer): + def __init__(self, layer_output): + assert isinstance(layer_output, conf_helps.LayerOutput) + self.layer_output = layer_output + super(LayerOutputV2, self).__init__( + name=layer_output.name, parent_layers=dict()) + + def to_proto_impl(self): + return self.layer_output + + +class RecurrentGroupV2(Layer): + def __init__(self, name, **kwargs): + self.__parent_names__ = ['input'] + other_kwargs = dict() + parent_layers = dict() + for pname in self.__parent_names__: + if kwargs.has_key(pname): + parent_layers[pname] = kwargs[pname] + for key in kwargs.keys(): + if key not in self.__parent_names__: + other_kwargs[key] = kwargs[key] + self.__kwargs__ = other_kwargs + + super(RecurrentGroupV2, self).__init__( + name=name, parent_layers=parent_layers) + + def to_proto_impl(self, **kwargs): + def in_args_converter(in_args): + if not isinstance(in_args, collections.Sequence): + in_args = [in_args] + return [LayerOutputV2(input) for input in in_args] + + args = dict() + for each in kwargs: + args[each] = kwargs[each] + for each in self.__kwargs__: + args[each] = 
self.__kwargs__[each] + return conf_helps.recurrent_group( + name=self.name, in_args_converter=in_args_converter, **args) + + data = DataLayerV2 fc = __convert_to_v2__('fc_layer', name_prefix='fc', parent_names=['input']) max_id = __convert_to_v2__( @@ -234,8 +274,7 @@ embedding = __convert_to_v2__( 'embedding_layer', name_prefix='embedding', parent_names=['input']) last_seq = __convert_to_v2__( 'last_seq', name_prefix='last_seq', parent_names=['input']) -recurrent_group = __convert_to_v2__( - 'recurrent_group', name_prefix='recurrent_layer', parent_names=['input']) +recurrent_group = RecurrentGroupV2 memory = MemoryV2 cross_entropy_with_selfnorm_cost = __convert_to_v2__( diff --git a/python/paddle/v2/tests/test_layer.py b/python/paddle/v2/tests/test_layer.py index 73d769a358..04c0fc7cb0 100644 --- a/python/paddle/v2/tests/test_layer.py +++ b/python/paddle/v2/tests/test_layer.py @@ -63,7 +63,7 @@ class RNNTest(unittest.TestCase): word_dim = 8 hidden_dim = 8 - def test_old_rnn(): + def parse_old_rnn(): def step(y): mem = conf_helps.memory(name="rnn_state", size=hidden_dim) out = conf_helps.fc_layer( @@ -81,16 +81,15 @@ class RNNTest(unittest.TestCase): return str(parse_network(test)) - def test_new_rnn(): + def parse_new_rnn(): def new_step(y): mem = layer.memory(name="rnn_state", size=hidden_dim) - out = layer.fc(input=[mem], - step_input=y, + out = layer.fc(input=[y, mem], size=hidden_dim, act=activation.Tanh(), bias_attr=True, name="rnn_state") - return out.to_proto(dict()) + return out data1 = layer.data( name="word", type=data_type.integer_value(dict_dim)) @@ -99,8 +98,8 @@ class RNNTest(unittest.TestCase): name="rnn", step=new_step, input=embd) return str(layer.parse_network(rnn_layer)) - diff = difflib.unified_diff(test_old_rnn().splitlines(1), - test_new_rnn().splitlines(1)) + diff = difflib.unified_diff(parse_old_rnn().splitlines(1), + parse_new_rnn().splitlines(1)) print ''.join(diff) From 632ad5c9e25c906b0189be308ecf22c2409abb2c Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 27 Feb 2017 19:59:38 +0800 Subject: [PATCH 08/87] support sequence_rnn_multi_input --- demo/mnist/api_train_v2.py | 3 - python/paddle/trainer/config_parser.py | 6 +- python/paddle/v2/layer.py | 30 ++++- python/paddle/v2/tests/CMakeLists.txt | 6 +- python/paddle/v2/tests/test_layer.py | 50 -------- python/paddle/v2/tests/test_rnn_layer.py | 143 +++++++++++++++++++++++ 6 files changed, 178 insertions(+), 60 deletions(-) create mode 100644 python/paddle/v2/tests/test_rnn_layer.py diff --git a/demo/mnist/api_train_v2.py b/demo/mnist/api_train_v2.py index 5e66b7399a..6fc01ce58b 100644 --- a/demo/mnist/api_train_v2.py +++ b/demo/mnist/api_train_v2.py @@ -3,9 +3,6 @@ import paddle.v2 as paddle import mnist_util -import pudb -pudb.set_trace() - def train_reader(): train_file = './data/raw_data/train' diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index da937152ee..487d4dfd5b 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -3474,6 +3474,8 @@ def update_g_config(): for name in g_config.model_config.output_layer_names: assert name in g_layer_map, \ 'input name "%s" does not correspond to a layer name' % name + for hook in _parse_config_hooks: + hook() return g_config @@ -3485,8 +3487,8 @@ def parse_config(trainer_config, config_arg_str): passed to config script as a dictionary CONFIG_ARGS ''' init_config_environment() - for hook in _parse_config_hooks: - hook() + # for hook in _parse_config_hooks: + # hook() 
config_args = {} diff --git a/python/paddle/v2/layer.py b/python/paddle/v2/layer.py index 44c7661b24..5328070456 100644 --- a/python/paddle/v2/layer.py +++ b/python/paddle/v2/layer.py @@ -124,11 +124,13 @@ class Layer(object): return self.to_proto_impl(**kwargs) # memory may have the same name with some layer - if isinstance(self, MemoryV2) or isinstance(self, LayerOutputV2): + if isinstance(self, MemoryV2): return self.to_proto_impl(**kwargs) + # store v1 API's layer_output in context with the key of it's name. if self.name not in context: context[self.name] = self.to_proto_impl(**kwargs) + return context[self.name] def to_proto_impl(self, **kwargs): @@ -200,8 +202,19 @@ class MemoryV2(Layer): def __init__(self, name, size, **kwargs): self.name = name self.size = size - self.__kwargs__ = kwargs - super(MemoryV2, self).__init__(name=name, parent_layers=dict()) + + parent_names = ['boot_layer'] + parent_layers = dict() + other_kwargs = dict() + for pname in parent_names: + if kwargs.has_key(pname): + parent_layers[pname] = kwargs[pname] + + for key in kwargs.keys(): + if key not in parent_names: + other_kwargs[key] = kwargs[key] + super(MemoryV2, self).__init__(name=name, parent_layers=parent_layers) + self.__kwargs__ = other_kwargs def to_proto_impl(self, **kwargs): args = dict() @@ -209,10 +222,16 @@ class MemoryV2(Layer): args[each] = kwargs[each] for each in self.__kwargs__: args[each] = self.__kwargs__[each] + return conf_helps.memory(name=self.name, size=self.size, **args) class LayerOutputV2(Layer): + """ + LayerOutputV2 is used to store the result of LayerOutput in v1 api. + It will not store it's parents because layer_output has been parsed already. + """ + def __init__(self, layer_output): assert isinstance(layer_output, conf_helps.LayerOutput) self.layer_output = layer_output @@ -239,8 +258,11 @@ class RecurrentGroupV2(Layer): super(RecurrentGroupV2, self).__init__( name=name, parent_layers=parent_layers) + wrapper = wrap_name_default(name_prefix='recurrent_group') + __init__ = wrapper(__init__) + def to_proto_impl(self, **kwargs): - def in_args_converter(in_args): + def in_args_converter(*in_args): if not isinstance(in_args, collections.Sequence): in_args = [in_args] return [LayerOutputV2(input) for input in in_args] diff --git a/python/paddle/v2/tests/CMakeLists.txt b/python/paddle/v2/tests/CMakeLists.txt index ceb71c1454..bc0f247927 100644 --- a/python/paddle/v2/tests/CMakeLists.txt +++ b/python/paddle/v2/tests/CMakeLists.txt @@ -1,3 +1,7 @@ add_test(NAME test_v2_layer COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/ - ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/python/paddle/v2/tests/test_layer.py + ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/python/paddle/v2/tests/test_layer.py) + +add_test(NAME test_v2_rnn_layer + COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/ + ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/python/paddle/v2/tests/test_rnn_layer.py) diff --git a/python/paddle/v2/tests/test_layer.py b/python/paddle/v2/tests/test_layer.py index 04c0fc7cb0..41d9683464 100644 --- a/python/paddle/v2/tests/test_layer.py +++ b/python/paddle/v2/tests/test_layer.py @@ -11,16 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import difflib import unittest -import paddle.trainer_config_helpers as conf_helps import paddle.v2.activation as activation import paddle.v2.attr as attr import paddle.v2.data_type as data_type import paddle.v2.layer as layer -from paddle.trainer_config_helpers.config_parser_utils import \ - parse_network_config as parse_network pixel = layer.data(name='pixel', type=data_type.dense_vector(784)) label = layer.data(name='label', type=data_type.integer_value(10)) @@ -57,51 +53,5 @@ class CostLayerTest(unittest.TestCase): print layer.parse_network(cost7, cost8, cost9, cost10, cost11) -class RNNTest(unittest.TestCase): - def test_simple_rnn(self): - dict_dim = 10 - word_dim = 8 - hidden_dim = 8 - - def parse_old_rnn(): - def step(y): - mem = conf_helps.memory(name="rnn_state", size=hidden_dim) - out = conf_helps.fc_layer( - input=[y, mem], - size=hidden_dim, - act=activation.Tanh(), - bias_attr=True, - name="rnn_state") - return out - - def test(): - data1 = conf_helps.data_layer(name="word", size=dict_dim) - embd = conf_helps.embedding_layer(input=data1, size=word_dim) - conf_helps.recurrent_group(name="rnn", step=step, input=embd) - - return str(parse_network(test)) - - def parse_new_rnn(): - def new_step(y): - mem = layer.memory(name="rnn_state", size=hidden_dim) - out = layer.fc(input=[y, mem], - size=hidden_dim, - act=activation.Tanh(), - bias_attr=True, - name="rnn_state") - return out - - data1 = layer.data( - name="word", type=data_type.integer_value(dict_dim)) - embd = layer.embedding(input=data1, size=word_dim) - rnn_layer = layer.recurrent_group( - name="rnn", step=new_step, input=embd) - return str(layer.parse_network(rnn_layer)) - - diff = difflib.unified_diff(parse_old_rnn().splitlines(1), - parse_new_rnn().splitlines(1)) - print ''.join(diff) - - if __name__ == '__main__': unittest.main() diff --git a/python/paddle/v2/tests/test_rnn_layer.py b/python/paddle/v2/tests/test_rnn_layer.py new file mode 100644 index 0000000000..bf2c4db61a --- /dev/null +++ b/python/paddle/v2/tests/test_rnn_layer.py @@ -0,0 +1,143 @@ +# Copyright PaddlePaddle contributors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import difflib +import unittest + +import paddle.trainer_config_helpers as conf_helps +import paddle.v2.activation as activation +import paddle.v2.data_type as data_type +import paddle.v2.layer as layer +from paddle.trainer_config_helpers.config_parser_utils import \ + parse_network_config as parse_network + + +class RNNTest(unittest.TestCase): + def test_simple_rnn(self): + dict_dim = 10 + word_dim = 8 + hidden_dim = 8 + + def parse_old_rnn(): + def step(y): + mem = conf_helps.memory(name="rnn_state", size=hidden_dim) + out = conf_helps.fc_layer( + input=[y, mem], + size=hidden_dim, + act=activation.Tanh(), + bias_attr=True, + name="rnn_state") + return out + + def test(): + data = conf_helps.data_layer(name="word", size=dict_dim) + embd = conf_helps.embedding_layer(input=data, size=word_dim) + conf_helps.recurrent_group(name="rnn", step=step, input=embd) + + return str(parse_network(test)) + + def parse_new_rnn(): + def new_step(y): + mem = layer.memory(name="rnn_state", size=hidden_dim) + out = layer.fc(input=[y, mem], + size=hidden_dim, + act=activation.Tanh(), + bias_attr=True, + name="rnn_state") + return out + + data = layer.data( + name="word", type=data_type.integer_value(dict_dim)) + embd = layer.embedding(input=data, size=word_dim) + rnn_layer = layer.recurrent_group( + name="rnn", step=new_step, input=embd) + return str(layer.parse_network(rnn_layer)) + + diff = difflib.unified_diff(parse_old_rnn().splitlines(1), + parse_new_rnn().splitlines(1)) + print ''.join(diff) + + def test_sequence_rnn_multi_input(self): + dict_dim = 10 + word_dim = 8 + hidden_dim = 8 + label_dim = 3 + + def parse_old_rnn(): + def step(y, wid): + z = conf_helps.embedding_layer(input=wid, size=word_dim) + mem = conf_helps.memory(name="rnn_state", size=hidden_dim) + out = conf_helps.fc_layer( + input=[y, z, mem], + size=hidden_dim, + act=conf_helps.TanhActivation(), + bias_attr=True, + name="rnn_state") + return out + + def test(): + data = conf_helps.data_layer(name="word", size=dict_dim) + label = conf_helps.data_layer(name="label", size=label_dim) + emb = conf_helps.embedding_layer(input=data, size=word_dim) + out = conf_helps.recurrent_group( + name="rnn", step=step, input=[emb, data]) + + rep = conf_helps.last_seq(input=out) + prob = conf_helps.fc_layer( + size=label_dim, + input=rep, + act=conf_helps.SoftmaxActivation(), + bias_attr=True) + + conf_helps.outputs( + conf_helps.classification_cost( + input=prob, label=label)) + + return str(parse_network(test)) + + def parse_new_rnn(): + def step(y, wid): + z = layer.embedding(input=wid, size=word_dim) + mem = layer.memory(name="rnn_state", size=hidden_dim) + out = layer.fc(input=[y, z, mem], + size=hidden_dim, + act=activation.Tanh(), + bias_attr=True, + name="rnn_state") + return out + + data = layer.data( + name="word", type=data_type.dense_vector(dict_dim)) + label = layer.data( + name="label", type=data_type.dense_vector(label_dim)) + emb = layer.embedding(input=data, size=word_dim) + out = layer.recurrent_group( + name="rnn", step=step, input=[emb, data]) + + rep = layer.last_seq(input=out) + prob = layer.fc(size=label_dim, + input=rep, + act=activation.Softmax(), + bias_attr=True) + + cost = layer.classification_cost(input=prob, label=label) + + return str(layer.parse_network(cost)) + + diff = difflib.unified_diff(parse_old_rnn().splitlines(1), + parse_new_rnn().splitlines(1)) + print ''.join(diff) + + +if __name__ == '__main__': + unittest.main() From 876d597495015ba416639af7426258d32587986e Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 
27 Feb 2017 20:41:00 +0800 Subject: [PATCH 09/87] memory have same name with some layer --- python/paddle/v2/layer.py | 2 ++ python/paddle/v2/tests/CMakeLists.txt | 6 +++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/python/paddle/v2/layer.py b/python/paddle/v2/layer.py index 6dea3afbcf..dbd802bee8 100644 --- a/python/paddle/v2/layer.py +++ b/python/paddle/v2/layer.py @@ -139,6 +139,8 @@ class Layer(object): if self.name is None: return self.to_proto_impl(**kwargs) + elif isinstance(self, MemoryV2): + return self.to_proto_impl(**kwargs) elif self.name not in context: context[self.name] = self.to_proto_impl(**kwargs) diff --git a/python/paddle/v2/tests/CMakeLists.txt b/python/paddle/v2/tests/CMakeLists.txt index bc0f247927..b2f43c42de 100644 --- a/python/paddle/v2/tests/CMakeLists.txt +++ b/python/paddle/v2/tests/CMakeLists.txt @@ -1,7 +1,11 @@ add_test(NAME test_v2_layer COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/ - ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/python/paddle/v2/tests/test_layer.py) + ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/python/paddle/v2/tests/test_layer.py + WORKING_DIRECTORY ${PROJ_ROOT}/python/paddle) add_test(NAME test_v2_rnn_layer COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/ ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/python/paddle/v2/tests/test_rnn_layer.py) + +add_test(NAME test_v2_api + COMMAND bash ${PROJ_ROOT}/python/paddle/v2/tests/run_tests.sh ${PYTHON_EXECUTABLE}) From 9ccc94f4a4d5bd87793730be1a73888c09a55cb3 Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Mon, 27 Feb 2017 19:56:33 +0800 Subject: [PATCH 10/87] srl api training --- demo/semantic_role_labeling/api_train_v2.py | 112 ++++++++++++++++++++ demo/semantic_role_labeling/model_v2.py | 103 ++++++++++++++++++ 2 files changed, 215 insertions(+) create mode 100644 demo/semantic_role_labeling/api_train_v2.py create mode 100644 demo/semantic_role_labeling/model_v2.py diff --git a/demo/semantic_role_labeling/api_train_v2.py b/demo/semantic_role_labeling/api_train_v2.py new file mode 100644 index 0000000000..33b966cca5 --- /dev/null +++ b/demo/semantic_role_labeling/api_train_v2.py @@ -0,0 +1,112 @@ +import numpy +import paddle.v2 as paddle +from paddle.trainer_config_helpers.atts import ParamAttr + +from mode_v2 import db_lstm + +word_dict_file = './data/wordDict.txt' +label_dict_file = './data/targetDict.txt' +predicate_file = './data/verbDict.txt' + +word_dict = dict() +label_dict = dict() +predicate_dict = dict() + +with open(word_dict_file, 'r') as f_word, \ + open(label_dict_file, 'r') as f_label, \ + open(predicate_file, 'r') as f_pre: + for i, line in enumerate(f_word): + w = line.strip() + word_dict[w] = i + + for i, line in enumerate(f_label): + w = line.strip() + label_dict[w] = i + + for i, line in enumerate(f_pre): + w = line.strip() + predicate_dict[w] = i + +word_dict_len = len(word_dict) +label_dict_len = len(label_dict) +pred_len = len(predicate_dict) + + +def train_reader(file_name="data/feature"): + def reader(): + with open(file_name, 'r') as fdata: + for line in fdata: + sentence, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, label = \ + line.strip().split('\t') + + words = sentence.split() + sen_len = len(words) + word_slot = [word_dict.get(w, UNK_IDX) for w in words] + + predicate_slot = [predicate_dict.get(predicate)] * sen_len + ctx_n2_slot = [word_dict.get(ctx_n2, UNK_IDX)] * sen_len + ctx_n1_slot = [word_dict.get(ctx_n1, UNK_IDX)] * sen_len + ctx_0_slot = [word_dict.get(ctx_0, UNK_IDX)] * sen_len + ctx_p1_slot = 
[word_dict.get(ctx_p1, UNK_IDX)] * sen_len + ctx_p2_slot = [word_dict.get(ctx_p2, UNK_IDX)] * sen_len + + marks = mark.split() + mark_slot = [int(w) for w in marks] + + label_list = label.split() + label_slot = [label_dict.get(w) for w in label_list] + yield word_slot, ctx_n2_slot, ctx_n1_slot, \ + ctx_0_slot, ctx_p1_slot, ctx_p2_slot, predicate_slot, mark_slot, label_slot + + return reader + + +def main(): + paddle.init(use_gpu=False, trainer_count=1) + + label_dict_len = 500 + # define network topology + output = db_lstm() + target = paddle.layer.data(name='target', size=label_dict_len) + crf_cost = paddle.layer.crf_layer( + size=500, + input=output, + label=target, + param_attr=paddle.attr.Param( + name='crfw', initial_std=default_std, learning_rate=mix_hidden_lr)) + + crf_dec = paddle.layer.crf_decoding_layer( + name='crf_dec_l', + size=label_dict_len, + input=output, + label=target, + param_attr=paddle.attr.Param(name='crfw')) + + topo = [crf_cost, crf_dec] + parameters = paddle.parameters.create(topo) + optimizer = paddle.optimizer.Momentum(momentum=0.01, learning_rate=2e-2) + + def event_handler(event): + if isinstance(event, paddle.event.EndIteration): + para = parameters.get('___fc_2__.w0') + print "Pass %d, Batch %d, Cost %f" % (event.pass_id, event.batch_id, + event.cost, para.mean()) + + else: + pass + + trainer = paddle.trainer.SGD(update_equation=optimizer) + + trainer.train( + train_data_reader=train_reader, + batch_size=32, + topology=topo, + parameters=parameters, + event_handler=event_handler, + num_passes=10000, + data_types=[], + reader_dict={}) + + +if __name__ == '__main__': + main() diff --git a/demo/semantic_role_labeling/model_v2.py b/demo/semantic_role_labeling/model_v2.py new file mode 100644 index 0000000000..d4d011770d --- /dev/null +++ b/demo/semantic_role_labeling/model_v2.py @@ -0,0 +1,103 @@ +import paddle.v2 as paddle + + +def db_lstm(word_dict_len, label_dict_len, pred_len): + mark_dict_len = 2 + word_dim = 32 + mark_dim = 5 + hidden_dim = 512 + depth = 8 + + #8 features + word = paddle.layer.data(name='word_data', size=word_dict_len) + predicate = paddle.layer.data(name='verb_data', size=pred_len) + + ctx_n2 = paddle.layer.data(name='ctx_n2_data', size=word_dict_len) + ctx_n1 = paddle.layer.data(name='ctx_n1_data', size=word_dict_len) + ctx_0 = paddle.layer.data(name='ctx_0_data', size=word_dict_len) + ctx_p1 = paddle.layer.data(name='ctx_p1_data', size=word_dict_len) + ctx_p2 = paddle.layer.data(name='ctx_p2_data', size=word_dict_len) + mark = paddle.layer.data(name='mark_data', size=mark_dict_len) + + default_std = 1 / math.sqrt(hidden_dim) / 3.0 + + emb_para = paddle.attr.Param(name='emb', initial_std=0., learning_rate=0.) + std_0 = paddle.attr.Param(initial_std=0.) 
+ std_default = paddle.attr.Param(initial_std=default_std) + + predicate_embedding = paddle.layer.embeding( + size=word_dim, + input=predicate, + param_attr=paddle.attr.Param( + name='vemb', initial_std=default_std)) + mark_embedding = paddle.layer.embeding( + name='word_ctx-in_embedding', + size=mark_dim, + input=mark, + param_attr=std_0) + + word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2] + emb_layers = [ + paddle.layer.embeding( + size=word_dim, input=x, param_attr=emb_para) for x in word_input + ] + emb_layers.append(predicate_embedding) + emb_layers.append(mark_embedding) + + hidden_0 = paddle.layer.mixed( + size=hidden_dim, + bias_attr=std_default, + input=[ + paddle.layer.full_matrix_projection( + input=emb, param_attr=std_default) for emb in emb_layers + ]) + + mix_hidden_lr = 1e-3 + lstm_para_attr = paddle.attr.Param(initial_std=0.0, learning_rate=1.0) + hidden_para_attr = paddle.attr.Param( + initial_std=default_std, learning_rate=mix_hidden_lr) + + lstm_0 = paddle.layer.lstmemory( + input=hidden_0, + act=paddle.activation.Relu(), + gate_act=paddle.activation.Sigmoid(), + state_act=paddle.activation.Sigmoid(), + bias_attr=std_0, + param_attr=lstm_para_attr) + + #stack L-LSTM and R-LSTM with direct edges + input_tmp = [hidden_0, lstm_0] + + for i in range(1, depth): + mix_hidden = paddle.layer.mixed( + size=hidden_dim, + bias_attr=std_default, + input=[ + paddle.layer.full_matrix_projection( + input=input_tmp[0], param_attr=hidden_para_attr), + paddle.layer.full_matrix_projection( + input=input_tmp[1], param_attr=lstm_para_attr) + ]) + + lstm = paddle.layer.lstmemory( + input=mix_hidden, + act=paddle.activation.Relu(), + gate_act=paddle.activation.Sigmoid(), + state_act=paddle.activation.Sigmoid(), + reverse=((i % 2) == 1), + bias_attr=std_0, + param_attr=lstm_para_attr) + + input_tmp = [mix_hidden, lstm] + + feature_out = paddle.layer.mixed( + size=label_dict_len, + bias_attr=std_default, + input=[ + paddle.layer.full_matrix_projection( + input=input_tmp[0], param_attr=hidden_para_attr), + paddle.layer.full_matrix_projection( + input=input_tmp[1], param_attr=lstm_para_attr) + ], ) + + return feature_out From da754d85de3ffcf850ad2b375d8922110c7279e1 Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Tue, 28 Feb 2017 14:55:16 +0800 Subject: [PATCH 11/87] srl api training --- demo/semantic_role_labeling/api_train_v2.py | 21 +++++++++++------ demo/semantic_role_labeling/model_v2.py | 25 +++++++++++---------- 2 files changed, 27 insertions(+), 19 deletions(-) diff --git a/demo/semantic_role_labeling/api_train_v2.py b/demo/semantic_role_labeling/api_train_v2.py index 33b966cca5..daaf0f0582 100644 --- a/demo/semantic_role_labeling/api_train_v2.py +++ b/demo/semantic_role_labeling/api_train_v2.py @@ -1,8 +1,6 @@ import numpy import paddle.v2 as paddle -from paddle.trainer_config_helpers.atts import ParamAttr - -from mode_v2 import db_lstm +from model_v2 import db_lstm word_dict_file = './data/wordDict.txt' label_dict_file = './data/targetDict.txt' @@ -64,9 +62,8 @@ def train_reader(file_name="data/feature"): def main(): paddle.init(use_gpu=False, trainer_count=1) - label_dict_len = 500 # define network topology - output = db_lstm() + output = db_lstm(word_dict_len, label_dict_len, pred_len) target = paddle.layer.data(name='target', size=label_dict_len) crf_cost = paddle.layer.crf_layer( size=500, @@ -97,6 +94,17 @@ def main(): trainer = paddle.trainer.SGD(update_equation=optimizer) + reader_dict = { + 'word_data': 0, + 'verb_data': 1, + 'ctx_n2_data': 2, + 'ctx_n1_data': 3, + 
'ctx_0_data': 4, + 'ctx_p1_data': 5, + 'ctx_p2_data': 6, + 'mark_data': 7, + 'target': 8 + } trainer.train( train_data_reader=train_reader, batch_size=32, @@ -104,8 +112,7 @@ def main(): parameters=parameters, event_handler=event_handler, num_passes=10000, - data_types=[], - reader_dict={}) + reader_dict=reader_dict) if __name__ == '__main__': diff --git a/demo/semantic_role_labeling/model_v2.py b/demo/semantic_role_labeling/model_v2.py index d4d011770d..a78190a2b2 100644 --- a/demo/semantic_role_labeling/model_v2.py +++ b/demo/semantic_role_labeling/model_v2.py @@ -1,3 +1,4 @@ +import math import paddle.v2 as paddle @@ -9,15 +10,18 @@ def db_lstm(word_dict_len, label_dict_len, pred_len): depth = 8 #8 features - word = paddle.layer.data(name='word_data', size=word_dict_len) - predicate = paddle.layer.data(name='verb_data', size=pred_len) + def d_type(size): + return paddle.data_type.integer_value_sequence(size) - ctx_n2 = paddle.layer.data(name='ctx_n2_data', size=word_dict_len) - ctx_n1 = paddle.layer.data(name='ctx_n1_data', size=word_dict_len) - ctx_0 = paddle.layer.data(name='ctx_0_data', size=word_dict_len) - ctx_p1 = paddle.layer.data(name='ctx_p1_data', size=word_dict_len) - ctx_p2 = paddle.layer.data(name='ctx_p2_data', size=word_dict_len) - mark = paddle.layer.data(name='mark_data', size=mark_dict_len) + word = paddle.layer.data(name='word_data', type=d_type(word_dict_len)) + predicate = paddle.layer.data(name='verb_data', type=d_type(pred_len)) + + ctx_n2 = paddle.layer.data(name='ctx_n2_data', type=d_type(word_dict_len)) + ctx_n1 = paddle.layer.data(name='ctx_n1_data', type=d_type(word_dict_len)) + ctx_0 = paddle.layer.data(name='ctx_0_data', type=d_type(word_dict_len)) + ctx_p1 = paddle.layer.data(name='ctx_p1_data', type=d_type(word_dict_len)) + ctx_p2 = paddle.layer.data(name='ctx_p2_data', type=d_type(word_dict_len)) + mark = paddle.layer.data(name='mark_data', type=d_type(mark_dict_len)) default_std = 1 / math.sqrt(hidden_dim) / 3.0 @@ -31,10 +35,7 @@ def db_lstm(word_dict_len, label_dict_len, pred_len): param_attr=paddle.attr.Param( name='vemb', initial_std=default_std)) mark_embedding = paddle.layer.embeding( - name='word_ctx-in_embedding', - size=mark_dim, - input=mark, - param_attr=std_0) + size=mark_dim, input=mark, param_attr=std_0) word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2] emb_layers = [ From b6a0f9a32a4a3e35f9d8ffa4728c69fada5fe5ed Mon Sep 17 00:00:00 2001 From: liaogang Date: Tue, 28 Feb 2017 18:00:34 +0800 Subject: [PATCH 12/87] Add vgg training via api v2 --- demo/image_classification/train_v2_vgg.py | 85 +++++++++++++++++++++++ 1 file changed, 85 insertions(+) create mode 100644 demo/image_classification/train_v2_vgg.py diff --git a/demo/image_classification/train_v2_vgg.py b/demo/image_classification/train_v2_vgg.py new file mode 100644 index 0000000000..33b53b27da --- /dev/null +++ b/demo/image_classification/train_v2_vgg.py @@ -0,0 +1,85 @@ +import paddle.v2 as paddle + + +def event_handler(event): + if isinstance(event, paddle.event.EndIteration): + if event.batch_id % 100 == 0: + print "Pass %d, Batch %d, Cost %f" % (event.pass_id, event.batch_id, + event.cost) + else: + pass + + +def vgg_bn_drop(input): + def conv_block(ipt, num_filter, groups, dropouts, num_channels=None): + return paddle.layer.img_conv_group( + input=ipt, + num_channels=num_channels, + pool_size=2, + pool_stride=2, + conv_num_filter=[num_filter] * groups, + conv_filter_size=3, + conv_act=paddle.activation.Relu(), + conv_with_batchnorm=True, + 
conv_batchnorm_drop_rate=dropouts, + pool_type=pooling.Max()) + + conv1 = conv_block(input, 64, 2, [0.3, 0], 3) + conv2 = conv_block(conv1, 128, 2, [0.4, 0]) + conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0]) + conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0]) + conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0]) + + drop = paddle.layer.dropout(input=conv5, dropout_rate=0.5) + fc1 = paddle.layer.fc(input=drop, size=512, act=paddle.activation.Linear()) + bn = paddle.layer.batch_norm( + input=fc1, + act=paddle.activation.Relu(), + layer_attr=ExtraAttr(drop_rate=0.5)) + fc2 = paddle.layer.fc(input=bn, size=512, act=paddle.activation.Linear()) + return fc2 + + +def main(): + datadim = 3 * 32 * 32 + classdim = 10 + + paddle.init(use_gpu=False, trainer_count=1) + + image = paddle.layer.data( + name="image", type=paddle.data_type.dense_vector(datadim)) + # net = vgg_bn_drop(image) + out = paddle.layer.fc(input=image, + size=classdim, + act=paddle.activation.Softmax()) + + lbl = paddle.layer.data( + name="label", type=paddle.data_type.integer_value(classdim)) + cost = paddle.layer.classification_cost(input=out, label=lbl) + + parameters = paddle.parameters.create(cost) + momentum_optimizer = paddle.optimizer.Momentum( + momentum=0.9, + regularization=paddle.optimizer.L2Regularization(rate=0.0005 * 128), + learning_rate=0.1 / 128.0, + learning_rate_decay_a=0.1, + learning_rate_decay_b=50000 * 100, + learning_rate_schedule='discexp', + batch_size=128) + + trainer = paddle.trainer.SGD(update_equation=momentum_optimizer) + trainer.train( + reader=paddle.reader.batched( + paddle.reader.shuffle( + paddle.dataset.cifar.train10(), buf_size=3072), + batch_size=128), + cost=cost, + num_passes=1, + parameters=parameters, + event_handler=event_handler, + reader_dict={'image': 0, + 'label': 1}, ) + + +if __name__ == '__main__': + main() From 7ad8363036af9d8ae91e6698ff09804023602bdf Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Tue, 28 Feb 2017 19:44:56 +0800 Subject: [PATCH 13/87] support boot_layer --- .../paddle/trainer_config_helpers/layers.py | 6 ++- python/paddle/v2/layer.py | 37 ++++++++++++++++--- python/paddle/v2/tests/CMakeLists.txt | 1 - 3 files changed, 37 insertions(+), 7 deletions(-) diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index a8b536dda4..4f7a2735e2 100755 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -3110,7 +3110,8 @@ def recurrent_group(step, name=None, targetInlink=None, is_generating=False, - in_args_converter=None): + in_args_converter=None, + boot_layer=None): """ Recurrent layer group is an extremely flexible recurrent unit in PaddlePaddle. 
As long as the user defines the calculation done within a @@ -3256,6 +3257,9 @@ def recurrent_group(step, if in_args_converter is None: layer_outs = step(*in_args) else: + # append boot_layer to the last of in_args + if boot_layer is not None: + in_args.append(boot_layer) layer_outs = step(*in_args_converter(*in_args)).to_proto(dict()) if isinstance(layer_outs, LayerOutput): diff --git a/python/paddle/v2/layer.py b/python/paddle/v2/layer.py index 1155eca9c8..542d5a515c 100644 --- a/python/paddle/v2/layer.py +++ b/python/paddle/v2/layer.py @@ -140,10 +140,13 @@ class Layer(object): if self.name is None: return self.to_proto_impl(**kwargs) elif isinstance(self, MemoryV2): - return self.to_proto_impl(**kwargs) - elif self.name not in context: - context[self.name] = self.to_proto_impl(**kwargs) + name = self.name + "#__memory__" + if name not in context: + context[name] = self.to_proto_impl(**kwargs) + return context[name] + if self.name not in context: + context[self.name] = self.to_proto_impl(**kwargs) return context[self.name] def to_proto_impl(self, **kwargs): @@ -256,9 +259,32 @@ class LayerOutputV2(Layer): return self.layer_output +class StaticInputV2(Layer): + def __init__(self, **kwargs): + self.__parent_names__ = ['input'] + other_kwargs = dict() + parent_layers = dict() + for pname in self.__parent_names__: + if kwargs.has_key(pname): + parent_layers[pname] = kwargs[pname] + for key in kwargs.keys(): + if key not in self.__parent_names__: + other_kwargs[key] = kwargs[key] + self.__kwargs__ = other_kwargs + super(StaticInputV2, self).__init__(parent_layers=parent_layers) + + def to_proto_impl(self, **kwargs): + args = dict() + for each in kwargs: + args[each] = kwargs[each] + for each in self.__kwargs__: + args[each] = self.__kwargs__[each] + return conf_helps.StaticInput(**args) + + class RecurrentGroupV2(Layer): def __init__(self, name, **kwargs): - self.__parent_names__ = ['input'] + self.__parent_names__ = ['input', 'boot_layer'] other_kwargs = dict() parent_layers = dict() for pname in self.__parent_names__: @@ -443,7 +469,8 @@ layer_list = [ ['nce', 'nce_layer', ['input', 'label']], ['hsigmoid', 'hsigmoid', ['input', 'label']], # check layers - ['eos', 'eos_layer', ['input']] + ['eos', 'eos_layer', ['input']], + ['gru_step_layer', 'gru_step_layer', ['input', 'output_mem']] ] for l in layer_list: globals()[l[0]] = __convert_to_v2__(l[1], l[2]) diff --git a/python/paddle/v2/tests/CMakeLists.txt b/python/paddle/v2/tests/CMakeLists.txt index 948cebdf72..572deaff35 100644 --- a/python/paddle/v2/tests/CMakeLists.txt +++ b/python/paddle/v2/tests/CMakeLists.txt @@ -10,7 +10,6 @@ add_test(NAME test_v2_rnn_layer COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/ ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/python/paddle/v2/tests/test_rnn_layer.py) - add_test(NAME test_topology COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/ ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/python/paddle/v2/tests/test_topology.py From e6e8bfb44ef70320bcf1cca1abeebd6ff58281b4 Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Tue, 28 Feb 2017 19:48:22 +0800 Subject: [PATCH 14/87] update --- demo/semantic_role_labeling/api_train_v2.py | 55 ++++++++++----------- demo/semantic_role_labeling/model_v2.py | 25 ++++++++-- 2 files changed, 46 insertions(+), 34 deletions(-) diff --git a/demo/semantic_role_labeling/api_train_v2.py b/demo/semantic_role_labeling/api_train_v2.py index daaf0f0582..0317c818db 100644 --- a/demo/semantic_role_labeling/api_train_v2.py +++ 
b/demo/semantic_role_labeling/api_train_v2.py @@ -2,6 +2,8 @@ import numpy import paddle.v2 as paddle from model_v2 import db_lstm +UNK_IDX = 0 + word_dict_file = './data/wordDict.txt' label_dict_file = './data/targetDict.txt' predicate_file = './data/verbDict.txt' @@ -29,6 +31,10 @@ word_dict_len = len(word_dict) label_dict_len = len(label_dict) pred_len = len(predicate_dict) +print 'word_dict_len=%d' % word_dict_len +print 'label_dict_len=%d' % label_dict_len +print 'pred_len=%d' % pred_len + def train_reader(file_name="data/feature"): def reader(): @@ -63,31 +69,16 @@ def main(): paddle.init(use_gpu=False, trainer_count=1) # define network topology - output = db_lstm(word_dict_len, label_dict_len, pred_len) - target = paddle.layer.data(name='target', size=label_dict_len) - crf_cost = paddle.layer.crf_layer( - size=500, - input=output, - label=target, - param_attr=paddle.attr.Param( - name='crfw', initial_std=default_std, learning_rate=mix_hidden_lr)) - - crf_dec = paddle.layer.crf_decoding_layer( - name='crf_dec_l', - size=label_dict_len, - input=output, - label=target, - param_attr=paddle.attr.Param(name='crfw')) - - topo = [crf_cost, crf_dec] - parameters = paddle.parameters.create(topo) + crf_cost, crf_dec = db_lstm(word_dict_len, label_dict_len, pred_len) + + #parameters = paddle.parameters.create([crf_cost, crf_dec]) + parameters = paddle.parameters.create(crf_cost) optimizer = paddle.optimizer.Momentum(momentum=0.01, learning_rate=2e-2) def event_handler(event): if isinstance(event, paddle.event.EndIteration): - para = parameters.get('___fc_2__.w0') print "Pass %d, Batch %d, Cost %f" % (event.pass_id, event.batch_id, - event.cost, para.mean()) + event.cost) else: pass @@ -96,23 +87,27 @@ def main(): reader_dict = { 'word_data': 0, - 'verb_data': 1, - 'ctx_n2_data': 2, - 'ctx_n1_data': 3, - 'ctx_0_data': 4, - 'ctx_p1_data': 5, - 'ctx_p2_data': 6, + 'ctx_n2_data': 1, + 'ctx_n1_data': 2, + 'ctx_0_data': 3, + 'ctx_p1_data': 4, + 'ctx_p2_data': 5, + 'verb_data': 6, 'mark_data': 7, - 'target': 8 + 'target': 8, } + #trn_reader = paddle.reader.batched( + # paddle.reader.shuffle( + # train_reader(), buf_size=8192), batch_size=2) + trn_reader = paddle.reader.batched(train_reader(), batch_size=1) trainer.train( - train_data_reader=train_reader, - batch_size=32, - topology=topo, + reader=trn_reader, + cost=crf_cost, parameters=parameters, event_handler=event_handler, num_passes=10000, reader_dict=reader_dict) + #cost=[crf_cost, crf_dec], if __name__ == '__main__': diff --git a/demo/semantic_role_labeling/model_v2.py b/demo/semantic_role_labeling/model_v2.py index a78190a2b2..cec58e52c7 100644 --- a/demo/semantic_role_labeling/model_v2.py +++ b/demo/semantic_role_labeling/model_v2.py @@ -23,23 +23,25 @@ def db_lstm(word_dict_len, label_dict_len, pred_len): ctx_p2 = paddle.layer.data(name='ctx_p2_data', type=d_type(word_dict_len)) mark = paddle.layer.data(name='mark_data', type=d_type(mark_dict_len)) + target = paddle.layer.data(name='target', type=d_type(label_dict_len)) + default_std = 1 / math.sqrt(hidden_dim) / 3.0 emb_para = paddle.attr.Param(name='emb', initial_std=0., learning_rate=0.) std_0 = paddle.attr.Param(initial_std=0.) 
std_default = paddle.attr.Param(initial_std=default_std) - predicate_embedding = paddle.layer.embeding( + predicate_embedding = paddle.layer.embedding( size=word_dim, input=predicate, param_attr=paddle.attr.Param( name='vemb', initial_std=default_std)) - mark_embedding = paddle.layer.embeding( + mark_embedding = paddle.layer.embedding( size=mark_dim, input=mark, param_attr=std_0) word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2] emb_layers = [ - paddle.layer.embeding( + paddle.layer.embedding( size=word_dim, input=x, param_attr=emb_para) for x in word_input ] emb_layers.append(predicate_embedding) @@ -101,4 +103,19 @@ def db_lstm(word_dict_len, label_dict_len, pred_len): input=input_tmp[1], param_attr=lstm_para_attr) ], ) - return feature_out + crf_cost = paddle.layer.crf(size=label_dict_len, + input=feature_out, + label=target, + param_attr=paddle.attr.Param( + name='crfw', + initial_std=default_std, + learning_rate=mix_hidden_lr)) + + crf_dec = paddle.layer.crf_decoding( + name='crf_dec_l', + size=label_dict_len, + input=feature_out, + label=target, + param_attr=paddle.attr.Param(name='crfw')) + + return crf_cost, crf_dec From f7ecd312c5a56c48eeafd63fb168f106ad973e66 Mon Sep 17 00:00:00 2001 From: liaogang Date: Wed, 1 Mar 2017 12:49:35 +0800 Subject: [PATCH 15/87] update event handler --- demo/image_classification/train_v2_vgg.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/demo/image_classification/train_v2_vgg.py b/demo/image_classification/train_v2_vgg.py index 33b53b27da..25bfd798eb 100644 --- a/demo/image_classification/train_v2_vgg.py +++ b/demo/image_classification/train_v2_vgg.py @@ -6,8 +6,6 @@ def event_handler(event): if event.batch_id % 100 == 0: print "Pass %d, Batch %d, Cost %f" % (event.pass_id, event.batch_id, event.cost) - else: - pass def vgg_bn_drop(input): From d227f4479e5d9b58c45059871c5cd4e221b1a05f Mon Sep 17 00:00:00 2001 From: liaogang Date: Wed, 1 Mar 2017 15:06:24 +0800 Subject: [PATCH 16/87] Add resnet --- demo/image_classification/train_v2_resnet.py | 158 +++++++++++++++++++ demo/image_classification/train_v2_vgg.py | 16 +- 2 files changed, 173 insertions(+), 1 deletion(-) create mode 100644 demo/image_classification/train_v2_resnet.py diff --git a/demo/image_classification/train_v2_resnet.py b/demo/image_classification/train_v2_resnet.py new file mode 100644 index 0000000000..fdfa87cd87 --- /dev/null +++ b/demo/image_classification/train_v2_resnet.py @@ -0,0 +1,158 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
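+
+# This demo wires up a CIFAR-10 classifier with the v2 API: conv_bn_layer,
+# basicblock, bottleneck and layer_warp build the residual blocks,
+# resnet_imagenet and resnet_cifar10 assemble complete networks, and main()
+# trains resnet_cifar10 (depth 32) with momentum SGD on the cifar.train10 reader.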
+ +import paddle.v2 as paddle + + +def event_handler(event): + if isinstance(event, paddle.event.EndIteration): + if event.batch_id % 100 == 0: + print "Pass %d, Batch %d, Cost %f" % (event.pass_id, event.batch_id, + event.cost) + + +def conv_bn_layer(input, + ch_out, + filter_size, + stride, + padding, + active_type=paddle.activation.Relu(), + ch_in=None): + tmp = paddle.layer.img_conv( + input=input, + filter_size=filter_size, + num_channels=ch_in, + num_filters=ch_out, + stride=stride, + padding=padding, + act=paddle.activation.Linear(), + bias_attr=False) + return paddle.layer.batch_norm(input=tmp, act=active_type) + + +def shortcut(ipt, n_in, n_out, stride): + if n_in != n_out: + print("n_in != n_out") + return conv_bn_layer(ipt, n_out, 1, stride, 0, + paddle.activation.Linear()) + else: + return ipt + + +def basicblock(ipt, ch_out, stride): + ch_in = ipt.num_filters + tmp = conv_bn_layer(ipt, ch_out, 3, stride, 1) + tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, paddle.activation.Linear()) + short = shortcut(ipt, ch_in, ch_out, stride) + return paddle.layer.addto(input=[tmp, short], act=paddle.activation.Relu()) + + +def bottleneck(ipt, ch_out, stride): + ch_in = ipt.num_filter + tmp = conv_bn_layer(ipt, ch_out, 1, stride, 0) + tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1) + tmp = conv_bn_layer(tmp, ch_out * 4, 1, 1, 0, paddle.activation.Linear()) + short = shortcut(ipt, ch_in, ch_out * 4, stride) + return paddle.layer.addto(input=[tmp, short], act=paddle.activation.Relu()) + + +def layer_warp(block_func, ipt, features, count, stride): + tmp = block_func(ipt, features, stride) + for i in range(1, count): + tmp = block_func(tmp, features, 1) + return tmp + + +def resnet_imagenet(ipt, depth=50): + cfg = { + 18: ([2, 2, 2, 1], basicblock), + 34: ([3, 4, 6, 3], basicblock), + 50: ([3, 4, 6, 3], bottleneck), + 101: ([3, 4, 23, 3], bottleneck), + 152: ([3, 8, 36, 3], bottleneck) + } + stages, block_func = cfg[depth] + tmp = conv_bn_layer( + ipt, ch_in=3, ch_out=64, filter_size=7, stride=2, padding=3) + tmp = paddle.layer.img_pool(input=tmp, pool_size=3, stride=2) + tmp = layer_warp(block_func, tmp, 64, stages[0], 1) + tmp = layer_warp(block_func, tmp, 128, stages[1], 2) + tmp = layer_warp(block_func, tmp, 256, stages[2], 2) + tmp = layer_warp(block_func, tmp, 512, stages[3], 2) + tmp = paddle.layer.img_pool( + input=tmp, pool_size=7, stride=1, pool_type=paddle.pooling.Avg()) + + tmp = paddle.layer.fc(input=tmp, size=1000, act=paddle.activation.Softmax()) + return tmp + + +def resnet_cifar10(ipt, depth=32): + # depth should be one of 20, 32, 44, 56, 110, 1202 + assert (depth - 2) % 6 == 0 + n = (depth - 2) / 6 + nStages = {16, 64, 128} + conv1 = conv_bn_layer( + ipt, ch_in=3, ch_out=16, filter_size=3, stride=1, padding=1) + res1 = layer_warp(basicblock, conv1, 16, n, 1) + res2 = layer_warp(basicblock, res1, 32, n, 2) + res3 = layer_warp(basicblock, res2, 64, n, 2) + pool = paddle.layer.img_pool( + input=res3, pool_size=8, stride=1, pool_type=paddle.pooling.Avg()) + return pool + + +def main(): + datadim = 3 * 32 * 32 + classdim = 10 + + paddle.init(use_gpu=False, trainer_count=1) + + image = paddle.layer.data( + name="image", type=paddle.data_type.dense_vector(datadim)) + net = resnet_cifar10(image, depth=32) + out = paddle.layer.fc(input=net, + size=classdim, + act=paddle.activation.Softmax()) + + lbl = paddle.layer.data( + name="label", type=paddle.data_type.integer_value(classdim)) + cost = paddle.layer.classification_cost(input=out, label=lbl) + + parameters = paddle.parameters.create(cost) + + 
momentum_optimizer = paddle.optimizer.Momentum( + momentum=0.9, + regularization=paddle.optimizer.L2Regularization(rate=0.0002 * 128), + learning_rate=0.1 / 128.0, + learning_rate_decay_a=0.1, + learning_rate_decay_b=50000 * 100, + learning_rate_schedule='discexp', + batch_size=128) + + trainer = paddle.trainer.SGD(update_equation=momentum_optimizer) + trainer.train( + reader=paddle.reader.batched( + paddle.reader.shuffle( + paddle.dataset.cifar.train10(), buf_size=3072), + batch_size=128), + cost=cost, + num_passes=1, + parameters=parameters, + event_handler=event_handler, + reader_dict={'image': 0, + 'label': 1}, ) + + +if __name__ == '__main__': + main() diff --git a/demo/image_classification/train_v2_vgg.py b/demo/image_classification/train_v2_vgg.py index 25bfd798eb..5656ac85c6 100644 --- a/demo/image_classification/train_v2_vgg.py +++ b/demo/image_classification/train_v2_vgg.py @@ -1,3 +1,17 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import paddle.v2 as paddle @@ -20,7 +34,7 @@ def vgg_bn_drop(input): conv_act=paddle.activation.Relu(), conv_with_batchnorm=True, conv_batchnorm_drop_rate=dropouts, - pool_type=pooling.Max()) + pool_type=paddle.pooling.Max()) conv1 = conv_block(input, 64, 2, [0.3, 0], 3) conv2 = conv_block(conv1, 128, 2, [0.4, 0]) From 73af1942c8fcf89fffa0a13b7d8fdc6cdcdcb815 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Wed, 1 Mar 2017 15:54:46 +0800 Subject: [PATCH 17/87] add the implementation of rnn by yuyang --- .../paddle/trainer_config_helpers/layers.py | 2 +- python/paddle/v2/layer.py | 151 +++++++++++------- 2 files changed, 97 insertions(+), 56 deletions(-) diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 250878cbe1..dcb39784a5 100755 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -822,7 +822,7 @@ def data_layer(name, size, height=None, width=None, layer_attr=None): return LayerOutput(name, LayerType.DATA, size=size) -@wrap_name_default("embedding") +@wrap_name_default("embedding_layer") @wrap_param_attr_default() @layer_support(ERROR_CLIPPING) def embedding_layer(input, size, name=None, param_attr=None, layer_attr=None): diff --git a/python/paddle/v2/layer.py b/python/paddle/v2/layer.py index 4f6c71664a..71d0e54c0a 100644 --- a/python/paddle/v2/layer.py +++ b/python/paddle/v2/layer.py @@ -76,6 +76,9 @@ from paddle.trainer_config_helpers.default_decorators import \ wrap_bias_attr_default from paddle.trainer_config_helpers.default_decorators import wrap_name_default from paddle.trainer_config_helpers.layers import layer_support +from paddle.trainer.config_parser import \ + RecurrentLayerGroupWithoutOutLinksBegin, RecurrentLayerGroupSetOutLink, \ + RecurrentLayerGroupEnd, model_type import activation import data_type @@ -126,21 +129,28 @@ class Layer(object): self.__parent_layers__[layer_name]) kwargs[layer_name] = v1_layer - if self.name is None: + if 
self.context_name() is None: return self.to_proto_impl(**kwargs) elif isinstance(self, MemoryV2): name = self.name + "#__memory__" if name not in context: context[name] = self.to_proto_impl(**kwargs) return context[name] - - if self.name not in context: - context[self.name] = self.to_proto_impl(**kwargs) + elif self.context_name() not in context: + context[self.context_name()] = self.to_proto_impl(**kwargs) return context[self.name] def to_proto_impl(self, **kwargs): raise NotImplementedError() + def context_name(self): + """ + Context name means the context which stores `to_proto_impl` result. + If multiple layer share same context_name, the `to_proto_impl` of them + will be invoked only once. + """ + return self.name + def __convert_to_v2__(method_name, parent_names, is_default_name=True): if is_default_name: @@ -231,6 +241,9 @@ class MemoryV2(Layer): return conf_helps.memory(name=self.name, size=self.size, **args) + def context_name(self): + return self.name + "#memory" + class LayerOutputV2(Layer): """ @@ -249,60 +262,20 @@ class LayerOutputV2(Layer): class StaticInputV2(Layer): - def __init__(self, **kwargs): - self.__parent_names__ = ['input'] - other_kwargs = dict() - parent_layers = dict() - for pname in self.__parent_names__: - if kwargs.has_key(pname): - parent_layers[pname] = kwargs[pname] - for key in kwargs.keys(): - if key not in self.__parent_names__: - other_kwargs[key] = kwargs[key] - self.__kwargs__ = other_kwargs - super(StaticInputV2, self).__init__(parent_layers=parent_layers) - - def to_proto_impl(self, **kwargs): - args = dict() - for each in kwargs: - args[each] = kwargs[each] - for each in self.__kwargs__: - args[each] = self.__kwargs__[each] - return conf_helps.StaticInput(**args) - - -class RecurrentGroupV2(Layer): - def __init__(self, name, **kwargs): - self.__parent_names__ = ['input', 'boot_layer'] - other_kwargs = dict() - parent_layers = dict() - for pname in self.__parent_names__: - if kwargs.has_key(pname): - parent_layers[pname] = kwargs[pname] - for key in kwargs.keys(): - if key not in self.__parent_names__: - other_kwargs[key] = kwargs[key] - self.__kwargs__ = other_kwargs - - super(RecurrentGroupV2, self).__init__( - name=name, parent_layers=parent_layers) + def __init__(self, input=None, **kwargs): + assert input is not None + self.__kwargs__ = kwargs + super(StaticInputV2, self).__init__( + name=input.name, parent_layers={'input': input}) - wrapper = wrap_name_default(name_prefix='recurrent_group') - __init__ = wrapper(__init__) + def context_name(self): + return self.name + "#static_input" def to_proto_impl(self, **kwargs): - def in_args_converter(*in_args): - if not isinstance(in_args, collections.Sequence): - in_args = [in_args] - return [LayerOutputV2(input) for input in in_args] - args = dict() - for each in kwargs: - args[each] = kwargs[each] - for each in self.__kwargs__: - args[each] = self.__kwargs__[each] - return conf_helps.recurrent_group( - name=self.name, in_args_converter=in_args_converter, **args) + args.update(kwargs) + args.update(self.__kwargs__) + return conf_helps.StaticInput(**args) class MixedLayerV2(Layer): @@ -377,11 +350,79 @@ def mixed(size=0, return MixedLayerV2(size, input, name, act, bias_attr, layer_attr) +class RecurrentLayerInput(Layer): + def __init__(self, recurrent_name, index, parent_layers): + assert len(parent_layers) == 1 + self.__parents__ = parent_layers.values()[0] + print self.__parents__, parent_layers + super(RecurrentLayerInput, self).__init__( + name=self.__parents__[index].name, 
parent_layers=parent_layers) + self.__recurrent_name__ = recurrent_name + + def context_name(self): + return self.__recurrent_name__ + ".begin" + + def to_proto_impl(self, **kwargs): + model_type('recurrent_nn') + RecurrentLayerGroupWithoutOutLinksBegin( + name=self.__recurrent_name__, + in_links=map(lambda x: x.name, self.__parents__)) + return self + + +class RecurrentLayerOutput(Layer): + def __init__(self, recurrent_name, index, parent_layers): + assert len(parent_layers) == 1 + self.__parents__ = parent_layers.values()[0] + super(RecurrentLayerOutput, self).__init__( + name=self.__parents__[index].name, parent_layers=parent_layers) + self.__recurrent_name__ = recurrent_name + + def context_name(self): + return self.__recurrent_name__ + ".end" + + def to_proto_impl(self, **kwargs): + for l in self.__parents__: + RecurrentLayerGroupSetOutLink(l.name) + RecurrentLayerGroupEnd(name=self.__recurrent_name__) + + +@wrap_name_default() +def recurrent_group(step, input, name=None): + if not isinstance(input, collections.Sequence): + input = [input] + + actual_input = [ + RecurrentLayerInput( + recurrent_name=name, + index=i, + parent_layers={'recurrent_inputs': input}) + for i in xrange(len(input)) + ] + + actual_output = step(*actual_input) + + if not isinstance(actual_output, collections.Sequence): + actual_output = [actual_output] + + retv = [ + RecurrentLayerOutput( + recurrent_name=name, + index=i, + parent_layers={'recurrent_outputs': actual_output}) + for i in xrange(len(actual_output)) + ] + if len(retv) == 1: + return retv[0] + else: + return retv + + LayerV2 = Layer data = DataLayerV2 AggregateLevel = conf_helps.layers.AggregateLevel ExpandLevel = conf_helps.layers.ExpandLevel -recurrent_group = RecurrentGroupV2 +recurrent_group = recurrent_group memory = MemoryV2 From fd3be087d603bc1ea399769b79c1b0f9e1758161 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Wed, 1 Mar 2017 16:01:42 +0800 Subject: [PATCH 18/87] restore recurrent_group in v1 --- python/paddle/trainer_config_helpers/layers.py | 12 ++---------- python/paddle/v2/layer.py | 5 ----- 2 files changed, 2 insertions(+), 15 deletions(-) diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index dcb39784a5..2b95c2ed0f 100755 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -3115,9 +3115,7 @@ def recurrent_group(step, reverse=False, name=None, targetInlink=None, - is_generating=False, - in_args_converter=None, - boot_layer=None): + is_generating=False): """ Recurrent layer group is an extremely flexible recurrent unit in PaddlePaddle. 
As long as the user defines the calculation done within a @@ -3260,13 +3258,7 @@ def recurrent_group(step, assert (is_generating != has_LayerOutput) - if in_args_converter is None: - layer_outs = step(*in_args) - else: - # append boot_layer to the last of in_args - if boot_layer is not None: - in_args.append(boot_layer) - layer_outs = step(*in_args_converter(*in_args)).to_proto(dict()) + layer_outs = step(*in_args) if isinstance(layer_outs, LayerOutput): layer_outs = [layer_outs] diff --git a/python/paddle/v2/layer.py b/python/paddle/v2/layer.py index 71d0e54c0a..f1ca0b46eb 100644 --- a/python/paddle/v2/layer.py +++ b/python/paddle/v2/layer.py @@ -131,11 +131,6 @@ class Layer(object): if self.context_name() is None: return self.to_proto_impl(**kwargs) - elif isinstance(self, MemoryV2): - name = self.name + "#__memory__" - if name not in context: - context[name] = self.to_proto_impl(**kwargs) - return context[name] elif self.context_name() not in context: context[self.context_name()] = self.to_proto_impl(**kwargs) return context[self.name] From 6b199367e0339119a699292ff488976bdb22554f Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Wed, 1 Mar 2017 16:27:55 +0800 Subject: [PATCH 19/87] handle memory layer --- python/paddle/v2/layer.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/python/paddle/v2/layer.py b/python/paddle/v2/layer.py index f1ca0b46eb..bdb0c29a47 100644 --- a/python/paddle/v2/layer.py +++ b/python/paddle/v2/layer.py @@ -133,7 +133,11 @@ class Layer(object): return self.to_proto_impl(**kwargs) elif self.context_name() not in context: context[self.context_name()] = self.to_proto_impl(**kwargs) - return context[self.name] + + if self.use_context_name(): + return context[self.context_name()] + else: + return context[self.name] def to_proto_impl(self, **kwargs): raise NotImplementedError() @@ -146,6 +150,9 @@ class Layer(object): """ return self.name + def use_context_name(self): + return False + def __convert_to_v2__(method_name, parent_names, is_default_name=True): if is_default_name: @@ -239,6 +246,13 @@ class MemoryV2(Layer): def context_name(self): return self.name + "#memory" + def use_context_name(self): + """ + memory layer will have the same name with some layer + :return: + """ + return True + class LayerOutputV2(Layer): """ From 542eb736ab66ca5f7f974fde8d6a91bbfa781f4b Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Wed, 1 Mar 2017 15:47:07 +0800 Subject: [PATCH 20/87] update --- demo/semantic_role_labeling/api_train_v2.py | 37 +++++++++++---------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/demo/semantic_role_labeling/api_train_v2.py b/demo/semantic_role_labeling/api_train_v2.py index 0317c818db..cfbd2a0224 100644 --- a/demo/semantic_role_labeling/api_train_v2.py +++ b/demo/semantic_role_labeling/api_train_v2.py @@ -1,4 +1,4 @@ -import numpy +import numpy as np import paddle.v2 as paddle from model_v2 import db_lstm @@ -31,10 +31,6 @@ word_dict_len = len(word_dict) label_dict_len = len(label_dict) pred_len = len(predicate_dict) -print 'word_dict_len=%d' % word_dict_len -print 'label_dict_len=%d' % label_dict_len -print 'pred_len=%d' % pred_len - def train_reader(file_name="data/feature"): def reader(): @@ -65,25 +61,34 @@ def train_reader(file_name="data/feature"): return reader +def load_parameter(file_name, h, w): + with open(file_name, 'rb') as f: + f.read(16) # skip header for float type. 
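+        # the remaining bytes are the raw float32 values of the parameter,
+        # reshaped to (h, w); main() below uses this to load the pre-trained
+        # 44068 x 32 'emb' embedding table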
+ return np.fromfile(f, dtype=np.float32).reshape(h, w) + + def main(): paddle.init(use_gpu=False, trainer_count=1) # define network topology crf_cost, crf_dec = db_lstm(word_dict_len, label_dict_len, pred_len) - #parameters = paddle.parameters.create([crf_cost, crf_dec]) - parameters = paddle.parameters.create(crf_cost) + parameters = paddle.parameters.create([crf_cost, crf_dec]) optimizer = paddle.optimizer.Momentum(momentum=0.01, learning_rate=2e-2) def event_handler(event): if isinstance(event, paddle.event.EndIteration): - print "Pass %d, Batch %d, Cost %f" % (event.pass_id, event.batch_id, - event.cost) - + if event.batch_id % 100 == 0: + print "Pass %d, Batch %d, Cost %f, %s" % ( + event.pass_id, event.batch_id, event.cost, event.metrics) else: pass - trainer = paddle.trainer.SGD(update_equation=optimizer) + trainer = paddle.trainer.SGD(cost=crf_cost, + parameters=parameters, + update_equation=optimizer) + + parameters.set('emb', load_parameter("data/emb", 44068, 32)) reader_dict = { 'word_data': 0, @@ -96,18 +101,14 @@ def main(): 'mark_data': 7, 'target': 8, } - #trn_reader = paddle.reader.batched( - # paddle.reader.shuffle( - # train_reader(), buf_size=8192), batch_size=2) - trn_reader = paddle.reader.batched(train_reader(), batch_size=1) + trn_reader = paddle.reader.batched( + paddle.reader.shuffle( + train_reader(), buf_size=8192), batch_size=10) trainer.train( reader=trn_reader, - cost=crf_cost, - parameters=parameters, event_handler=event_handler, num_passes=10000, reader_dict=reader_dict) - #cost=[crf_cost, crf_dec], if __name__ == '__main__': From 82ec9f225b210ff99d83b97e0e09938061aba4ee Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Wed, 1 Mar 2017 17:50:19 +0800 Subject: [PATCH 21/87] Training the understand sentiment model with the new API. --- demo/sentiment/train_with_new_api.py | 182 +++++++++++++++++++++++++++ 1 file changed, 182 insertions(+) create mode 100644 demo/sentiment/train_with_new_api.py diff --git a/demo/sentiment/train_with_new_api.py b/demo/sentiment/train_with_new_api.py new file mode 100644 index 0000000000..f937b02906 --- /dev/null +++ b/demo/sentiment/train_with_new_api.py @@ -0,0 +1,182 @@ +from os.path import join as join_path +import paddle.v2 as paddle +import paddle.v2.layer as layer +import paddle.v2.activation as activation +import paddle.v2.data_type as data_type + + +def sequence_conv_pool(input, + input_size, + context_len, + hidden_size, + name=None, + context_start=None, + pool_type=None, + context_proj_layer_name=None, + context_proj_param_attr=False, + fc_layer_name=None, + fc_param_attr=None, + fc_bias_attr=None, + fc_act=None, + pool_bias_attr=None, + fc_attr=None, + context_attr=None, + pool_attr=None): + """ + Text convolution pooling layers helper. + + Text input => Context Projection => FC Layer => Pooling => Output. + + :param name: name of output layer(pooling layer name) + :type name: basestring + :param input: name of input layer + :type input: LayerOutput + :param context_len: context projection length. See + context_projection's document. + :type context_len: int + :param hidden_size: FC Layer size. + :type hidden_size: int + :param context_start: context projection length. See + context_projection's context_start. + :type context_start: int or None + :param pool_type: pooling layer type. See pooling_layer's document. + :type pool_type: BasePoolingType. + :param context_proj_layer_name: context projection layer name. + None if user don't care. 
+ :type context_proj_layer_name: basestring + :param context_proj_param_attr: context projection parameter attribute. + None if user don't care. + :type context_proj_param_attr: ParameterAttribute or None. + :param fc_layer_name: fc layer name. None if user don't care. + :type fc_layer_name: basestring + :param fc_param_attr: fc layer parameter attribute. None if user don't care. + :type fc_param_attr: ParameterAttribute or None + :param fc_bias_attr: fc bias parameter attribute. False if no bias, + None if user don't care. + :type fc_bias_attr: ParameterAttribute or None + :param fc_act: fc layer activation type. None means tanh + :type fc_act: BaseActivation + :param pool_bias_attr: pooling layer bias attr. None if don't care. + False if no bias. + :type pool_bias_attr: ParameterAttribute or None. + :param fc_attr: fc layer extra attribute. + :type fc_attr: ExtraLayerAttribute + :param context_attr: context projection layer extra attribute. + :type context_attr: ExtraLayerAttribute + :param pool_attr: pooling layer extra attribute. + :type pool_attr: ExtraLayerAttribute + :return: output layer name. + :rtype: LayerOutput + """ + # Set Default Value to param + context_proj_layer_name = "%s_conv_proj" % name \ + if context_proj_layer_name is None else context_proj_layer_name + + with layer.mixed( + name=context_proj_layer_name, + size=input_size * context_len, + act=activation.Linear(), + layer_attr=context_attr) as m: + m += layer.context_projection( + input=input, + context_len=context_len, + context_start=context_start, + padding_attr=context_proj_param_attr) + + fc_layer_name = "%s_conv_fc" % name \ + if fc_layer_name is None else fc_layer_name + fl = layer.fc(name=fc_layer_name, + input=m, + size=hidden_size, + act=fc_act, + layer_attr=fc_attr, + param_attr=fc_param_attr, + bias_attr=fc_bias_attr) + + return layer.pooling( + name=name, + input=fl, + pooling_type=pool_type, + bias_attr=pool_bias_attr, + layer_attr=pool_attr) + + +def convolution_net(input_dim, + class_dim=2, + emb_dim=128, + hid_dim=128, + is_predict=False): + data = layer.data("word", data_type.integer_value_sequence(input_dim)) + emb = layer.embedding(input=data, size=emb_dim) + conv_3 = sequence_conv_pool( + input=emb, input_size=emb_dim, context_len=3, hidden_size=hid_dim) + conv_4 = sequence_conv_pool( + input=emb, input_size=emb_dim, context_len=4, hidden_size=hid_dim) + output = layer.fc(input=[conv_3, conv_4], + size=class_dim, + act=activation.Softmax()) + lbl = layer.data("label", data_type.integer_value(1)) + cost = layer.classification_cost(input=output, label=lbl) + return cost + + +def data_reader(): + data_dir = "./data/pre-imdb" + train_file = "train_part_000" + test_file = "test_part_000" + dict_file = "dict.txt" + train_file = join_path(data_dir, train_file) + test_file = join_path(data_dir, test_file) + dict_file = join_path(data_dir, dict_file) + + with open(dict_file, 'r') as fdict, open(train_file, 'r') as fdata: + dictionary = dict() + for i, line in enumerate(fdict): + dictionary[line.split('\t')[0]] = i + + print('dict len : %d' % (len(dictionary))) + for line_count, line in enumerate(fdata): + label, comment = line.strip().split('\t\t') + label = int(label) + words = comment.split() + word_slot = [dictionary[w] for w in words if w in dictionary] + yield (word_slot, label) + + +if __name__ == '__main__': + data_dir = "./data/pre-imdb" + train_list = "train.list" + test_list = "test.list" + dict_file = "dict.txt" + dict_dim = len(open(join_path(data_dir, "dict.txt")).readlines()) + class_dim = 
len(open(join_path(data_dir, 'labels.list')).readlines()) + is_predict = False + + # init + paddle.init(use_gpu=True, trainer_count=4) + + # network config + cost = convolution_net(dict_dim, class_dim=class_dim, is_predict=is_predict) + + # create parameters + parameters = paddle.parameters.create(cost) + + adam_optimizer = paddle.optimizer.Adam(learning_rate=0.01) + + def event_handler(event): + if isinstance(event, paddle.event.EndIteration): + if event.batch_id % 1 == 0: + print "Pass %d, Batch %d, Cost %f, %s" % ( + event.pass_id, event.batch_id, event.cost, event.metrics) + + trainer = paddle.trainer.SGD(cost=cost, + parameters=parameters, + update_equation=adam_optimizer) + + trainer.train( + reader=paddle.reader.batched( + data_reader, batch_size=128), + event_handler=event_handler, + reader_dict={'word': 0, + 'label': 1}, + num_passes=10) From 3a5f98c36a13a4c027ee87461f52b49ebb6b6002 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Wed, 1 Mar 2017 18:59:00 +0800 Subject: [PATCH 22/87] Add reader.shuffle --- demo/sentiment/train_with_new_api.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/demo/sentiment/train_with_new_api.py b/demo/sentiment/train_with_new_api.py index f937b02906..59a303c0d5 100644 --- a/demo/sentiment/train_with_new_api.py +++ b/demo/sentiment/train_with_new_api.py @@ -134,7 +134,6 @@ def data_reader(): for i, line in enumerate(fdict): dictionary[line.split('\t')[0]] = i - print('dict len : %d' % (len(dictionary))) for line_count, line in enumerate(fdata): label, comment = line.strip().split('\t\t') label = int(label) @@ -165,7 +164,7 @@ if __name__ == '__main__': def event_handler(event): if isinstance(event, paddle.event.EndIteration): - if event.batch_id % 1 == 0: + if event.batch_id % 100 == 0: print "Pass %d, Batch %d, Cost %f, %s" % ( event.pass_id, event.batch_id, event.cost, event.metrics) @@ -175,7 +174,8 @@ if __name__ == '__main__': trainer.train( reader=paddle.reader.batched( - data_reader, batch_size=128), + paddle.reader.shuffle( + data_reader, buf_size=4096), batch_size=128), event_handler=event_handler, reader_dict={'word': 0, 'label': 1}, From 41f04e5ae4a459c0934cc7bca55e75dbbbb51b8a Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Wed, 1 Mar 2017 19:25:09 +0800 Subject: [PATCH 23/87] Add regularization and model_average --- demo/sentiment/train_with_new_api.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/demo/sentiment/train_with_new_api.py b/demo/sentiment/train_with_new_api.py index 59a303c0d5..bec07de92a 100644 --- a/demo/sentiment/train_with_new_api.py +++ b/demo/sentiment/train_with_new_api.py @@ -160,7 +160,10 @@ if __name__ == '__main__': # create parameters parameters = paddle.parameters.create(cost) - adam_optimizer = paddle.optimizer.Adam(learning_rate=0.01) + adam_optimizer = paddle.optimizer.Adam( + learning_rate=2e-3, + regularization=paddle.optimizer.L2Regularization(rate=8e-4), + model_average=paddle.optimizer.ModelAverage(average_window=0.5)) def event_handler(event): if isinstance(event, paddle.event.EndIteration): From 1d0a8c2f745dc15d17a83ac43e8e3ca9296d6216 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Wed, 1 Mar 2017 19:31:57 +0800 Subject: [PATCH 24/87] rename train_v2.py --- demo/sentiment/{train_with_new_api.py => train_v2.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename demo/sentiment/{train_with_new_api.py => train_v2.py} (100%) diff --git a/demo/sentiment/train_with_new_api.py b/demo/sentiment/train_v2.py similarity index 100% rename from 
demo/sentiment/train_with_new_api.py rename to demo/sentiment/train_v2.py From 803da664eddfc85bb55e192b7a98c696bf4fe112 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Wed, 1 Mar 2017 19:49:17 +0800 Subject: [PATCH 25/87] Add test --- demo/sentiment/train_v2.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/demo/sentiment/train_v2.py b/demo/sentiment/train_v2.py index bec07de92a..a764798add 100644 --- a/demo/sentiment/train_v2.py +++ b/demo/sentiment/train_v2.py @@ -142,6 +142,28 @@ def data_reader(): yield (word_slot, label) +def test_reader(): + data_dir = "./data/pre-imdb" + train_file = "train_part_000" + test_file = "test_part_000" + dict_file = "dict.txt" + train_file = join_path(data_dir, train_file) + test_file = join_path(data_dir, test_file) + dict_file = join_path(data_dir, dict_file) + + with open(dict_file, 'r') as fdict, open(test_file, 'r') as ftest: + dictionary = dict() + for i, line in enumerate(fdict): + dictionary[line.split('\t')[0]] = i + + for line_count, line in enumerate(ftest): + label, comment = line.strip().split('\t\t') + label = int(label) + words = comment.split() + word_slot = [dictionary[w] for w in words if w in dictionary] + yield (word_slot, label) + + if __name__ == '__main__': data_dir = "./data/pre-imdb" train_list = "train.list" @@ -170,6 +192,13 @@ if __name__ == '__main__': if event.batch_id % 100 == 0: print "Pass %d, Batch %d, Cost %f, %s" % ( event.pass_id, event.batch_id, event.cost, event.metrics) + if isinstance(event, paddle.event.EndPass): + result = trainer.test( + reader=paddle.reader.batched( + test_reader, batch_size=128), + reader_dict={'word': 0, + 'label': 1}) + print "Test with Pass %d, %s" % (event.pass_id, result.metrics) trainer = paddle.trainer.SGD(cost=cost, parameters=parameters, From ad44a3ebcaa062342ec799f020bd3975e6b5f899 Mon Sep 17 00:00:00 2001 From: liaogang Date: Wed, 1 Mar 2017 20:14:43 +0800 Subject: [PATCH 26/87] Update vgg and resnet via api v2 --- demo/image_classification/api_v2_resnet.py | 74 ++++++++ .../{train_v2_vgg.py => api_v2_train.py} | 59 ++----- demo/image_classification/api_v2_vgg.py | 47 ++++++ demo/image_classification/train_v2_resnet.py | 158 ------------------ 4 files changed, 139 insertions(+), 199 deletions(-) create mode 100644 demo/image_classification/api_v2_resnet.py rename demo/image_classification/{train_v2_vgg.py => api_v2_train.py} (55%) create mode 100644 demo/image_classification/api_v2_vgg.py delete mode 100644 demo/image_classification/train_v2_resnet.py diff --git a/demo/image_classification/api_v2_resnet.py b/demo/image_classification/api_v2_resnet.py new file mode 100644 index 0000000000..19d2054078 --- /dev/null +++ b/demo/image_classification/api_v2_resnet.py @@ -0,0 +1,74 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
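+
+# Only resnet_cifar10 is exported here: an initial 3x3 conv_bn_layer followed
+# by three groups of basic residual blocks (16, 32 and 64 filters, the last
+# two groups downsampling with stride 2) and a final 8x8 average pool.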
+ +import paddle.v2 as paddle + +__all__ = ['resnet_cifar10'] + + +def conv_bn_layer(input, + ch_out, + filter_size, + stride, + padding, + active_type=paddle.activation.Relu(), + ch_in=None): + tmp = paddle.layer.img_conv( + input=input, + filter_size=filter_size, + num_channels=ch_in, + num_filters=ch_out, + stride=stride, + padding=padding, + act=paddle.activation.Linear(), + bias_attr=False) + return paddle.layer.batch_norm(input=tmp, act=active_type) + + +def shortcut(ipt, n_in, n_out, stride): + if n_in != n_out: + return conv_bn_layer(ipt, n_out, 1, stride, 0, + paddle.activation.Linear()) + else: + return ipt + + +def basicblock(ipt, ch_out, stride): + ch_in = ch_out * 2 + tmp = conv_bn_layer(ipt, ch_out, 3, stride, 1) + tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, paddle.activation.Linear()) + short = shortcut(ipt, ch_in, ch_out, stride) + return paddle.layer.addto(input=[tmp, short], act=paddle.activation.Relu()) + + +def layer_warp(block_func, ipt, features, count, stride): + tmp = block_func(ipt, features, stride) + for i in range(1, count): + tmp = block_func(tmp, features, 1) + return tmp + + +def resnet_cifar10(ipt, depth=32): + # depth should be one of 20, 32, 44, 56, 110, 1202 + assert (depth - 2) % 6 == 0 + n = (depth - 2) / 6 + nStages = {16, 64, 128} + conv1 = conv_bn_layer( + ipt, ch_in=3, ch_out=16, filter_size=3, stride=1, padding=1) + res1 = layer_warp(basicblock, conv1, 16, n, 1) + res2 = layer_warp(basicblock, res1, 32, n, 2) + res3 = layer_warp(basicblock, res2, 64, n, 2) + pool = paddle.layer.img_pool( + input=res3, pool_size=8, stride=1, pool_type=paddle.pooling.Avg()) + return pool diff --git a/demo/image_classification/train_v2_vgg.py b/demo/image_classification/api_v2_train.py similarity index 55% rename from demo/image_classification/train_v2_vgg.py rename to demo/image_classification/api_v2_train.py index 5656ac85c6..44a8db3941 100644 --- a/demo/image_classification/train_v2_vgg.py +++ b/demo/image_classification/api_v2_train.py @@ -10,9 +10,10 @@ # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and -# limitations under the License. 
+# limitations under the License -import paddle.v2 as paddle +from api_v2_vgg import resnet_cifar10 +from api_v2_resnet import vgg_bn_drop def event_handler(event): @@ -22,46 +23,21 @@ def event_handler(event): event.cost) -def vgg_bn_drop(input): - def conv_block(ipt, num_filter, groups, dropouts, num_channels=None): - return paddle.layer.img_conv_group( - input=ipt, - num_channels=num_channels, - pool_size=2, - pool_stride=2, - conv_num_filter=[num_filter] * groups, - conv_filter_size=3, - conv_act=paddle.activation.Relu(), - conv_with_batchnorm=True, - conv_batchnorm_drop_rate=dropouts, - pool_type=paddle.pooling.Max()) - - conv1 = conv_block(input, 64, 2, [0.3, 0], 3) - conv2 = conv_block(conv1, 128, 2, [0.4, 0]) - conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0]) - conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0]) - conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0]) - - drop = paddle.layer.dropout(input=conv5, dropout_rate=0.5) - fc1 = paddle.layer.fc(input=drop, size=512, act=paddle.activation.Linear()) - bn = paddle.layer.batch_norm( - input=fc1, - act=paddle.activation.Relu(), - layer_attr=ExtraAttr(drop_rate=0.5)) - fc2 = paddle.layer.fc(input=bn, size=512, act=paddle.activation.Linear()) - return fc2 - - def main(): datadim = 3 * 32 * 32 classdim = 10 - paddle.init(use_gpu=False, trainer_count=1) + paddle.init(use_gpu=True, trainer_count=1) image = paddle.layer.data( name="image", type=paddle.data_type.dense_vector(datadim)) + + # option 1. resnet + net = resnet_cifar10(image, depth=32) + # option 2. vgg # net = vgg_bn_drop(image) - out = paddle.layer.fc(input=image, + + out = paddle.layer.fc(input=net, size=classdim, act=paddle.activation.Softmax()) @@ -70,27 +46,28 @@ def main(): cost = paddle.layer.classification_cost(input=out, label=lbl) parameters = paddle.parameters.create(cost) + momentum_optimizer = paddle.optimizer.Momentum( momentum=0.9, - regularization=paddle.optimizer.L2Regularization(rate=0.0005 * 128), + regularization=paddle.optimizer.L2Regularization(rate=0.0002 * 128), learning_rate=0.1 / 128.0, learning_rate_decay_a=0.1, learning_rate_decay_b=50000 * 100, learning_rate_schedule='discexp', batch_size=128) - trainer = paddle.trainer.SGD(update_equation=momentum_optimizer) + trainer = paddle.trainer.SGD(cost=cost, + parameters=parameters, + update_equation=momentum_optimizer) trainer.train( reader=paddle.reader.batched( paddle.reader.shuffle( - paddle.dataset.cifar.train10(), buf_size=3072), + paddle.dataset.cifar.train10(), buf_size=50000), batch_size=128), - cost=cost, - num_passes=1, - parameters=parameters, + num_passes=5, event_handler=event_handler, reader_dict={'image': 0, - 'label': 1}, ) + 'label': 1}) if __name__ == '__main__': diff --git a/demo/image_classification/api_v2_vgg.py b/demo/image_classification/api_v2_vgg.py new file mode 100644 index 0000000000..1e0e6b93ad --- /dev/null +++ b/demo/image_classification/api_v2_vgg.py @@ -0,0 +1,47 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
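+
+# vgg_bn_drop follows the VGG-style recipe: five img_conv_group blocks
+# (64/128/256/512/512 filters, each conv followed by batch norm, dropout and
+# 2x2 max pooling), then dropout, two 512-wide linear fc layers and a
+# batch-normalized ReLU with dropout between them.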
+ +import paddle.v2 as paddle + +__all__ = ['vgg_bn_drop'] + + +def vgg_bn_drop(input): + def conv_block(ipt, num_filter, groups, dropouts, num_channels=None): + return paddle.networks.img_conv_group( + input=ipt, + num_channels=num_channels, + pool_size=2, + pool_stride=2, + conv_num_filter=[num_filter] * groups, + conv_filter_size=3, + conv_act=paddle.activation.Relu(), + conv_with_batchnorm=True, + conv_batchnorm_drop_rate=dropouts, + pool_type=paddle.pooling.Max()) + + conv1 = conv_block(input, 64, 2, [0.3, 0], 3) + conv2 = conv_block(conv1, 128, 2, [0.4, 0]) + conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0]) + conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0]) + conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0]) + + drop = paddle.layer.dropout(input=conv5, dropout_rate=0.5) + fc1 = paddle.layer.fc(input=drop, size=512, act=paddle.activation.Linear()) + bn = paddle.layer.batch_norm( + input=fc1, + act=paddle.activation.Relu(), + layer_attr=paddle.attr.Extra(drop_rate=0.5)) + fc2 = paddle.layer.fc(input=bn, size=512, act=paddle.activation.Linear()) + return fc2 diff --git a/demo/image_classification/train_v2_resnet.py b/demo/image_classification/train_v2_resnet.py deleted file mode 100644 index fdfa87cd87..0000000000 --- a/demo/image_classification/train_v2_resnet.py +++ /dev/null @@ -1,158 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import paddle.v2 as paddle - - -def event_handler(event): - if isinstance(event, paddle.event.EndIteration): - if event.batch_id % 100 == 0: - print "Pass %d, Batch %d, Cost %f" % (event.pass_id, event.batch_id, - event.cost) - - -def conv_bn_layer(input, - ch_out, - filter_size, - stride, - padding, - active_type=paddle.activation.Relu(), - ch_in=None): - tmp = paddle.layer.img_conv( - input=input, - filter_size=filter_size, - num_channels=ch_in, - num_filters=ch_out, - stride=stride, - padding=padding, - act=paddle.activation.Linear(), - bias_attr=False) - return paddle.layer.batch_norm(input=tmp, act=active_type) - - -def shortcut(ipt, n_in, n_out, stride): - if n_in != n_out: - print("n_in != n_out") - return conv_bn_layer(ipt, n_out, 1, stride, 0, - paddle.activation.Linear()) - else: - return ipt - - -def basicblock(ipt, ch_out, stride): - ch_in = ipt.num_filters - tmp = conv_bn_layer(ipt, ch_out, 3, stride, 1) - tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, paddle.activation.Linear()) - short = shortcut(ipt, ch_in, ch_out, stride) - return paddle.layer.addto(input=[tmp, short], act=paddle.activation.Relu()) - - -def bottleneck(ipt, ch_out, stride): - ch_in = ipt.num_filter - tmp = conv_bn_layer(ipt, ch_out, 1, stride, 0) - tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1) - tmp = conv_bn_layer(tmp, ch_out * 4, 1, 1, 0, paddle.activation.Linear()) - short = shortcut(ipt, ch_in, ch_out * 4, stride) - return paddle.layer.addto(input=[tmp, short], act=paddle.activation.Relu()) - - -def layer_warp(block_func, ipt, features, count, stride): - tmp = block_func(ipt, features, stride) - for i in range(1, count): - tmp = block_func(tmp, features, 1) - return tmp - - -def resnet_imagenet(ipt, depth=50): - cfg = { - 18: ([2, 2, 2, 1], basicblock), - 34: ([3, 4, 6, 3], basicblock), - 50: ([3, 4, 6, 3], bottleneck), - 101: ([3, 4, 23, 3], bottleneck), - 152: ([3, 8, 36, 3], bottleneck) - } - stages, block_func = cfg[depth] - tmp = conv_bn_layer( - ipt, ch_in=3, ch_out=64, filter_size=7, stride=2, padding=3) - tmp = paddle.layer.img_pool(input=tmp, pool_size=3, stride=2) - tmp = layer_warp(block_func, tmp, 64, stages[0], 1) - tmp = layer_warp(block_func, tmp, 128, stages[1], 2) - tmp = layer_warp(block_func, tmp, 256, stages[2], 2) - tmp = layer_warp(block_func, tmp, 512, stages[3], 2) - tmp = paddle.layer.img_pool( - input=tmp, pool_size=7, stride=1, pool_type=paddle.pooling.Avg()) - - tmp = paddle.layer.fc(input=tmp, size=1000, act=paddle.activation.Softmax()) - return tmp - - -def resnet_cifar10(ipt, depth=32): - # depth should be one of 20, 32, 44, 56, 110, 1202 - assert (depth - 2) % 6 == 0 - n = (depth - 2) / 6 - nStages = {16, 64, 128} - conv1 = conv_bn_layer( - ipt, ch_in=3, ch_out=16, filter_size=3, stride=1, padding=1) - res1 = layer_warp(basicblock, conv1, 16, n, 1) - res2 = layer_warp(basicblock, res1, 32, n, 2) - res3 = layer_warp(basicblock, res2, 64, n, 2) - pool = paddle.layer.img_pool( - input=res3, pool_size=8, stride=1, pool_type=paddle.pooling.Avg()) - return pool - - -def main(): - datadim = 3 * 32 * 32 - classdim = 10 - - paddle.init(use_gpu=False, trainer_count=1) - - image = paddle.layer.data( - name="image", type=paddle.data_type.dense_vector(datadim)) - net = resnet_cifar10(image, depth=32) - out = paddle.layer.fc(input=net, - size=classdim, - act=paddle.activation.Softmax()) - - lbl = paddle.layer.data( - name="label", type=paddle.data_type.integer_value(classdim)) - cost = paddle.layer.classification_cost(input=out, label=lbl) - - parameters = paddle.parameters.create(cost) - - 
momentum_optimizer = paddle.optimizer.Momentum( - momentum=0.9, - regularization=paddle.optimizer.L2Regularization(rate=0.0002 * 128), - learning_rate=0.1 / 128.0, - learning_rate_decay_a=0.1, - learning_rate_decay_b=50000 * 100, - learning_rate_schedule='discexp', - batch_size=128) - - trainer = paddle.trainer.SGD(update_equation=momentum_optimizer) - trainer.train( - reader=paddle.reader.batched( - paddle.reader.shuffle( - paddle.dataset.cifar.train10(), buf_size=3072), - batch_size=128), - cost=cost, - num_passes=1, - parameters=parameters, - event_handler=event_handler, - reader_dict={'image': 0, - 'label': 1}, ) - - -if __name__ == '__main__': - main() From 49020f0be80428ba22913062ae877605114134eb Mon Sep 17 00:00:00 2001 From: liaogang Date: Wed, 1 Mar 2017 20:26:42 +0800 Subject: [PATCH 27/87] import paddle.v2 --- demo/image_classification/api_v2_train.py | 1 + 1 file changed, 1 insertion(+) diff --git a/demo/image_classification/api_v2_train.py b/demo/image_classification/api_v2_train.py index 44a8db3941..e6e4307242 100644 --- a/demo/image_classification/api_v2_train.py +++ b/demo/image_classification/api_v2_train.py @@ -14,6 +14,7 @@ from api_v2_vgg import resnet_cifar10 from api_v2_resnet import vgg_bn_drop +import paddle.v2 as paddle def event_handler(event): From 5fc572c29459faf0fbc342e3582ec8b6ee6f02ac Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 1 Mar 2017 20:28:00 +0800 Subject: [PATCH 28/87] Complete Memory --- python/paddle/trainer/config_parser.py | 6 +- python/paddle/v2/layer.py | 99 ++++++++++++++++++------ python/paddle/v2/tests/test_rnn_layer.py | 27 ++++--- 3 files changed, 96 insertions(+), 36 deletions(-) diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 487d4dfd5b..da937152ee 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -3474,8 +3474,6 @@ def update_g_config(): for name in g_config.model_config.output_layer_names: assert name in g_layer_map, \ 'input name "%s" does not correspond to a layer name' % name - for hook in _parse_config_hooks: - hook() return g_config @@ -3487,8 +3485,8 @@ def parse_config(trainer_config, config_arg_str): passed to config script as a dictionary CONFIG_ARGS ''' init_config_environment() - # for hook in _parse_config_hooks: - # hook() + for hook in _parse_config_hooks: + hook() config_args = {} diff --git a/python/paddle/v2/layer.py b/python/paddle/v2/layer.py index bdb0c29a47..bf5d653e8a 100644 --- a/python/paddle/v2/layer.py +++ b/python/paddle/v2/layer.py @@ -67,7 +67,7 @@ paddle.v2.parameters.create, no longer exposed to users. 
""" import collections - +import inspect import paddle.trainer_config_helpers as conf_helps from paddle.trainer_config_helpers.config_parser_utils import \ parse_network_config as __parse__ @@ -216,31 +216,83 @@ class DataLayerV2(Layer): return getattr(conf_helps, self.__method_name__)(name=self.name, **args) -class MemoryV2(Layer): - def __init__(self, name, size, **kwargs): - self.name = name - self.size = size +class WithExtraParent(Layer): + def extra_parent(self): + return self.__extra_parent__ - parent_names = ['boot_layer'] - parent_layers = dict() - other_kwargs = dict() - for pname in parent_names: - if kwargs.has_key(pname): - parent_layers[pname] = kwargs[pname] + def __init__(self, name=None, parent_layers=None): + self.__extra_parent__ = [] + super(WithExtraParent, self).__init__(name, parent_layers) - for key in kwargs.keys(): - if key not in parent_names: - other_kwargs[key] = kwargs[key] - super(MemoryV2, self).__init__(name=name, parent_layers=parent_layers) - self.__kwargs__ = other_kwargs + def append_extra_parent(self, parent): + self.__extra_parent__.append(parent) - def to_proto_impl(self, **kwargs): + def to_proto(self, context): + """ + function to set proto attribute + """ + kwargs = dict() + for p in self.__extra_parent__: + p.to_proto(context=context) + + for layer_name in self.__parent_layers__: + if not isinstance(self.__parent_layers__[layer_name], + collections.Sequence): + v1_layer = self.__parent_layers__[layer_name].to_proto( + context=context) + else: + v1_layer = map(lambda x: x.to_proto(context=context), + self.__parent_layers__[layer_name]) + kwargs[layer_name] = v1_layer + + if self.context_name() is None: + return self.to_proto_impl(context=context, **kwargs) + elif self.context_name() not in context: + context[self.context_name()] = self.to_proto_impl( + context=context, **kwargs) + + if self.use_context_name(): + return context[self.context_name()] + else: + return context[self.name] + + +class MemoryV2(WithExtraParent): + def __init__(self, name, size, **kwargs): + self.name = name + self.size = size + super(MemoryV2, self).__init__(name=name, parent_layers=dict()) + self.__kwargs__ = kwargs + self.__boot_layer_name__ = None + if 'boot_layer' in kwargs: + begin_of_current_rnn = [] + # TODO(yuyang18): Fix inspect, it could be wrong when user invoke a + # function inside step. 
+ st = inspect.stack() + for i in xrange(len(st)): + locs = inspect.stack()[i][0].f_locals + for val in locs.viewvalues(): + if isinstance(val, RecurrentLayerInput): + begin_of_current_rnn.append(val) + + if begin_of_current_rnn: + break + assert begin_of_current_rnn is not None + for extra in begin_of_current_rnn: + self.append_extra_parent(extra) + assert isinstance(extra, WithExtraParent) + extra.append_extra_parent(kwargs['boot_layer']) + self.__boot_layer_name__ = kwargs['boot_layer'].name + + def to_proto_impl(self, context, **kwargs): args = dict() for each in kwargs: args[each] = kwargs[each] for each in self.__kwargs__: args[each] = self.__kwargs__[each] + if self.__boot_layer_name__ is not None: + args['boot_layer'] = context[self.__boot_layer_name__] return conf_helps.memory(name=self.name, size=self.size, **args) def context_name(self): @@ -328,7 +380,7 @@ class MixedLayerV2(Layer): self.__inputs__.append(other) return self else: - raise MixedLayerTypeV2.AddToSealedMixedLayerExceptionV2() + raise MixedLayerV2.AddToSealedMixedLayerExceptionV2() def __enter__(self): assert len(self.__inputs__) == 0 @@ -359,11 +411,10 @@ def mixed(size=0, return MixedLayerV2(size, input, name, act, bias_attr, layer_attr) -class RecurrentLayerInput(Layer): +class RecurrentLayerInput(WithExtraParent): def __init__(self, recurrent_name, index, parent_layers): assert len(parent_layers) == 1 self.__parents__ = parent_layers.values()[0] - print self.__parents__, parent_layers super(RecurrentLayerInput, self).__init__( name=self.__parents__[index].name, parent_layers=parent_layers) self.__recurrent_name__ = recurrent_name @@ -371,7 +422,7 @@ class RecurrentLayerInput(Layer): def context_name(self): return self.__recurrent_name__ + ".begin" - def to_proto_impl(self, **kwargs): + def to_proto_impl(self, context, **kwargs): model_type('recurrent_nn') RecurrentLayerGroupWithoutOutLinksBegin( name=self.__recurrent_name__, @@ -458,8 +509,10 @@ def __layer_name_mapping__(inname): def __layer_name_mapping_parent_names__(inname): all_args = getattr(conf_helps, inname).argspec.args return filter( - lambda x: x in ['input1', 'input2','label', 'input', 'a', 'b', 'expand_as', - 'weights', 'vectors', 'weight', 'score', 'left', 'right'], + lambda x: x in ['input1', 'input2', 'label', 'input', 'a', 'b', + 'expand_as', + 'weights', 'vectors', 'weight', 'score', 'left', + 'right'], all_args) diff --git a/python/paddle/v2/tests/test_rnn_layer.py b/python/paddle/v2/tests/test_rnn_layer.py index bf2c4db61a..48aeb42391 100644 --- a/python/paddle/v2/tests/test_rnn_layer.py +++ b/python/paddle/v2/tests/test_rnn_layer.py @@ -106,9 +106,21 @@ class RNNTest(unittest.TestCase): return str(parse_network(test)) def parse_new_rnn(): + data = layer.data( + name="word", type=data_type.dense_vector(dict_dim)) + label = layer.data( + name="label", type=data_type.dense_vector(label_dim)) + emb = layer.embedding(input=data, size=word_dim) + + boot_layer = layer.data( + name="boot", type=data_type.dense_vector(10)) + + boot_layer = layer.fc(name='wtf', input=boot_layer, size=10) + def step(y, wid): z = layer.embedding(input=wid, size=word_dim) - mem = layer.memory(name="rnn_state", size=hidden_dim) + mem = layer.memory( + name="rnn_state", size=hidden_dim, boot_layer=boot_layer) out = layer.fc(input=[y, z, mem], size=hidden_dim, act=activation.Tanh(), @@ -116,11 +128,6 @@ class RNNTest(unittest.TestCase): name="rnn_state") return out - data = layer.data( - name="word", type=data_type.dense_vector(dict_dim)) - label = layer.data( - 
name="label", type=data_type.dense_vector(label_dim)) - emb = layer.embedding(input=data, size=word_dim) out = layer.recurrent_group( name="rnn", step=step, input=[emb, data]) @@ -134,9 +141,11 @@ class RNNTest(unittest.TestCase): return str(layer.parse_network(cost)) - diff = difflib.unified_diff(parse_old_rnn().splitlines(1), - parse_new_rnn().splitlines(1)) - print ''.join(diff) + with open("/Users/baidu/old.out", 'w') as f: + print >> f, parse_old_rnn() + with open("/Users/baidu/new.out", "w") as f: + print >> f, parse_new_rnn() + # print ''.join(diff) if __name__ == '__main__': From 0a33f170a423cc238f7b1c37a8e76a48ce9f48ec Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Wed, 1 Mar 2017 20:35:04 +0800 Subject: [PATCH 29/87] Add stacked lstm network --- demo/sentiment/train_v2.py | 74 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 72 insertions(+), 2 deletions(-) diff --git a/demo/sentiment/train_v2.py b/demo/sentiment/train_v2.py index a764798add..779bfee5b6 100644 --- a/demo/sentiment/train_v2.py +++ b/demo/sentiment/train_v2.py @@ -1,4 +1,6 @@ from os.path import join as join_path +import paddle.trainer_config_helpers.attrs as attrs +from paddle.trainer_config_helpers.poolings import MaxPooling import paddle.v2 as paddle import paddle.v2.layer as layer import paddle.v2.activation as activation @@ -115,7 +117,73 @@ def convolution_net(input_dim, output = layer.fc(input=[conv_3, conv_4], size=class_dim, act=activation.Softmax()) - lbl = layer.data("label", data_type.integer_value(1)) + lbl = layer.data("label", data_type.integer_value(2)) + cost = layer.classification_cost(input=output, label=lbl) + return cost + + +def stacked_lstm_net(input_dim, + class_dim=2, + emb_dim=128, + hid_dim=512, + stacked_num=3, + is_predict=False): + """ + A Wrapper for sentiment classification task. + This network uses bi-directional recurrent network, + consisting three LSTM layers. This configure is referred to + the paper as following url, but use fewer layrs. + http://www.aclweb.org/anthology/P15-1109 + + input_dim: here is word dictionary dimension. + class_dim: number of categories. + emb_dim: dimension of word embedding. + hid_dim: dimension of hidden layer. + stacked_num: number of stacked lstm-hidden layer. + is_predict: is predicting or not. + Some layers is not needed in network when predicting. + """ + assert stacked_num % 2 == 1 + + layer_attr = attrs.ExtraLayerAttribute(drop_rate=0.5) + fc_para_attr = attrs.ParameterAttribute(learning_rate=1e-3) + lstm_para_attr = attrs.ParameterAttribute(initial_std=0., learning_rate=1.) + para_attr = [fc_para_attr, lstm_para_attr] + bias_attr = attrs.ParameterAttribute(initial_std=0., l2_rate=0.) 
+ relu = activation.Relu() + linear = activation.Linear() + + data = layer.data("word", data_type.integer_value_sequence(input_dim)) + emb = layer.embedding(input=data, size=emb_dim) + + fc1 = layer.fc(input=emb, size=hid_dim, act=linear, bias_attr=bias_attr) + lstm1 = layer.lstmemory( + input=fc1, act=relu, bias_attr=bias_attr, layer_attr=layer_attr) + + inputs = [fc1, lstm1] + for i in range(2, stacked_num + 1): + fc = layer.fc(input=inputs, + size=hid_dim, + act=linear, + param_attr=para_attr, + bias_attr=bias_attr) + lstm = layer.lstmemory( + input=fc, + reverse=(i % 2) == 0, + act=relu, + bias_attr=bias_attr, + layer_attr=layer_attr) + inputs = [fc, lstm] + + fc_last = layer.pooling(input=inputs[0], pooling_type=MaxPooling()) + lstm_last = layer.pooling(input=inputs[1], pooling_type=MaxPooling()) + output = layer.fc(input=[fc_last, lstm_last], + size=class_dim, + act=activation.Softmax(), + bias_attr=bias_attr, + param_attr=para_attr) + + lbl = layer.data("label", data_type.integer_value(2)) cost = layer.classification_cost(input=output, label=lbl) return cost @@ -177,7 +245,9 @@ if __name__ == '__main__': paddle.init(use_gpu=True, trainer_count=4) # network config - cost = convolution_net(dict_dim, class_dim=class_dim, is_predict=is_predict) + # cost = convolution_net(dict_dim, class_dim=class_dim, is_predict=is_predict) + cost = stacked_lstm_net( + dict_dim, class_dim=class_dim, stacked_num=3, is_predict=is_predict) # create parameters parameters = paddle.parameters.create(cost) From d935d88d42f7fa1aed6d14969bfebe3ffd01928b Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Wed, 1 Mar 2017 20:35:51 +0800 Subject: [PATCH 30/87] mnist api v2 --- demo/mnist/api_train_v2.py | 87 ++++++++++++++++++++++++++++++-------- 1 file changed, 70 insertions(+), 17 deletions(-) diff --git a/demo/mnist/api_train_v2.py b/demo/mnist/api_train_v2.py index a59b30ccdb..495c403e40 100644 --- a/demo/mnist/api_train_v2.py +++ b/demo/mnist/api_train_v2.py @@ -1,48 +1,101 @@ import paddle.v2 as paddle +def softmax_regression(img): + predict = paddle.layer.fc(input=img, + size=10, + act=paddle.activation.Softmax()) + return predict + + +def multilayer_perceptron(img): + # The first fully-connected layer + hidden1 = paddle.layer.fc(input=img, size=128, act=paddle.activation.Relu()) + # The second fully-connected layer and the according activation function + hidden2 = paddle.layer.fc(input=hidden1, + size=64, + act=paddle.activation.Relu()) + # The thrid fully-connected layer, note that the hidden size should be 10, + # which is the number of unique digits + predict = paddle.layer.fc(input=hidden2, + size=10, + act=paddle.activation.Softmax()) + return predict + + +def convolutional_neural_network(img): + # first conv layer + conv_pool_1 = paddle.networks.simple_img_conv_pool( + input=img, + filter_size=5, + num_filters=20, + num_channel=1, + pool_size=2, + pool_stride=2, + act=paddle.activation.Tanh()) + # second conv layer + conv_pool_2 = paddle.networks.simple_img_conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=50, + num_channel=20, + pool_size=2, + pool_stride=2, + act=paddle.activation.Tanh()) + # The first fully-connected layer + fc1 = paddle.layer.fc(input=conv_pool_2, + size=128, + act=paddle.activation.Tanh()) + # The softmax layer, note that the hidden size should be 10, + # which is the number of unique digits + predict = paddle.layer.fc(input=fc1, + size=10, + act=paddle.activation.Softmax()) + return predict + + def main(): - paddle.init(use_gpu=False, trainer_count=1) + 
paddle.init(use_gpu=True, trainer_count=1) # define network topology images = paddle.layer.data( name='pixel', type=paddle.data_type.dense_vector(784)) label = paddle.layer.data( name='label', type=paddle.data_type.integer_value(10)) - hidden1 = paddle.layer.fc(input=images, size=200) - hidden2 = paddle.layer.fc(input=hidden1, size=200) - inference = paddle.layer.fc(input=hidden2, - size=10, - act=paddle.activation.Softmax()) - cost = paddle.layer.classification_cost(input=inference, label=label) + + predict = softmax_regression(images) + #predict = multilayer_perceptron(images) + #predict = convolutional_neural_network(images) + + cost = paddle.layer.classification_cost(input=predict, label=label) parameters = paddle.parameters.create(cost) - adam_optimizer = paddle.optimizer.Adam(learning_rate=0.01) + optimizer = paddle.optimizer.Momentum( + learning_rate=0.1 / 128.0, + momentum=0.9, + regularization=paddle.optimizer.L2Regularization(rate=0.0005 * 128)) trainer = paddle.trainer.SGD(cost=cost, parameters=parameters, - update_equation=adam_optimizer) + update_equation=optimizer) def event_handler(event): if isinstance(event, paddle.event.EndIteration): - if event.batch_id % 1000 == 0: + if event.batch_id % 100 == 0: result = trainer.test(reader=paddle.reader.batched( - paddle.dataset.mnist.test(), batch_size=256)) - + paddle.dataset.mnist.test(), batch_size=128)) print "Pass %d, Batch %d, Cost %f, %s, Testing metrics %s" % ( event.pass_id, event.batch_id, event.cost, event.metrics, result.metrics) - else: - pass - trainer.train( reader=paddle.reader.batched( paddle.reader.shuffle( paddle.dataset.mnist.train(), buf_size=8192), - batch_size=32), - event_handler=event_handler) + batch_size=128), + event_handler=event_handler, + num_passes=100) if __name__ == '__main__': From 06cbd81eecf40c6b90b72da01126c176e4fc0ebf Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Thu, 2 Mar 2017 02:23:02 +0800 Subject: [PATCH 31/87] CONLL05 dataset for SRL --- python/paddle/v2/dataset/conll05.py | 188 ++++++++++++++++++++++++++++ 1 file changed, 188 insertions(+) create mode 100644 python/paddle/v2/dataset/conll05.py diff --git a/python/paddle/v2/dataset/conll05.py b/python/paddle/v2/dataset/conll05.py new file mode 100644 index 0000000000..e902906371 --- /dev/null +++ b/python/paddle/v2/dataset/conll05.py @@ -0,0 +1,188 @@ +import paddle.v2.dataset.common +import tarfile +import gzip +import itertools + +__all__ = ['test, get_dict', 'get_embedding'] +""" +Conll 2005 dataset. Paddle semantic role labeling Book and demo use this +dataset as an example. Because Conll 2005 is not free in public, the default +downloaded URL is test set of Conll 2005 (which is public). Users can change +URL and MD5 to their Conll dataset. 
+""" + +DATA_URL = 'http://www.cs.upc.edu/~srlconll/conll05st-tests.tar.gz' +DATA_MD5 = '387719152ae52d60422c016e92a742fc' +WORDDICT_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/wordDict.txt' +WORDDICT_MD5 = 'ea7fb7d4c75cc6254716f0177a506baa' +VERBDICT_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/verbDict.txt' +VERBDICT_MD5 = '0d2977293bbb6cbefab5b0f97db1e77c' +TRGDICT_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/targetDict.txt' +TRGDICT_MD5 = 'd8c7f03ceb5fc2e5a0fa7503a4353751' +EMB_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/emb' +EMB_MD5 = 'bf436eb0faa1f6f9103017f8be57cdb7' + +UNK_IDX = 0 + + +def load_dict(filename): + d = dict() + with open(filename, 'r') as f: + for i, line in enumerate(f): + d[line.strip()] = i + return d + + +def corpus_reader(data_path, words_name, props_name): + """ + Read one corpus by corpus name. It returns an iterator. Each element of + this iterator is a tuple including sentence and labels. The sentence is + consist of a list of word IDs. The labels include a list of label IDs. + :param name: corpus name. + :type name: basestring + :return: a iterator of data. + :rtype: iterator + """ + + def reader(): + tf = tarfile.open(data_path) + wf = tf.extractfile(words_name) + pf = tf.extractfile(props_name) + with gzip.GzipFile(fileobj=wf) as words_file, gzip.GzipFile( + fileobj=pf) as props_file: + sentences = [] + labels = [] + one_seg = [] + for word, label in itertools.izip(words_file, props_file): + word = word.strip() + label = label.strip().split() + + if len(label) == 0: # end of sentence + for i in xrange(len(one_seg[0])): + a_kind_lable = [x[i] for x in one_seg] + labels.append(a_kind_lable) + + if len(labels) >= 1: + verb_list = [] + for x in labels[0]: + if x != '-': + verb_list.append(x) + + for i, lbl in enumerate(labels[1:]): + cur_tag = 'O' + is_in_bracket = False + lbl_seq = [] + verb_word = '' + for l in lbl: + if l == '*' and is_in_bracket == False: + lbl_seq.append('O') + elif l == '*' and is_in_bracket == True: + lbl_seq.append('I-' + cur_tag) + elif l == '*)': + lbl_seq.append('I-' + cur_tag) + is_in_bracket = False + elif l.find('(') != -1 and l.find(')') != -1: + cur_tag = l[1:l.find('*')] + lbl_seq.append('B-' + cur_tag) + is_in_bracket = False + elif l.find('(') != -1 and l.find(')') == -1: + cur_tag = l[1:l.find('*')] + lbl_seq.append('B-' + cur_tag) + is_in_bracket = True + else: + print 'error:', l + + yield sentences, verb_list[i], lbl_seq + + sentences = [] + labels = [] + one_seg = [] + else: + sentences.append(word) + one_seg.append(label) + + return reader + + +def reader_creator(corpus_reader, + word_dict=None, + predicate_dict=None, + label_dict=None): + def reader(): + for sentence, predicate, labels in corpus_reader(): + + sen_len = len(sentence) + + verb_index = labels.index('B-V') + mark = [0] * len(labels) + if verb_index > 0: + mark[verb_index - 1] = 1 + ctx_n1 = sentence[verb_index - 1] + else: + ctx_n1 = 'bos' + + if verb_index > 1: + mark[verb_index - 2] = 1 + ctx_n2 = sentence[verb_index - 2] + else: + ctx_n2 = 'bos' + + mark[verb_index] = 1 + ctx_0 = sentence[verb_index] + + if verb_index < len(labels) - 1: + mark[verb_index + 1] = 1 + ctx_p1 = sentence[verb_index + 1] + else: + ctx_p1 = 'eos' + + if verb_index < len(labels) - 2: + mark[verb_index + 2] = 1 + ctx_p2 = sentence[verb_index + 2] + else: + ctx_p2 = 'eos' + + word_idx = [word_dict.get(w, UNK_IDX) for w in sentence] + pred_idx = [predicate_dict.get(predicate)] * 
sen_len + + ctx_n2_idx = [word_dict.get(ctx_n2, UNK_IDX)] * sen_len + ctx_n1_idx = [word_dict.get(ctx_n1, UNK_IDX)] * sen_len + ctx_0_idx = [word_dict.get(ctx_0, UNK_IDX)] * sen_len + ctx_p1_idx = [word_dict.get(ctx_p1, UNK_IDX)] * sen_len + ctx_p2_idx = [word_dict.get(ctx_p2, UNK_IDX)] * sen_len + + label_idx = [label_dict.get(w) for w in labels] + + yield word_idx, pred_idx, ctx_n2_idx, ctx_n1_idx, \ + ctx_0_idx, ctx_p1_idx, ctx_p2_idx, mark, label_idx + + return reader() + + +def get_dict(): + word_dict = load_dict( + common.download(WORDDICT_URL, 'conll05st', WORDDICT_MD5)) + verb_dict = load_dict( + common.download(VERBDICT_URL, 'conll05st', VERBDICT_MD5)) + label_dict = load_dict( + common.download(TRGDICT_URL, 'conll05st', TRGDICT_MD5)) + return word_dict, verb_dict, label_dict + + +def get_embedding(): + return common.download(EMB_URL, 'conll05st', EMB_MD5) + + +def test(): + word_dict, verb_dict, label_dict = get_dict() + reader = corpus_reader( + common.download(DATA_URL, 'conll05st', DATA_MD5), + words_name='conll05st-release/test.wsj/words/test.wsj.words.gz', + props_name='conll05st-release/test.wsj/props/test.wsj.props.gz') + return reader_creator(reader, word_dict, verb_dict, label_dict) + + +if __name__ == '__main__': + print get_embedding() + for f in test(): + print f From 0dd53294caaefdee1d7809ef08cd64db3ba8561d Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Thu, 2 Mar 2017 02:32:51 +0800 Subject: [PATCH 32/87] add copyright --- python/paddle/v2/dataset/__init__.py | 14 ++++++++++++++ python/paddle/v2/dataset/cifar.py | 14 ++++++++++++++ python/paddle/v2/dataset/common.py | 14 ++++++++++++++ python/paddle/v2/dataset/conll05.py | 14 ++++++++++++++ python/paddle/v2/dataset/imdb.py | 4 +--- python/paddle/v2/dataset/imikolov.py | 13 +++++++++++++ python/paddle/v2/dataset/mnist.py | 13 +++++++++++++ python/paddle/v2/dataset/movielens.py | 14 ++++++++++++++ python/paddle/v2/dataset/tests/cifar_test.py | 14 ++++++++++++++ python/paddle/v2/dataset/tests/common_test.py | 14 ++++++++++++++ python/paddle/v2/dataset/tests/imdb_test.py | 14 ++++++++++++++ python/paddle/v2/dataset/tests/mnist_test.py | 14 ++++++++++++++ 12 files changed, 153 insertions(+), 3 deletions(-) diff --git a/python/paddle/v2/dataset/__init__.py b/python/paddle/v2/dataset/__init__.py index 9647e98503..15460b820d 100644 --- a/python/paddle/v2/dataset/__init__.py +++ b/python/paddle/v2/dataset/__init__.py @@ -1,3 +1,17 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import mnist __all__ = ['mnist'] diff --git a/python/paddle/v2/dataset/cifar.py b/python/paddle/v2/dataset/cifar.py index 77c54bd268..5c6f5d8556 100644 --- a/python/paddle/v2/dataset/cifar.py +++ b/python/paddle/v2/dataset/cifar.py @@ -1,6 +1,20 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
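A minimal usage sketch of the conll05 module added in [PATCH 31/87] above (hypothetical, not part of any patch; it assumes the module is importable as paddle.v2.dataset.conll05 and that the dictionary and embedding downloads succeed):

import paddle.v2.dataset.conll05 as conll05

word_dict, verb_dict, label_dict = conll05.get_dict()
emb_path = conll05.get_embedding()
print 'dict sizes:', len(word_dict), len(verb_dict), len(label_dict)
print 'embedding file:', emb_path

# test() is a generator of 9-element samples:
#   word_idx, pred_idx, ctx_n2_idx, ctx_n1_idx, ctx_0_idx,
#   ctx_p1_idx, ctx_p2_idx, mark, label_idx
for i, sample in enumerate(conll05.test()):
    print 'sentence length:', len(sample[0]), 'labels:', len(sample[-1])
    if i >= 2:
        break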
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ CIFAR dataset: https://www.cs.toronto.edu/~kriz/cifar.html """ + import cPickle import itertools import numpy diff --git a/python/paddle/v2/dataset/common.py b/python/paddle/v2/dataset/common.py index fcf4437ffa..397c9e66d4 100644 --- a/python/paddle/v2/dataset/common.py +++ b/python/paddle/v2/dataset/common.py @@ -1,3 +1,17 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import requests import hashlib import os diff --git a/python/paddle/v2/dataset/conll05.py b/python/paddle/v2/dataset/conll05.py index e902906371..7c43c7c634 100644 --- a/python/paddle/v2/dataset/conll05.py +++ b/python/paddle/v2/dataset/conll05.py @@ -1,3 +1,17 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import paddle.v2.dataset.common import tarfile import gzip diff --git a/python/paddle/v2/dataset/imdb.py b/python/paddle/v2/dataset/imdb.py index 433e37380f..ffd7d89049 100644 --- a/python/paddle/v2/dataset/imdb.py +++ b/python/paddle/v2/dataset/imdb.py @@ -1,6 +1,3 @@ -# /usr/bin/env python -# -*- coding:utf-8 -*- - # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,6 +14,7 @@ """ IMDB dataset: http://ai.stanford.edu/%7Eamaas/data/sentiment/aclImdb_v1.tar.gz """ + import paddle.v2.dataset.common import tarfile import Queue diff --git a/python/paddle/v2/dataset/imikolov.py b/python/paddle/v2/dataset/imikolov.py index b3791ddad6..285d3eaca8 100644 --- a/python/paddle/v2/dataset/imikolov.py +++ b/python/paddle/v2/dataset/imikolov.py @@ -1,3 +1,16 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ imikolov's simple dataset: http://www.fit.vutbr.cz/~imikolov/rnnlm/ """ diff --git a/python/paddle/v2/dataset/mnist.py b/python/paddle/v2/dataset/mnist.py index 1512a3c318..7cecb34164 100644 --- a/python/paddle/v2/dataset/mnist.py +++ b/python/paddle/v2/dataset/mnist.py @@ -1,3 +1,16 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ MNIST dataset. """ diff --git a/python/paddle/v2/dataset/movielens.py b/python/paddle/v2/dataset/movielens.py index dcffcff2f5..c22bcfa38b 100644 --- a/python/paddle/v2/dataset/movielens.py +++ b/python/paddle/v2/dataset/movielens.py @@ -1,3 +1,17 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import zipfile from common import download import re diff --git a/python/paddle/v2/dataset/tests/cifar_test.py b/python/paddle/v2/dataset/tests/cifar_test.py index a2af45ecf5..e0e18229da 100644 --- a/python/paddle/v2/dataset/tests/cifar_test.py +++ b/python/paddle/v2/dataset/tests/cifar_test.py @@ -1,3 +1,17 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import paddle.v2.dataset.cifar import unittest diff --git a/python/paddle/v2/dataset/tests/common_test.py b/python/paddle/v2/dataset/tests/common_test.py index 7d8406171b..5babcef0eb 100644 --- a/python/paddle/v2/dataset/tests/common_test.py +++ b/python/paddle/v2/dataset/tests/common_test.py @@ -1,3 +1,17 @@ +# Copyright (c) 2016 PaddlePaddle Authors. 
All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import paddle.v2.dataset.common import unittest import tempfile diff --git a/python/paddle/v2/dataset/tests/imdb_test.py b/python/paddle/v2/dataset/tests/imdb_test.py index e887af1663..c4d82f2689 100644 --- a/python/paddle/v2/dataset/tests/imdb_test.py +++ b/python/paddle/v2/dataset/tests/imdb_test.py @@ -1,3 +1,17 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import paddle.v2.dataset.imdb import unittest import re diff --git a/python/paddle/v2/dataset/tests/mnist_test.py b/python/paddle/v2/dataset/tests/mnist_test.py index b4408cc2f5..1d344cac3e 100644 --- a/python/paddle/v2/dataset/tests/mnist_test.py +++ b/python/paddle/v2/dataset/tests/mnist_test.py @@ -1,3 +1,17 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import paddle.v2.dataset.mnist import unittest From 35ec5f0f1a5b497c0e927c98df882a1e9ab40d16 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Thu, 2 Mar 2017 09:51:46 +0800 Subject: [PATCH 33/87] Support StaticInput --- python/paddle/v2/__init__.py | 3 +- python/paddle/v2/layer.py | 53 +++++++++++++----------- python/paddle/v2/networks.py | 19 +++++++++ python/paddle/v2/tests/test_rnn_layer.py | 41 +++++++++--------- 4 files changed, 72 insertions(+), 44 deletions(-) create mode 100644 python/paddle/v2/networks.py diff --git a/python/paddle/v2/__init__.py b/python/paddle/v2/__init__.py index b31efe170d..4dbcd3bb6b 100644 --- a/python/paddle/v2/__init__.py +++ b/python/paddle/v2/__init__.py @@ -20,6 +20,7 @@ import event import data_type import topology import data_feeder +import networks from . import dataset from . 
import reader import attr @@ -29,7 +30,7 @@ import py_paddle.swig_paddle as api __all__ = [ 'optimizer', 'layer', 'activation', 'parameters', 'init', 'trainer', 'event', 'data_type', 'attr', 'pooling', 'data_feeder', 'dataset', 'reader', - 'topology' + 'topology', 'networks' ] diff --git a/python/paddle/v2/layer.py b/python/paddle/v2/layer.py index bf5d653e8a..82ccd8498a 100644 --- a/python/paddle/v2/layer.py +++ b/python/paddle/v2/layer.py @@ -109,9 +109,10 @@ def parse_network(*outputs): class Layer(object): - def __init__(self, name=None, parent_layers=None): + def __init__(self, name=None, size=None, parent_layers=None): assert isinstance(parent_layers, dict) self.name = name + self.size = size self.__parent_layers__ = parent_layers def to_proto(self, context): @@ -173,7 +174,8 @@ def __convert_to_v2__(method_name, parent_names, is_default_name=True): other_kwargs[key] = kwargs[key] name = kwargs.get('name', None) - super(V2LayerImpl, self).__init__(name, parent_layers) + size = kwargs.get('size', None) + super(V2LayerImpl, self).__init__(name, size, parent_layers) self.__other_kwargs__ = other_kwargs if wrapper is not None: @@ -220,9 +222,10 @@ class WithExtraParent(Layer): def extra_parent(self): return self.__extra_parent__ - def __init__(self, name=None, parent_layers=None): + def __init__(self, name=None, size=None, parent_layers=None): self.__extra_parent__ = [] - super(WithExtraParent, self).__init__(name, parent_layers) + super(WithExtraParent, self).__init__( + name=name, size=size, parent_layers=parent_layers) def append_extra_parent(self, parent): self.__extra_parent__.append(parent) @@ -261,7 +264,8 @@ class MemoryV2(WithExtraParent): def __init__(self, name, size, **kwargs): self.name = name self.size = size - super(MemoryV2, self).__init__(name=name, parent_layers=dict()) + super(MemoryV2, self).__init__( + name=name, size=size, parent_layers=dict()) self.__kwargs__ = kwargs self.__boot_layer_name__ = None if 'boot_layer' in kwargs: @@ -271,7 +275,9 @@ class MemoryV2(WithExtraParent): st = inspect.stack() for i in xrange(len(st)): locs = inspect.stack()[i][0].f_locals - for val in locs.viewvalues(): + keys = locs.keys() + for key in keys: + val = locs[key] if isinstance(val, RecurrentLayerInput): begin_of_current_rnn.append(val) @@ -322,21 +328,15 @@ class LayerOutputV2(Layer): return self.layer_output -class StaticInputV2(Layer): - def __init__(self, input=None, **kwargs): - assert input is not None - self.__kwargs__ = kwargs - super(StaticInputV2, self).__init__( - name=input.name, parent_layers={'input': input}) - - def context_name(self): - return self.name + "#static_input" - - def to_proto_impl(self, **kwargs): - args = dict() - args.update(kwargs) - args.update(self.__kwargs__) - return conf_helps.StaticInput(**args) +class StaticInputV2(object): + def __init__(self, input, is_seq=False, size=None): + assert isinstance(input, LayerV2) + self.name = input.name + self.input = input + self.is_seq = is_seq + self.size = size + # TODO(qiaolongfei): add size + # assert input.size is not None or size is not None class MixedLayerV2(Layer): @@ -370,9 +370,8 @@ class MixedLayerV2(Layer): other_kwargs['act'] = act other_kwargs['bias_attr'] = bias_attr other_kwargs['layer_attr'] = layer_attr - parent_layers = {"input": self.__inputs__} - super(MixedLayerV2, self).__init__(name, parent_layers) + super(MixedLayerV2, self).__init__(name, size, parent_layers) self.__other_kwargs__ = other_kwargs def __iadd__(self, other): @@ -452,6 +451,12 @@ def recurrent_group(step, input, 
name=None): if not isinstance(input, collections.Sequence): input = [input] + # TODO(qiaolongfei) convert StaticInput to memory according to v2 recurrent_group + for i in xrange(len(input)): + cur_input = input[i] + if isinstance(cur_input, StaticInputV2): + input[i] = cur_input.input + actual_input = [ RecurrentLayerInput( recurrent_name=name, @@ -512,7 +517,7 @@ def __layer_name_mapping_parent_names__(inname): lambda x: x in ['input1', 'input2', 'label', 'input', 'a', 'b', 'expand_as', 'weights', 'vectors', 'weight', 'score', 'left', - 'right'], + 'right', 'output_mem'], all_args) diff --git a/python/paddle/v2/networks.py b/python/paddle/v2/networks.py new file mode 100644 index 0000000000..2877b56b18 --- /dev/null +++ b/python/paddle/v2/networks.py @@ -0,0 +1,19 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from layer import __convert_to_v2__ + +simple_gru = __convert_to_v2__('simple_gru', ['input']) +simple_attention = __convert_to_v2__( + 'simple_attention', ['encoded_sequence', 'encoded_proj', 'decoder_state']) diff --git a/python/paddle/v2/tests/test_rnn_layer.py b/python/paddle/v2/tests/test_rnn_layer.py index 48aeb42391..5fbbd20eb7 100644 --- a/python/paddle/v2/tests/test_rnn_layer.py +++ b/python/paddle/v2/tests/test_rnn_layer.py @@ -74,21 +74,28 @@ class RNNTest(unittest.TestCase): label_dim = 3 def parse_old_rnn(): - def step(y, wid): - z = conf_helps.embedding_layer(input=wid, size=word_dim) - mem = conf_helps.memory(name="rnn_state", size=hidden_dim) - out = conf_helps.fc_layer( - input=[y, z, mem], - size=hidden_dim, - act=conf_helps.TanhActivation(), - bias_attr=True, - name="rnn_state") - return out - def test(): data = conf_helps.data_layer(name="word", size=dict_dim) label = conf_helps.data_layer(name="label", size=label_dim) emb = conf_helps.embedding_layer(input=data, size=word_dim) + boot_layer = conf_helps.data_layer(name="boot", size=10) + boot_layer = conf_helps.fc_layer( + name='boot_fc', input=boot_layer, size=10) + + def step(y, wid): + z = conf_helps.embedding_layer(input=wid, size=word_dim) + mem = conf_helps.memory( + name="rnn_state", + size=hidden_dim, + boot_layer=boot_layer) + out = conf_helps.fc_layer( + input=[y, z, mem], + size=hidden_dim, + act=conf_helps.TanhActivation(), + bias_attr=True, + name="rnn_state") + return out + out = conf_helps.recurrent_group( name="rnn", step=step, input=[emb, data]) @@ -111,11 +118,9 @@ class RNNTest(unittest.TestCase): label = layer.data( name="label", type=data_type.dense_vector(label_dim)) emb = layer.embedding(input=data, size=word_dim) - boot_layer = layer.data( name="boot", type=data_type.dense_vector(10)) - - boot_layer = layer.fc(name='wtf', input=boot_layer, size=10) + boot_layer = layer.fc(name='boot_fc', input=boot_layer, size=10) def step(y, wid): z = layer.embedding(input=wid, size=word_dim) @@ -141,11 +146,9 @@ class RNNTest(unittest.TestCase): return str(layer.parse_network(cost)) - with open("/Users/baidu/old.out", 'w') as f: - print 
>> f, parse_old_rnn() - with open("/Users/baidu/new.out", "w") as f: - print >> f, parse_new_rnn() - # print ''.join(diff) + diff = difflib.unified_diff(parse_old_rnn().splitlines(1), + parse_new_rnn().splitlines(1)) + print ''.join(diff) if __name__ == '__main__': From b400c8f02c76ce74828cc999d6bef335cca18a57 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Thu, 2 Mar 2017 11:47:33 +0800 Subject: [PATCH 34/87] update to latest --- python/paddle/v2/config_base.py | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/python/paddle/v2/config_base.py b/python/paddle/v2/config_base.py index 035f96b0f2..be3e39a06e 100644 --- a/python/paddle/v2/config_base.py +++ b/python/paddle/v2/config_base.py @@ -19,9 +19,10 @@ import paddle.trainer_config_helpers as conf_helps class Layer(object): - def __init__(self, name=None, parent_layers=None): + def __init__(self, name=None, size=None, parent_layers=None): assert isinstance(parent_layers, dict) self.name = name + self.size = size self.__parent_layers__ = parent_layers def to_proto(self, context): @@ -39,16 +40,30 @@ class Layer(object): self.__parent_layers__[layer_name]) kwargs[layer_name] = v1_layer - if self.name is None: + if self.context_name() is None: return self.to_proto_impl(**kwargs) - elif self.name not in context: - context[self.name] = self.to_proto_impl(**kwargs) + elif self.context_name() not in context: + context[self.context_name()] = self.to_proto_impl(**kwargs) - return context[self.name] + if self.use_context_name(): + return context[self.context_name()] + else: + return context[self.name] def to_proto_impl(self, **kwargs): raise NotImplementedError() + def context_name(self): + """ + Context name means the context which stores `to_proto_impl` result. + If multiple layer share same context_name, the `to_proto_impl` of them + will be invoked only once. + """ + return self.name + + def use_context_name(self): + return False + def __convert_to_v2__(method_name, parent_names, is_default_name=True): if is_default_name: @@ -69,7 +84,8 @@ def __convert_to_v2__(method_name, parent_names, is_default_name=True): other_kwargs[key] = kwargs[key] name = kwargs.get('name', None) - super(V2LayerImpl, self).__init__(name, parent_layers) + size = kwargs.get('size', None) + super(V2LayerImpl, self).__init__(name, size, parent_layers) self.__other_kwargs__ = other_kwargs if wrapper is not None: From 1524f2041ee3e5dd6bf1613afeb16ed3884939e9 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 2 Mar 2017 13:03:41 +0800 Subject: [PATCH 35/87] Add testing cost. 
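To illustrate the context_name()/use_context_name() hooks added to config_base.py in [PATCH 34/87] above: context_name() is the key under which to_proto() caches the generated v1 config (layers that return the same key are parsed only once), and use_context_name() decides whether to_proto() hands back the entry stored under that key or the one stored under the layer's own name. A made-up subclass, only as a sketch (it assumes the module is importable as paddle.v2.config_base):

from paddle.v2.config_base import Layer


class CachedLayer(Layer):
    """Not a real layer; it only demonstrates the two hooks."""

    def __init__(self, name, **kwargs):
        super(CachedLayer, self).__init__(name=name, parent_layers=dict())
        self.__kwargs__ = kwargs

    def context_name(self):
        # Cache key for the generated config, distinct from self.name so a
        # differently-typed layer may legally share the user-visible name.
        return self.name + "#cached"

    def use_context_name(self):
        # True: to_proto() returns the entry cached under context_name().
        # False (the default): it returns the entry cached under self.name,
        # which is how a memory resolves to the layer sharing its name.
        return True

    def to_proto_impl(self, **kwargs):
        raise NotImplementedError()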
--- demo/mnist/api_train_v2.py | 9 +++++---- python/paddle/v2/event.py | 3 ++- python/paddle/v2/trainer.py | 12 ++++++++---- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/demo/mnist/api_train_v2.py b/demo/mnist/api_train_v2.py index 06beb7024d..00d1022175 100644 --- a/demo/mnist/api_train_v2.py +++ b/demo/mnist/api_train_v2.py @@ -30,10 +30,11 @@ def main(): result = trainer.test(reader=paddle.reader.batched( paddle.dataset.mnist.test(), batch_size=256)) - print "Pass %d, Batch %d, Cost %f, %s, Testing metrics %s" % ( - event.pass_id, event.batch_id, event.cost, event.metrics, - result.metrics) - + print "Pass %d, Batch %d, Cost %.2f, %s, " \ + "Testing cost %.2f metrics %s" % ( + event.pass_id, event.batch_id, event.cost, + event.metrics, + result.cost, result.metrics) else: pass diff --git a/python/paddle/v2/event.py b/python/paddle/v2/event.py index a78bcf076c..a429e36b63 100644 --- a/python/paddle/v2/event.py +++ b/python/paddle/v2/event.py @@ -34,8 +34,9 @@ class WithMetric(object): class TestResult(WithMetric): - def __init__(self, evaluator): + def __init__(self, evaluator, cost): super(TestResult, self).__init__(evaluator) + self.cost = cost class BeginPass(object): diff --git a/python/paddle/v2/trainer.py b/python/paddle/v2/trainer.py index 5003f55f3e..58ec6dd5fe 100644 --- a/python/paddle/v2/trainer.py +++ b/python/paddle/v2/trainer.py @@ -123,9 +123,8 @@ class SGD(ITrainer): for each_param in self.__gradient_machine__.getParameters(): updater.update(each_param) # Get cost. We use numpy to calculate total cost for this batch. - cost_vec = out_args.getSlotValue(0) - cost_vec = cost_vec.copyToNumpyMat() - cost = cost_vec.sum() / len(data_batch) + cost_sum = out_args.sumCosts() + cost = cost_sum / len(data_batch) updater.finishBatch(cost) batch_evaluator.finish() event_handler( @@ -154,13 +153,18 @@ class SGD(ITrainer): evaluator = self.__gradient_machine__.makeEvaluator() out_args = api.Arguments.createArguments(0) evaluator.start() + total_cost = 0 + num_samples = 0.0 for data_batch in reader(): + num_samples += len(data_batch) self.__gradient_machine__.forward( feeder(data_batch), out_args, api.PASS_TEST) + total_cost += out_args.sumCosts() self.__gradient_machine__.eval(evaluator) evaluator.finish() - return v2_event.TestResult(evaluator=evaluator) + return v2_event.TestResult( + evaluator=evaluator, cost=total_cost / num_samples) def __check_train_args__(reader, event_handler, **kwargs): From ba1c978514c05500a858644a09033e2afc5e1f7a Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Thu, 2 Mar 2017 13:41:41 +0800 Subject: [PATCH 36/87] close file and compare the result with the old way. --- python/paddle/v2/dataset/conll05.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/python/paddle/v2/dataset/conll05.py b/python/paddle/v2/dataset/conll05.py index 7c43c7c634..b6a4c252d1 100644 --- a/python/paddle/v2/dataset/conll05.py +++ b/python/paddle/v2/dataset/conll05.py @@ -12,7 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import paddle.v2.dataset.common +#import paddle.v2.dataset.common +import common import tarfile import gzip import itertools @@ -49,11 +50,9 @@ def load_dict(filename): def corpus_reader(data_path, words_name, props_name): """ - Read one corpus by corpus name. It returns an iterator. Each element of + Read one corpus. It returns an iterator. Each element of this iterator is a tuple including sentence and labels. 
The sentence is consist of a list of word IDs. The labels include a list of label IDs. - :param name: corpus name. - :type name: basestring :return: a iterator of data. :rtype: iterator """ @@ -104,7 +103,8 @@ def corpus_reader(data_path, words_name, props_name): lbl_seq.append('B-' + cur_tag) is_in_bracket = True else: - print 'error:', l + raise RuntimeError('Unexpected label: %s' % + l) yield sentences, verb_list[i], lbl_seq @@ -115,6 +115,10 @@ def corpus_reader(data_path, words_name, props_name): sentences.append(word) one_seg.append(label) + pf.close() + wf.close() + tf.close() + return reader From d3c755df3fe6009ed2cde1b5dca41196e4024aa7 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Thu, 2 Mar 2017 13:41:51 +0800 Subject: [PATCH 37/87] Refine code --- demo/sentiment/train_v2.py | 95 ++++++++++++++++---------------------- 1 file changed, 39 insertions(+), 56 deletions(-) diff --git a/demo/sentiment/train_v2.py b/demo/sentiment/train_v2.py index 779bfee5b6..3d595fad30 100644 --- a/demo/sentiment/train_v2.py +++ b/demo/sentiment/train_v2.py @@ -1,3 +1,4 @@ +import sys from os.path import join as join_path import paddle.trainer_config_helpers.attrs as attrs from paddle.trainer_config_helpers.poolings import MaxPooling @@ -188,88 +189,69 @@ def stacked_lstm_net(input_dim, return cost -def data_reader(): - data_dir = "./data/pre-imdb" - train_file = "train_part_000" - test_file = "test_part_000" - dict_file = "dict.txt" - train_file = join_path(data_dir, train_file) - test_file = join_path(data_dir, test_file) - dict_file = join_path(data_dir, dict_file) - - with open(dict_file, 'r') as fdict, open(train_file, 'r') as fdata: - dictionary = dict() - for i, line in enumerate(fdict): - dictionary[line.split('\t')[0]] = i - - for line_count, line in enumerate(fdata): - label, comment = line.strip().split('\t\t') - label = int(label) - words = comment.split() - word_slot = [dictionary[w] for w in words if w in dictionary] - yield (word_slot, label) - - -def test_reader(): - data_dir = "./data/pre-imdb" - train_file = "train_part_000" - test_file = "test_part_000" - dict_file = "dict.txt" - train_file = join_path(data_dir, train_file) - test_file = join_path(data_dir, test_file) - dict_file = join_path(data_dir, dict_file) - - with open(dict_file, 'r') as fdict, open(test_file, 'r') as ftest: - dictionary = dict() - for i, line in enumerate(fdict): - dictionary[line.split('\t')[0]] = i - - for line_count, line in enumerate(ftest): - label, comment = line.strip().split('\t\t') - label = int(label) - words = comment.split() - word_slot = [dictionary[w] for w in words if w in dictionary] - yield (word_slot, label) +def data_reader(data_file, dict_file): + def reader(): + with open(dict_file, 'r') as fdict, open(data_file, 'r') as fdata: + dictionary = dict() + for i, line in enumerate(fdict): + dictionary[line.split('\t')[0]] = i + + for line_count, line in enumerate(fdata): + label, comment = line.strip().split('\t\t') + label = int(label) + words = comment.split() + word_slot = [dictionary[w] for w in words if w in dictionary] + yield (word_slot, label) + + return reader if __name__ == '__main__': - data_dir = "./data/pre-imdb" - train_list = "train.list" - test_list = "test.list" - dict_file = "dict.txt" - dict_dim = len(open(join_path(data_dir, "dict.txt")).readlines()) - class_dim = len(open(join_path(data_dir, 'labels.list')).readlines()) - is_predict = False + # data file + train_file = "./data/pre-imdb/train_part_000" + test_file = "./data/pre-imdb/test_part_000" + dict_file = 
"./data/pre-imdb/dict.txt" + labels = "./data/pre-imdb/labels.list" # init paddle.init(use_gpu=True, trainer_count=4) # network config - # cost = convolution_net(dict_dim, class_dim=class_dim, is_predict=is_predict) - cost = stacked_lstm_net( - dict_dim, class_dim=class_dim, stacked_num=3, is_predict=is_predict) + dict_dim = len(open(dict_file).readlines()) + class_dim = len(open(labels).readlines()) + + # Please choose the way to build the network + # by uncommenting the corresponding line. + cost = convolution_net(dict_dim, class_dim=class_dim) + # cost = stacked_lstm_net(dict_dim, class_dim=class_dim, stacked_num=3) # create parameters parameters = paddle.parameters.create(cost) + # create optimizer adam_optimizer = paddle.optimizer.Adam( learning_rate=2e-3, regularization=paddle.optimizer.L2Regularization(rate=8e-4), model_average=paddle.optimizer.ModelAverage(average_window=0.5)) + # End batch and end pass event handler def event_handler(event): if isinstance(event, paddle.event.EndIteration): if event.batch_id % 100 == 0: - print "Pass %d, Batch %d, Cost %f, %s" % ( + print "\nPass %d, Batch %d, Cost %f, %s" % ( event.pass_id, event.batch_id, event.cost, event.metrics) + else: + sys.stdout.write('.') + sys.stdout.flush() if isinstance(event, paddle.event.EndPass): result = trainer.test( reader=paddle.reader.batched( - test_reader, batch_size=128), + data_reader(test_file, dict_file), batch_size=128), reader_dict={'word': 0, 'label': 1}) - print "Test with Pass %d, %s" % (event.pass_id, result.metrics) + print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics) + # create trainer trainer = paddle.trainer.SGD(cost=cost, parameters=parameters, update_equation=adam_optimizer) @@ -277,7 +259,8 @@ if __name__ == '__main__': trainer.train( reader=paddle.reader.batched( paddle.reader.shuffle( - data_reader, buf_size=4096), batch_size=128), + data_reader(train_file, dict_file), buf_size=4096), + batch_size=128), event_handler=event_handler, reader_dict={'word': 0, 'label': 1}, From 4dd2e40bd07c0ff1123d35328687f53682e67a62 Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Thu, 2 Mar 2017 13:55:41 +0800 Subject: [PATCH 38/87] remove comments --- python/paddle/v2/dataset/conll05.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/python/paddle/v2/dataset/conll05.py b/python/paddle/v2/dataset/conll05.py index b6a4c252d1..7874161a05 100644 --- a/python/paddle/v2/dataset/conll05.py +++ b/python/paddle/v2/dataset/conll05.py @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-#import paddle.v2.dataset.common -import common +import paddle.v2.dataset.common import tarfile import gzip import itertools From 0dc68a2c90e2432a3b5678881268fa22e1f0d990 Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 2 Mar 2017 14:48:51 +0800 Subject: [PATCH 39/87] add getNonStaticParameters --- demo/image_classification/api_v2_train.py | 4 ++-- paddle/api/GradientMachine.cpp | 14 ++++++++++++++ paddle/api/PaddleAPI.h | 3 +++ paddle/py_paddle/util.py | 6 ++++++ 4 files changed, 25 insertions(+), 2 deletions(-) diff --git a/demo/image_classification/api_v2_train.py b/demo/image_classification/api_v2_train.py index e6e4307242..0b4dc4d929 100644 --- a/demo/image_classification/api_v2_train.py +++ b/demo/image_classification/api_v2_train.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License -from api_v2_vgg import resnet_cifar10 -from api_v2_resnet import vgg_bn_drop +from api_v2_vgg import vgg_bn_drop +from api_v2_resnet import resnet_cifar10 import paddle.v2 as paddle diff --git a/paddle/api/GradientMachine.cpp b/paddle/api/GradientMachine.cpp index 538ca2999f..dcb5fe086f 100644 --- a/paddle/api/GradientMachine.cpp +++ b/paddle/api/GradientMachine.cpp @@ -142,6 +142,20 @@ Parameter* GradientMachine::getParameter(size_t i) throw(RangeError) { } } +size_t GradientMachine::getNonStaticParameterSize() const { + return m->machine->getNonStaticParameters().size(); +} + +Parameter* GradientMachine::getNonStaticParameter(size_t i) throw(RangeError) { + auto params = m->machine->getNonStaticParameters(); + if (i < params.size()) { + return Parameter::createFromSharedPtr( + &m->machine->getNonStaticParameters()[i]); + } else { + throw RangeError(); + } +} + void GradientMachine::randParameters() { m->machine->randParameters(); } Arguments* GradientMachine::getLayerOutput(const std::string& layerName) const diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index 1831b8e170..764946cf53 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -768,6 +768,9 @@ public: size_t getParameterSize() const; Parameter* getParameter(size_t i) throw(RangeError); + size_t getNonStaticParameterSize() const; + Parameter* getNonStaticParameter(size_t i) throw(RangeError); + void randParameters(); Arguments* getLayerOutput(const std::string& layerName) const diff --git a/paddle/py_paddle/util.py b/paddle/py_paddle/util.py index a708def1d2..fb337b8af3 100644 --- a/paddle/py_paddle/util.py +++ b/paddle/py_paddle/util.py @@ -195,6 +195,12 @@ def __monkeypatch_gradient_machine__(): swig_paddle.GradientMachine.getParameters = getParameters + def getNonStaticParameters(self): + return (self.getNonStaticParameter(i) + for i in xrange(self.getNonStaticParameterSize())) + + swig_paddle.GradientMachine.getParameters = getParameters + def getLayerOutputs(self, layerNames): """ getLayerOutputs. get outputs of layers and return a numpy matrix dict. 
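A sketch of how the new accessor is consumed on the Python side (it assumes a GradientMachine instance and a parameter updater obtained elsewhere; the same loop is wired into paddle.v2.trainer later in this series):

def update_trainable(gm, updater):
    # gm: a swig_paddle.GradientMachine; updater: a parameter updater.
    # getNonStaticParameters() is the generator monkey-patched in util.py over
    # the new SWIG accessors getNonStaticParameterSize()/getNonStaticParameter(i).
    # Static parameters are skipped, so only trainable ones are updated.
    for each_param in gm.getNonStaticParameters():
        updater.update(each_param)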
From ce3a399d3d5654804c8f258cb4b2d0455e013606 Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 2 Mar 2017 14:54:56 +0800 Subject: [PATCH 40/87] update util.py --- paddle/py_paddle/util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/py_paddle/util.py b/paddle/py_paddle/util.py index fb337b8af3..1c9455fab5 100644 --- a/paddle/py_paddle/util.py +++ b/paddle/py_paddle/util.py @@ -199,7 +199,7 @@ def __monkeypatch_gradient_machine__(): return (self.getNonStaticParameter(i) for i in xrange(self.getNonStaticParameterSize())) - swig_paddle.GradientMachine.getParameters = getParameters + swig_paddle.GradientMachine.getNonStaticParameters = getNonStaticParameters def getLayerOutputs(self, layerNames): """ From e4007337ae88d0874f8d7c0bc41e9aa641de38b7 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 2 Mar 2017 15:09:21 +0800 Subject: [PATCH 41/87] Follow comments --- python/paddle/v2/trainer.py | 1 - 1 file changed, 1 deletion(-) diff --git a/python/paddle/v2/trainer.py b/python/paddle/v2/trainer.py index 58ec6dd5fe..21a1642c36 100644 --- a/python/paddle/v2/trainer.py +++ b/python/paddle/v2/trainer.py @@ -122,7 +122,6 @@ class SGD(ITrainer): self.__gradient_machine__.eval(batch_evaluator) for each_param in self.__gradient_machine__.getParameters(): updater.update(each_param) - # Get cost. We use numpy to calculate total cost for this batch. cost_sum = out_args.sumCosts() cost = cost_sum / len(data_batch) updater.finishBatch(cost) From c9bb48b308807f80b3ba238cafb97ba4b0eda983 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Thu, 2 Mar 2017 15:09:26 +0800 Subject: [PATCH 42/87] support calculate size --- python/paddle/v2/config_base.py | 7 +- python/paddle/v2/layer.py | 110 ++++++++++++++++++++------------ 2 files changed, 75 insertions(+), 42 deletions(-) diff --git a/python/paddle/v2/config_base.py b/python/paddle/v2/config_base.py index be3e39a06e..573539a30c 100644 --- a/python/paddle/v2/config_base.py +++ b/python/paddle/v2/config_base.py @@ -22,7 +22,7 @@ class Layer(object): def __init__(self, name=None, size=None, parent_layers=None): assert isinstance(parent_layers, dict) self.name = name - self.size = size + self.__contex__ = {} self.__parent_layers__ = parent_layers def to_proto(self, context): @@ -44,7 +44,7 @@ class Layer(object): return self.to_proto_impl(**kwargs) elif self.context_name() not in context: context[self.context_name()] = self.to_proto_impl(**kwargs) - + self.__contex__ = context if self.use_context_name(): return context[self.context_name()] else: @@ -64,6 +64,9 @@ class Layer(object): def use_context_name(self): return False + def calcalted_size(self): + return self.__contex__[self.context_name()].size + def __convert_to_v2__(method_name, parent_names, is_default_name=True): if is_default_name: diff --git a/python/paddle/v2/layer.py b/python/paddle/v2/layer.py index e24244a48c..a97518ed52 100644 --- a/python/paddle/v2/layer.py +++ b/python/paddle/v2/layer.py @@ -197,6 +197,10 @@ class MemoryV2(WithExtraParent): val = locs[key] if isinstance(val, RecurrentLayerInput): begin_of_current_rnn.append(val) + elif isinstance(val, collections.Sequence): + for v in val: + if isinstance(v, RecurrentLayerInput): + begin_of_current_rnn.append(v) if begin_of_current_rnn: break @@ -216,7 +220,13 @@ class MemoryV2(WithExtraParent): if self.__boot_layer_name__ is not None: args['boot_layer'] = context[self.__boot_layer_name__] - return conf_helps.memory(name=self.name, size=self.size, **args) + + if callable(self.size): + real_size = self.size() + else: + 
real_size = self.size + args['size'] = real_size + return conf_helps.memory(name=self.name, **args) def context_name(self): return self.name + "#memory" @@ -311,6 +321,12 @@ class MixedLayerV2(Layer): args[each] = kwargs[each] for each in self.__other_kwargs__: args[each] = self.__other_kwargs__[each] + size = args.get('size', None) + if callable(size): + real_size = size() + else: + real_size = size + args['size'] = real_size return getattr(conf_helps, self.__method_name__)(**args) @@ -363,53 +379,15 @@ class RecurrentLayerOutput(Layer): RecurrentLayerGroupEnd(name=self.__recurrent_name__) -@wrap_name_default() -def recurrent_group(step, input, name=None): - if not isinstance(input, collections.Sequence): - input = [input] - - # TODO(qiaolongfei) convert StaticInput to memory according to v2 recurrent_group - for i in xrange(len(input)): - cur_input = input[i] - if isinstance(cur_input, StaticInputV2): - input[i] = cur_input.input - - actual_input = [ - RecurrentLayerInput( - recurrent_name=name, - index=i, - parent_layers={'recurrent_inputs': input}) - for i in xrange(len(input)) - ] - - actual_output = step(*actual_input) - - if not isinstance(actual_output, collections.Sequence): - actual_output = [actual_output] - - retv = [ - RecurrentLayerOutput( - recurrent_name=name, - index=i, - parent_layers={'recurrent_outputs': actual_output}) - for i in xrange(len(actual_output)) - ] - if len(retv) == 1: - return retv[0] - else: - return retv - - LayerV2 = Layer data = DataLayerV2 AggregateLevel = conf_helps.layers.AggregateLevel ExpandLevel = conf_helps.layers.ExpandLevel -recurrent_group = recurrent_group memory = MemoryV2 def __layer_name_mapping__(inname): - if inname in ['data_layer', 'memory', 'mixed_layer']: + if inname in ['data_layer', 'memory', 'mixed_layer', 'recurrent_group']: # Do Not handle these layers return elif inname == 'maxid_layer': @@ -469,3 +447,55 @@ operator_list = [ for op in operator_list: globals()[op[0]] = __convert_to_v2__( op[0], parent_names=op[1], is_default_name=False) + + +@wrap_name_default() +def recurrent_group(step, input, name=None): + if not isinstance(input, collections.Sequence): + input = [input] + + non_static_inputs = filter(lambda x: not isinstance(x, StaticInputV2), + input) + actual_input = [ + RecurrentLayerInput( + recurrent_name=name, + index=i, + parent_layers={'recurrent_inputs': non_static_inputs}) + for i in xrange(len(non_static_inputs)) + ] + + def __real_step__(*args): + rnn_input = list(args) + static_inputs = filter(lambda x: isinstance(x, StaticInputV2), input) + for static_input in static_inputs: + mem_name = "__%s_memory__" % static_input.input.name + print memory + mem = memory( + name=mem_name, + is_seq=static_input.is_seq, + size=static_input.input.calcalted_size, + boot_layer=static_input.input) + with mixed( + name=mem_name, + size=static_input.input.calcalted_size, + act=activation.Identity()) as mix: + mix += identity_projection(input=mem) + rnn_input.insert(input.index(static_input), mix) + return step(*rnn_input) + + actual_output = __real_step__(*actual_input) + + if not isinstance(actual_output, collections.Sequence): + actual_output = [actual_output] + + retv = [ + RecurrentLayerOutput( + recurrent_name=name, + index=i, + parent_layers={'recurrent_outputs': actual_output}) + for i in xrange(len(actual_output)) + ] + if len(retv) == 1: + return retv[0] + else: + return retv From 69bf77fd1e71fc57bf0f15820a9dd34bd98c79b6 Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 2 Mar 2017 15:09:49 +0800 Subject: [PATCH 43/87] 
fix trainer v2 getNonStaticParameters --- python/paddle/v2/trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/v2/trainer.py b/python/paddle/v2/trainer.py index bf8b181e42..44ba9d7ae1 100644 --- a/python/paddle/v2/trainer.py +++ b/python/paddle/v2/trainer.py @@ -113,7 +113,7 @@ class SGD(ITrainer): gm.forwardBackward(feeder(data_batch), out_args, pass_type) gm.eval(pass_evaluator) gm.eval(batch_evaluator) - for each_param in gm.getParameters(): + for each_param in gm.getNonStaticParameters(): updater.update(each_param) # Get cost. We use numpy to calculate total cost for this batch. cost_vec = out_args.getSlotValue(0) From 3b8a8f81142e7eaea06e8e43e41bcb7bc73b0e09 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 2 Mar 2017 15:10:54 +0800 Subject: [PATCH 44/87] Follow comments --- demo/mnist/api_train_v2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demo/mnist/api_train_v2.py b/demo/mnist/api_train_v2.py index 00d1022175..575a32b322 100644 --- a/demo/mnist/api_train_v2.py +++ b/demo/mnist/api_train_v2.py @@ -30,7 +30,7 @@ def main(): result = trainer.test(reader=paddle.reader.batched( paddle.dataset.mnist.test(), batch_size=256)) - print "Pass %d, Batch %d, Cost %.2f, %s, " \ + print "Pass %d, Batch %d, Cost %.2f, %s\n" \ "Testing cost %.2f metrics %s" % ( event.pass_id, event.batch_id, event.cost, event.metrics, From 1164c287b9db46abd9e591ddebe720bc3e08e22d Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 2 Mar 2017 15:14:34 +0800 Subject: [PATCH 45/87] add datasets import --- python/paddle/v2/dataset/__init__.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/python/paddle/v2/dataset/__init__.py b/python/paddle/v2/dataset/__init__.py index 9647e98503..d222739ba2 100644 --- a/python/paddle/v2/dataset/__init__.py +++ b/python/paddle/v2/dataset/__init__.py @@ -1,3 +1,7 @@ import mnist +import imikolov +import imdb +import cifar +import movielens -__all__ = ['mnist'] +__all__ = ['mnist', 'imikolov', 'imdb', 'cifar', 'movielens'] From f9e6aa2c31aa6bc5269cd66eaa8705b0b98af989 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Thu, 2 Mar 2017 15:23:19 +0800 Subject: [PATCH 46/87] refine code --- python/paddle/v2/config_base.py | 12 ++++++---- python/paddle/v2/layer.py | 40 +++++++++++++++++---------------- 2 files changed, 29 insertions(+), 23 deletions(-) diff --git a/python/paddle/v2/config_base.py b/python/paddle/v2/config_base.py index 573539a30c..fa2ccec6c3 100644 --- a/python/paddle/v2/config_base.py +++ b/python/paddle/v2/config_base.py @@ -19,7 +19,7 @@ import paddle.trainer_config_helpers as conf_helps class Layer(object): - def __init__(self, name=None, size=None, parent_layers=None): + def __init__(self, name=None, parent_layers=None): assert isinstance(parent_layers, dict) self.name = name self.__contex__ = {} @@ -64,7 +64,12 @@ class Layer(object): def use_context_name(self): return False - def calcalted_size(self): + def calculate_size(self): + """ + lazy calculate size of the layer, should be called when to_proto_impl of + this layer is called. 
+ :return: + """ return self.__contex__[self.context_name()].size @@ -87,8 +92,7 @@ def __convert_to_v2__(method_name, parent_names, is_default_name=True): other_kwargs[key] = kwargs[key] name = kwargs.get('name', None) - size = kwargs.get('size', None) - super(V2LayerImpl, self).__init__(name, size, parent_layers) + super(V2LayerImpl, self).__init__(name, parent_layers) self.__other_kwargs__ = other_kwargs if wrapper is not None: diff --git a/python/paddle/v2/layer.py b/python/paddle/v2/layer.py index a97518ed52..0d8b59cfd2 100644 --- a/python/paddle/v2/layer.py +++ b/python/paddle/v2/layer.py @@ -139,10 +139,10 @@ class WithExtraParent(Layer): def extra_parent(self): return self.__extra_parent__ - def __init__(self, name=None, size=None, parent_layers=None): + def __init__(self, name=None, parent_layers=None): self.__extra_parent__ = [] super(WithExtraParent, self).__init__( - name=name, size=size, parent_layers=parent_layers) + name=name, parent_layers=parent_layers) def append_extra_parent(self, parent): self.__extra_parent__.append(parent) @@ -178,11 +178,9 @@ class WithExtraParent(Layer): class MemoryV2(WithExtraParent): - def __init__(self, name, size, **kwargs): + def __init__(self, name, **kwargs): self.name = name - self.size = size - super(MemoryV2, self).__init__( - name=name, size=size, parent_layers=dict()) + super(MemoryV2, self).__init__(name=name, parent_layers=dict()) self.__kwargs__ = kwargs self.__boot_layer_name__ = None if 'boot_layer' in kwargs: @@ -221,11 +219,14 @@ class MemoryV2(WithExtraParent): if self.__boot_layer_name__ is not None: args['boot_layer'] = context[self.__boot_layer_name__] - if callable(self.size): - real_size = self.size() - else: - real_size = self.size - args['size'] = real_size + size = args.get('size', None) + if size is not None: + if callable(size): + real_size = size() + else: + real_size = size + print(real_size) + args['size'] = real_size return conf_helps.memory(name=self.name, **args) def context_name(self): @@ -298,7 +299,7 @@ class MixedLayerV2(Layer): other_kwargs['bias_attr'] = bias_attr other_kwargs['layer_attr'] = layer_attr parent_layers = {"input": self.__inputs__} - super(MixedLayerV2, self).__init__(name, size, parent_layers) + super(MixedLayerV2, self).__init__(name, parent_layers) self.__other_kwargs__ = other_kwargs def __iadd__(self, other): @@ -322,11 +323,12 @@ class MixedLayerV2(Layer): for each in self.__other_kwargs__: args[each] = self.__other_kwargs__[each] size = args.get('size', None) - if callable(size): - real_size = size() - else: - real_size = size - args['size'] = real_size + if size is not None: + if callable(size): + real_size = size() + else: + real_size = size + args['size'] = real_size return getattr(conf_helps, self.__method_name__)(**args) @@ -473,11 +475,11 @@ def recurrent_group(step, input, name=None): mem = memory( name=mem_name, is_seq=static_input.is_seq, - size=static_input.input.calcalted_size, + size=static_input.input.calculate_size, boot_layer=static_input.input) with mixed( name=mem_name, - size=static_input.input.calcalted_size, + size=static_input.input.calculate_size, act=activation.Identity()) as mix: mix += identity_projection(input=mem) rnn_input.insert(input.index(static_input), mix) From bb66f24334eff70a045c75ef9ff5a22b77c27e81 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Thu, 2 Mar 2017 15:32:01 +0800 Subject: [PATCH 47/87] remove debug code --- python/paddle/v2/layer.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/python/paddle/v2/layer.py b/python/paddle/v2/layer.py 
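To make the lazy size in calculate_size() concrete: recurrent_group (in the layer.py hunk that follows) passes the bound method static_input.input.calculate_size rather than its value as the size argument, and MemoryV2 / MixedLayerV2 only invoke it inside to_proto_impl, once the referenced layer's v1 config (and therefore its size) exists. A toy, self-contained illustration of that deferred evaluation, not PaddlePaddle code:

class FakeEmbedding(object):
    """Stands in for a v2 layer whose v1 config has already been generated."""

    def __init__(self, generated_size):
        self._generated_size = generated_size

    def calculate_size(self):
        # Resolved only when called, i.e. after the config exists.
        return self._generated_size


def build_memory(size):
    # Mirrors the size handling in MemoryV2.to_proto_impl: accept an int or a callable.
    real_size = size() if callable(size) else size
    return {'name': 'rnn_state#memory', 'size': real_size}


emb = FakeEmbedding(generated_size=128)
print build_memory(size=emb.calculate_size)   # size resolved lazily to 128
print build_memory(size=64)                   # plain ints still work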
index 0d8b59cfd2..2f55611aaa 100644 --- a/python/paddle/v2/layer.py +++ b/python/paddle/v2/layer.py @@ -225,7 +225,6 @@ class MemoryV2(WithExtraParent): real_size = size() else: real_size = size - print(real_size) args['size'] = real_size return conf_helps.memory(name=self.name, **args) @@ -471,7 +470,6 @@ def recurrent_group(step, input, name=None): static_inputs = filter(lambda x: isinstance(x, StaticInputV2), input) for static_input in static_inputs: mem_name = "__%s_memory__" % static_input.input.name - print memory mem = memory( name=mem_name, is_seq=static_input.is_seq, From 6d09f70a860f253e00f91685eb73693e3eef5a76 Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 2 Mar 2017 15:43:13 +0800 Subject: [PATCH 48/87] Add event_handler test and comment --- demo/image_classification/api_v2_train.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/demo/image_classification/api_v2_train.py b/demo/image_classification/api_v2_train.py index 0b4dc4d929..94bf0b5db4 100644 --- a/demo/image_classification/api_v2_train.py +++ b/demo/image_classification/api_v2_train.py @@ -12,27 +12,41 @@ # See the License for the specific language governing permissions and # limitations under the License +import sys +import paddle.v2 as paddle from api_v2_vgg import vgg_bn_drop from api_v2_resnet import resnet_cifar10 -import paddle.v2 as paddle +# End batch and end pass event handler def event_handler(event): if isinstance(event, paddle.event.EndIteration): if event.batch_id % 100 == 0: - print "Pass %d, Batch %d, Cost %f" % (event.pass_id, event.batch_id, - event.cost) + print "\nPass %d, Batch %d, Cost %f, %s" % ( + event.pass_id, event.batch_id, event.cost, event.metrics) + else: + sys.stdout.write('.') + sys.stdout.flush() + if isinstance(event, paddle.event.EndPass): + result = trainer.test( + reader=paddle.reader.batched( + paddle.dataset.cifar.test10(), batch_size=128), + reader_dict={'image': 0, + 'label': 1}) + print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics) def main(): datadim = 3 * 32 * 32 classdim = 10 + # PaddlePaddle init paddle.init(use_gpu=True, trainer_count=1) image = paddle.layer.data( name="image", type=paddle.data_type.dense_vector(datadim)) + # Add neural network config # option 1. resnet net = resnet_cifar10(image, depth=32) # option 2. 
vgg @@ -46,8 +60,10 @@ def main(): name="label", type=paddle.data_type.integer_value(classdim)) cost = paddle.layer.classification_cost(input=out, label=lbl) + # Create parameters parameters = paddle.parameters.create(cost) + # Create optimizer momentum_optimizer = paddle.optimizer.Momentum( momentum=0.9, regularization=paddle.optimizer.L2Regularization(rate=0.0002 * 128), @@ -57,6 +73,7 @@ def main(): learning_rate_schedule='discexp', batch_size=128) + # Create trainer trainer = paddle.trainer.SGD(cost=cost, parameters=parameters, update_equation=momentum_optimizer) From 9059eea4f46cf47a9f1382b97f25ab5f4586a5da Mon Sep 17 00:00:00 2001 From: "yi.wu" Date: Thu, 2 Mar 2017 15:58:39 +0800 Subject: [PATCH 49/87] Fix k8s cluster job rerunable --- doc/howto/usage/k8s/src/k8s_train/start_paddle.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/howto/usage/k8s/src/k8s_train/start_paddle.py b/doc/howto/usage/k8s/src/k8s_train/start_paddle.py index f1a770ccb5..935c12bb67 100755 --- a/doc/howto/usage/k8s/src/k8s_train/start_paddle.py +++ b/doc/howto/usage/k8s/src/k8s_train/start_paddle.py @@ -132,7 +132,8 @@ def startPaddle(idMap={}, train_args_dict=None): logDir = JOB_PATH_OUTPUT + "/node_" + str(trainerId) if not os.path.exists(JOB_PATH_OUTPUT): os.makedirs(JOB_PATH_OUTPUT) - os.mkdir(logDir) + if not os.path.exists(logDir): + os.mkdir(logDir) copyCommand = 'cp -rf ' + JOB_PATH + \ "/" + str(trainerId) + "/data/*" + " ./data/" os.system(copyCommand) From 4a94f8a4473b96161b721341fd0a889d34367aed Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Thu, 2 Mar 2017 16:00:59 +0800 Subject: [PATCH 50/87] refine api training --- demo/semantic_role_labeling/api_train_v2.py | 221 +++++++++++++------- demo/semantic_role_labeling/model_v2.py | 121 ----------- python/paddle/v2/dataset/__init__.py | 2 +- python/paddle/v2/dataset/conll05.py | 6 +- 4 files changed, 147 insertions(+), 203 deletions(-) delete mode 100644 demo/semantic_role_labeling/model_v2.py diff --git a/demo/semantic_role_labeling/api_train_v2.py b/demo/semantic_role_labeling/api_train_v2.py index cfbd2a0224..8ce6faaa1b 100644 --- a/demo/semantic_role_labeling/api_train_v2.py +++ b/demo/semantic_role_labeling/api_train_v2.py @@ -1,69 +1,142 @@ +import sys +import math import numpy as np import paddle.v2 as paddle -from model_v2 import db_lstm +import paddle.v2.dataset.conll05 as conll05 UNK_IDX = 0 -word_dict_file = './data/wordDict.txt' -label_dict_file = './data/targetDict.txt' -predicate_file = './data/verbDict.txt' -word_dict = dict() -label_dict = dict() -predicate_dict = dict() - -with open(word_dict_file, 'r') as f_word, \ - open(label_dict_file, 'r') as f_label, \ - open(predicate_file, 'r') as f_pre: - for i, line in enumerate(f_word): - w = line.strip() - word_dict[w] = i - - for i, line in enumerate(f_label): - w = line.strip() - label_dict[w] = i - - for i, line in enumerate(f_pre): - w = line.strip() - predicate_dict[w] = i - -word_dict_len = len(word_dict) -label_dict_len = len(label_dict) -pred_len = len(predicate_dict) - - -def train_reader(file_name="data/feature"): - def reader(): - with open(file_name, 'r') as fdata: - for line in fdata: - sentence, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, label = \ - line.strip().split('\t') - - words = sentence.split() - sen_len = len(words) - word_slot = [word_dict.get(w, UNK_IDX) for w in words] - - predicate_slot = [predicate_dict.get(predicate)] * sen_len - ctx_n2_slot = [word_dict.get(ctx_n2, UNK_IDX)] * sen_len - ctx_n1_slot = 
[word_dict.get(ctx_n1, UNK_IDX)] * sen_len - ctx_0_slot = [word_dict.get(ctx_0, UNK_IDX)] * sen_len - ctx_p1_slot = [word_dict.get(ctx_p1, UNK_IDX)] * sen_len - ctx_p2_slot = [word_dict.get(ctx_p2, UNK_IDX)] * sen_len - - marks = mark.split() - mark_slot = [int(w) for w in marks] - - label_list = label.split() - label_slot = [label_dict.get(w) for w in label_list] - yield word_slot, ctx_n2_slot, ctx_n1_slot, \ - ctx_0_slot, ctx_p1_slot, ctx_p2_slot, predicate_slot, mark_slot, label_slot - - return reader +def db_lstm(): + word_dict, verb_dict, label_dict = conll05.get_dict() + word_dict_len = len(word_dict) + label_dict_len = len(label_dict) + pred_len = len(verb_dict) + print 'word_dict_len,', word_dict_len + print 'label_dict_len,', label_dict_len + print 'pred_len,', pred_len + + mark_dict_len = 2 + word_dim = 32 + mark_dim = 5 + hidden_dim = 512 + depth = 8 + + #8 features + def d_type(size): + return paddle.data_type.integer_value_sequence(size) + + word = paddle.layer.data(name='word_data', type=d_type(word_dict_len)) + predicate = paddle.layer.data(name='verb_data', type=d_type(pred_len)) + + ctx_n2 = paddle.layer.data(name='ctx_n2_data', type=d_type(word_dict_len)) + ctx_n1 = paddle.layer.data(name='ctx_n1_data', type=d_type(word_dict_len)) + ctx_0 = paddle.layer.data(name='ctx_0_data', type=d_type(word_dict_len)) + ctx_p1 = paddle.layer.data(name='ctx_p1_data', type=d_type(word_dict_len)) + ctx_p2 = paddle.layer.data(name='ctx_p2_data', type=d_type(word_dict_len)) + mark = paddle.layer.data(name='mark_data', type=d_type(mark_dict_len)) + + target = paddle.layer.data(name='target', type=d_type(label_dict_len)) + + default_std = 1 / math.sqrt(hidden_dim) / 3.0 + + emb_para = paddle.attr.Param(name='emb', initial_std=0., learning_rate=0.) + std_0 = paddle.attr.Param(initial_std=0.) 
+ std_default = paddle.attr.Param(initial_std=default_std) + + predicate_embedding = paddle.layer.embedding( + size=word_dim, + input=predicate, + param_attr=paddle.attr.Param( + name='vemb', initial_std=default_std)) + mark_embedding = paddle.layer.embedding( + size=mark_dim, input=mark, param_attr=std_0) + + word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2] + emb_layers = [ + paddle.layer.embedding( + size=word_dim, input=x, param_attr=emb_para) for x in word_input + ] + emb_layers.append(predicate_embedding) + emb_layers.append(mark_embedding) + + hidden_0 = paddle.layer.mixed( + size=hidden_dim, + bias_attr=std_default, + input=[ + paddle.layer.full_matrix_projection( + input=emb, param_attr=std_default) for emb in emb_layers + ]) + + mix_hidden_lr = 1e-3 + lstm_para_attr = paddle.attr.Param(initial_std=0.0, learning_rate=1.0) + hidden_para_attr = paddle.attr.Param( + initial_std=default_std, learning_rate=mix_hidden_lr) + + lstm_0 = paddle.layer.lstmemory( + input=hidden_0, + act=paddle.activation.Relu(), + gate_act=paddle.activation.Sigmoid(), + state_act=paddle.activation.Sigmoid(), + bias_attr=std_0, + param_attr=lstm_para_attr) + + #stack L-LSTM and R-LSTM with direct edges + input_tmp = [hidden_0, lstm_0] + + for i in range(1, depth): + mix_hidden = paddle.layer.mixed( + size=hidden_dim, + bias_attr=std_default, + input=[ + paddle.layer.full_matrix_projection( + input=input_tmp[0], param_attr=hidden_para_attr), + paddle.layer.full_matrix_projection( + input=input_tmp[1], param_attr=lstm_para_attr) + ]) + + lstm = paddle.layer.lstmemory( + input=mix_hidden, + act=paddle.activation.Relu(), + gate_act=paddle.activation.Sigmoid(), + state_act=paddle.activation.Sigmoid(), + reverse=((i % 2) == 1), + bias_attr=std_0, + param_attr=lstm_para_attr) + + input_tmp = [mix_hidden, lstm] + + feature_out = paddle.layer.mixed( + size=label_dict_len, + bias_attr=std_default, + input=[ + paddle.layer.full_matrix_projection( + input=input_tmp[0], param_attr=hidden_para_attr), + paddle.layer.full_matrix_projection( + input=input_tmp[1], param_attr=lstm_para_attr) + ], ) + + crf_cost = paddle.layer.crf(size=label_dict_len, + input=feature_out, + label=target, + param_attr=paddle.attr.Param( + name='crfw', + initial_std=default_std, + learning_rate=mix_hidden_lr)) + + crf_dec = paddle.layer.crf_decoding( + name='crf_dec_l', + size=label_dict_len, + input=feature_out, + label=target, + param_attr=paddle.attr.Param(name='crfw')) + + return crf_cost, crf_dec def load_parameter(file_name, h, w): with open(file_name, 'rb') as f: - f.read(16) # skip header for float type. + f.read(16) # skip header. 
return np.fromfile(f, dtype=np.float32).reshape(h, w) @@ -71,44 +144,36 @@ def main(): paddle.init(use_gpu=False, trainer_count=1) # define network topology - crf_cost, crf_dec = db_lstm(word_dict_len, label_dict_len, pred_len) + crf_cost, crf_dec = db_lstm() + # create parameters parameters = paddle.parameters.create([crf_cost, crf_dec]) - optimizer = paddle.optimizer.Momentum(momentum=0.01, learning_rate=2e-2) + + # create optimizer + optimizer = paddle.optimizer.Momentum( + momentum=0, + learning_rate=2e-2, + regularization=paddle.optimizer.L2Regularization(rate=8e-4), + model_average=paddle.optimizer.ModelAverage( + average_window=0.5, max_average_window=10000), ) def event_handler(event): if isinstance(event, paddle.event.EndIteration): if event.batch_id % 100 == 0: print "Pass %d, Batch %d, Cost %f, %s" % ( event.pass_id, event.batch_id, event.cost, event.metrics) - else: - pass trainer = paddle.trainer.SGD(cost=crf_cost, parameters=parameters, update_equation=optimizer) + parameters.set('emb', load_parameter(conll05.get_embedding(), 44068, 32)) - parameters.set('emb', load_parameter("data/emb", 44068, 32)) - - reader_dict = { - 'word_data': 0, - 'ctx_n2_data': 1, - 'ctx_n1_data': 2, - 'ctx_0_data': 3, - 'ctx_p1_data': 4, - 'ctx_p2_data': 5, - 'verb_data': 6, - 'mark_data': 7, - 'target': 8, - } trn_reader = paddle.reader.batched( paddle.reader.shuffle( - train_reader(), buf_size=8192), batch_size=10) + conll05.test, buf_size=8192), batch_size=10) + trainer.train( - reader=trn_reader, - event_handler=event_handler, - num_passes=10000, - reader_dict=reader_dict) + reader=trn_reader, event_handler=event_handler, num_passes=10000) if __name__ == '__main__': diff --git a/demo/semantic_role_labeling/model_v2.py b/demo/semantic_role_labeling/model_v2.py deleted file mode 100644 index cec58e52c7..0000000000 --- a/demo/semantic_role_labeling/model_v2.py +++ /dev/null @@ -1,121 +0,0 @@ -import math -import paddle.v2 as paddle - - -def db_lstm(word_dict_len, label_dict_len, pred_len): - mark_dict_len = 2 - word_dim = 32 - mark_dim = 5 - hidden_dim = 512 - depth = 8 - - #8 features - def d_type(size): - return paddle.data_type.integer_value_sequence(size) - - word = paddle.layer.data(name='word_data', type=d_type(word_dict_len)) - predicate = paddle.layer.data(name='verb_data', type=d_type(pred_len)) - - ctx_n2 = paddle.layer.data(name='ctx_n2_data', type=d_type(word_dict_len)) - ctx_n1 = paddle.layer.data(name='ctx_n1_data', type=d_type(word_dict_len)) - ctx_0 = paddle.layer.data(name='ctx_0_data', type=d_type(word_dict_len)) - ctx_p1 = paddle.layer.data(name='ctx_p1_data', type=d_type(word_dict_len)) - ctx_p2 = paddle.layer.data(name='ctx_p2_data', type=d_type(word_dict_len)) - mark = paddle.layer.data(name='mark_data', type=d_type(mark_dict_len)) - - target = paddle.layer.data(name='target', type=d_type(label_dict_len)) - - default_std = 1 / math.sqrt(hidden_dim) / 3.0 - - emb_para = paddle.attr.Param(name='emb', initial_std=0., learning_rate=0.) - std_0 = paddle.attr.Param(initial_std=0.) 
- std_default = paddle.attr.Param(initial_std=default_std) - - predicate_embedding = paddle.layer.embedding( - size=word_dim, - input=predicate, - param_attr=paddle.attr.Param( - name='vemb', initial_std=default_std)) - mark_embedding = paddle.layer.embedding( - size=mark_dim, input=mark, param_attr=std_0) - - word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2] - emb_layers = [ - paddle.layer.embedding( - size=word_dim, input=x, param_attr=emb_para) for x in word_input - ] - emb_layers.append(predicate_embedding) - emb_layers.append(mark_embedding) - - hidden_0 = paddle.layer.mixed( - size=hidden_dim, - bias_attr=std_default, - input=[ - paddle.layer.full_matrix_projection( - input=emb, param_attr=std_default) for emb in emb_layers - ]) - - mix_hidden_lr = 1e-3 - lstm_para_attr = paddle.attr.Param(initial_std=0.0, learning_rate=1.0) - hidden_para_attr = paddle.attr.Param( - initial_std=default_std, learning_rate=mix_hidden_lr) - - lstm_0 = paddle.layer.lstmemory( - input=hidden_0, - act=paddle.activation.Relu(), - gate_act=paddle.activation.Sigmoid(), - state_act=paddle.activation.Sigmoid(), - bias_attr=std_0, - param_attr=lstm_para_attr) - - #stack L-LSTM and R-LSTM with direct edges - input_tmp = [hidden_0, lstm_0] - - for i in range(1, depth): - mix_hidden = paddle.layer.mixed( - size=hidden_dim, - bias_attr=std_default, - input=[ - paddle.layer.full_matrix_projection( - input=input_tmp[0], param_attr=hidden_para_attr), - paddle.layer.full_matrix_projection( - input=input_tmp[1], param_attr=lstm_para_attr) - ]) - - lstm = paddle.layer.lstmemory( - input=mix_hidden, - act=paddle.activation.Relu(), - gate_act=paddle.activation.Sigmoid(), - state_act=paddle.activation.Sigmoid(), - reverse=((i % 2) == 1), - bias_attr=std_0, - param_attr=lstm_para_attr) - - input_tmp = [mix_hidden, lstm] - - feature_out = paddle.layer.mixed( - size=label_dict_len, - bias_attr=std_default, - input=[ - paddle.layer.full_matrix_projection( - input=input_tmp[0], param_attr=hidden_para_attr), - paddle.layer.full_matrix_projection( - input=input_tmp[1], param_attr=lstm_para_attr) - ], ) - - crf_cost = paddle.layer.crf(size=label_dict_len, - input=feature_out, - label=target, - param_attr=paddle.attr.Param( - name='crfw', - initial_std=default_std, - learning_rate=mix_hidden_lr)) - - crf_dec = paddle.layer.crf_decoding( - name='crf_dec_l', - size=label_dict_len, - input=feature_out, - label=target, - param_attr=paddle.attr.Param(name='crfw')) - - return crf_cost, crf_dec diff --git a/python/paddle/v2/dataset/__init__.py b/python/paddle/v2/dataset/__init__.py index 15460b820d..90803628e3 100644 --- a/python/paddle/v2/dataset/__init__.py +++ b/python/paddle/v2/dataset/__init__.py @@ -14,4 +14,4 @@ import mnist -__all__ = ['mnist'] +__all__ = ['mnist', 'cifar', 'imdb', 'conll05', 'imikolov', 'movielens'] diff --git a/python/paddle/v2/dataset/conll05.py b/python/paddle/v2/dataset/conll05.py index 7874161a05..52f19d2115 100644 --- a/python/paddle/v2/dataset/conll05.py +++ b/python/paddle/v2/dataset/conll05.py @@ -160,7 +160,6 @@ def reader_creator(corpus_reader, ctx_p2 = 'eos' word_idx = [word_dict.get(w, UNK_IDX) for w in sentence] - pred_idx = [predicate_dict.get(predicate)] * sen_len ctx_n2_idx = [word_dict.get(ctx_n2, UNK_IDX)] * sen_len ctx_n1_idx = [word_dict.get(ctx_n1, UNK_IDX)] * sen_len @@ -168,10 +167,11 @@ def reader_creator(corpus_reader, ctx_p1_idx = [word_dict.get(ctx_p1, UNK_IDX)] * sen_len ctx_p2_idx = [word_dict.get(ctx_p2, UNK_IDX)] * sen_len + pred_idx = [predicate_dict.get(predicate)] * 
sen_len label_idx = [label_dict.get(w) for w in labels] - yield word_idx, pred_idx, ctx_n2_idx, ctx_n1_idx, \ - ctx_0_idx, ctx_p1_idx, ctx_p2_idx, mark, label_idx + yield word_idx, ctx_n2_idx, ctx_n1_idx, \ + ctx_0_idx, ctx_p1_idx, ctx_p2_idx, pred_idx, mark, label_idx return reader() From 11fdb4f041250d906a115c00d4e37b76a4bf8905 Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 2 Mar 2017 16:11:30 +0800 Subject: [PATCH 51/87] Check system's protobuf for internal users --- cmake/external/protobuf.cmake | 82 ++++++++++++++++++----------------- cmake/external/python.cmake | 4 -- 2 files changed, 43 insertions(+), 43 deletions(-) diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake index 84f459033f..26da7e8e38 100644 --- a/cmake/external/protobuf.cmake +++ b/cmake/external/protobuf.cmake @@ -14,46 +14,50 @@ INCLUDE(ExternalProject) -SET(PROTOBUF_SOURCES_DIR ${THIRD_PARTY_PATH}/protobuf) -SET(PROTOBUF_INSTALL_DIR ${THIRD_PARTY_PATH}/install/protobuf) -SET(PROTOBUF_INCLUDE_DIR "${PROTOBUF_INSTALL_DIR}/include" CACHE PATH "protobuf include directory." FORCE) +FIND_PACKAGE(Protobuf) -INCLUDE_DIRECTORIES(${PROTOBUF_INCLUDE_DIR}) +IF(NOT PROTOBUF_FOUND) + SET(PROTOBUF_SOURCES_DIR ${THIRD_PARTY_PATH}/protobuf) + SET(PROTOBUF_INSTALL_DIR ${THIRD_PARTY_PATH}/install/protobuf) + SET(PROTOBUF_INCLUDE_DIR "${PROTOBUF_INSTALL_DIR}/include" CACHE PATH "protobuf include directory." FORCE) + + IF(WIN32) + SET(PROTOBUF_LITE_LIBRARY + "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf-lite.lib" CACHE FILEPATH "protobuf lite library." FORCE) + SET(PROTOBUF_LIBRARY + "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf.lib" CACHE FILEPATH "protobuf library." FORCE) + SET(PROTOBUF_PROTOC_LIBRARY + "${PROTOBUF_INSTALL_DIR}/lib/libprotoc.lib" CACHE FILEPATH "protoc library." FORCE) + SET(PROTOBUF_PROTOC_EXECUTABLE "${PROTOBUF_INSTALL_DIR}/bin/protoc.exe" CACHE FILEPATH "protobuf executable." FORCE) + ELSE(WIN32) + SET(PROTOBUF_LITE_LIBRARY + "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf-lite.a" CACHE FILEPATH "protobuf lite library." FORCE) + SET(PROTOBUF_LIBRARY + "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf.a" CACHE FILEPATH "protobuf library." FORCE) + SET(PROTOBUF_PROTOC_LIBRARY + "${PROTOBUF_INSTALL_DIR}/lib/libprotoc.a" CACHE FILEPATH "protoc library." FORCE) + SET(PROTOBUF_PROTOC_EXECUTABLE "${PROTOBUF_INSTALL_DIR}/bin/protoc" CACHE FILEPATH "protobuf executable." FORCE) + ENDIF(WIN32) -IF(WIN32) - SET(PROTOBUF_LITE_LIBRARY - "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf-lite.lib" CACHE FILEPATH "protobuf lite library." FORCE) - SET(PROTOBUF_LIBRARY - "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf.lib" CACHE FILEPATH "protobuf library." FORCE) - SET(PROTOBUF_PROTOC_LIBRARY - "${PROTOBUF_INSTALL_DIR}/lib/libprotoc.lib" CACHE FILEPATH "protoc library." FORCE) - SET(PROTOBUF_PROTOC_EXECUTABLE "${PROTOBUF_INSTALL_DIR}/bin/protoc.exe" CACHE FILEPATH "protobuf executable." FORCE) -ELSE(WIN32) - SET(PROTOBUF_LITE_LIBRARY - "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf-lite.a" CACHE FILEPATH "protobuf lite library." FORCE) - SET(PROTOBUF_LIBRARY - "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf.a" CACHE FILEPATH "protobuf library." FORCE) - SET(PROTOBUF_PROTOC_LIBRARY - "${PROTOBUF_INSTALL_DIR}/lib/libprotoc.a" CACHE FILEPATH "protoc library." FORCE) - SET(PROTOBUF_PROTOC_EXECUTABLE "${PROTOBUF_INSTALL_DIR}/bin/protoc" CACHE FILEPATH "protobuf executable." 
FORCE) -ENDIF(WIN32) + ExternalProject_Add( + protobuf + ${EXTERNAL_PROJECT_LOG_ARGS} + PREFIX ${PROTOBUF_SOURCES_DIR} + UPDATE_COMMAND "" + DEPENDS zlib + GIT_REPOSITORY "https://github.com/google/protobuf.git" + GIT_TAG "9f75c5aa851cd877fb0d93ccc31b8567a6706546" + CONFIGURE_COMMAND + ${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/protobuf/cmake + -Dprotobuf_BUILD_TESTS=OFF + -DZLIB_ROOT:FILEPATH=${ZLIB_ROOT} + -DCMAKE_POSITION_INDEPENDENT_CODE=ON + -DCMAKE_BUILD_TYPE=Release + -DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR} + -DCMAKE_INSTALL_LIBDIR=lib + ) -ExternalProject_Add( - protobuf - ${EXTERNAL_PROJECT_LOG_ARGS} - PREFIX ${PROTOBUF_SOURCES_DIR} - UPDATE_COMMAND "" - DEPENDS zlib - GIT_REPOSITORY "https://github.com/google/protobuf.git" - GIT_TAG "9f75c5aa851cd877fb0d93ccc31b8567a6706546" - CONFIGURE_COMMAND - ${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/protobuf/cmake - -Dprotobuf_BUILD_TESTS=OFF - -DZLIB_ROOT:FILEPATH=${ZLIB_ROOT} - -DCMAKE_POSITION_INDEPENDENT_CODE=ON - -DCMAKE_BUILD_TYPE=Release - -DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR} - -DCMAKE_INSTALL_LIBDIR=lib -) + LIST(APPEND external_project_dependencies protobuf) +ENDIF(NOT PROTOBUF_FOUND) -LIST(APPEND external_project_dependencies protobuf) +INCLUDE_DIRECTORIES(${PROTOBUF_INCLUDE_DIR}) diff --git a/cmake/external/python.cmake b/cmake/external/python.cmake index 6372a9a768..0accf1a8dd 100644 --- a/cmake/external/python.cmake +++ b/cmake/external/python.cmake @@ -221,7 +221,3 @@ ENDIF(PYTHONLIBS_FOUND AND PYTHONINTERP_FOUND) INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIR}) INCLUDE_DIRECTORIES(${PYTHON_NUMPY_INCLUDE_DIR}) - -MESSAGE("[Paddle] Python Executable: ${PYTHON_EXECUTABLE}") -MESSAGE("[Paddle] Python Include: ${PYTHON_INCLUDE_DIRS}") -MESSAGE("[Paddle] Python Libraries: ${PYTHON_LIBRARIES}") From 69ac20c2845fa0bb988407a4cd3af7af1aaa7d0a Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 2 Mar 2017 16:53:31 +0800 Subject: [PATCH 52/87] Fix event_handler trainer --- demo/image_classification/api_v2_train.py | 35 +++++++++++------------ 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/demo/image_classification/api_v2_train.py b/demo/image_classification/api_v2_train.py index 94bf0b5db4..585f61c6fa 100644 --- a/demo/image_classification/api_v2_train.py +++ b/demo/image_classification/api_v2_train.py @@ -18,24 +18,6 @@ from api_v2_vgg import vgg_bn_drop from api_v2_resnet import resnet_cifar10 -# End batch and end pass event handler -def event_handler(event): - if isinstance(event, paddle.event.EndIteration): - if event.batch_id % 100 == 0: - print "\nPass %d, Batch %d, Cost %f, %s" % ( - event.pass_id, event.batch_id, event.cost, event.metrics) - else: - sys.stdout.write('.') - sys.stdout.flush() - if isinstance(event, paddle.event.EndPass): - result = trainer.test( - reader=paddle.reader.batched( - paddle.dataset.cifar.test10(), batch_size=128), - reader_dict={'image': 0, - 'label': 1}) - print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics) - - def main(): datadim = 3 * 32 * 32 classdim = 10 @@ -73,6 +55,23 @@ def main(): learning_rate_schedule='discexp', batch_size=128) + # End batch and end pass event handler + def event_handler(event): + if isinstance(event, paddle.event.EndIteration): + if event.batch_id % 100 == 0: + print "\nPass %d, Batch %d, Cost %f, %s" % ( + event.pass_id, event.batch_id, event.cost, event.metrics) + else: + sys.stdout.write('.') + sys.stdout.flush() + if isinstance(event, paddle.event.EndPass): + result = trainer.test( + reader=paddle.reader.batched( + 
paddle.dataset.cifar.test10(), batch_size=128), + reader_dict={'image': 0, + 'label': 1}) + print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics) + # Create trainer trainer = paddle.trainer.SGD(cost=cost, parameters=parameters, From edce6c8b6ab23c9c7fea1dee75d46fb2bb0f3e31 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Thu, 2 Mar 2017 17:51:53 +0800 Subject: [PATCH 53/87] restore embedding_layer name to embedding --- python/paddle/trainer_config_helpers/layers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 2b95c2ed0f..b68460b6a3 100755 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -822,7 +822,7 @@ def data_layer(name, size, height=None, width=None, layer_attr=None): return LayerOutput(name, LayerType.DATA, size=size) -@wrap_name_default("embedding_layer") +@wrap_name_default("embedding") @wrap_param_attr_default() @layer_support(ERROR_CLIPPING) def embedding_layer(input, size, name=None, param_attr=None, layer_attr=None): From 465878a9d2f84910dc71ea8fa9dd53f34c10f52d Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Thu, 2 Mar 2017 18:50:12 +0800 Subject: [PATCH 54/87] fit_a_line v2 api --- demo/introduction/api_train_v2.py | 59 +++++++++++++++++ python/paddle/v2/dataset/__init__.py | 3 +- python/paddle/v2/dataset/uci_housing.py | 86 +++++++++++++++++++++++++ 3 files changed, 147 insertions(+), 1 deletion(-) create mode 100644 demo/introduction/api_train_v2.py create mode 100644 python/paddle/v2/dataset/uci_housing.py diff --git a/demo/introduction/api_train_v2.py b/demo/introduction/api_train_v2.py new file mode 100644 index 0000000000..49496c7f08 --- /dev/null +++ b/demo/introduction/api_train_v2.py @@ -0,0 +1,59 @@ +import paddle.v2 as paddle +import paddle.v2.dataset.uci_housing as uci_housing + + +def main(): + # init + paddle.init(use_gpu=False, trainer_count=1) + + # network config + x = paddle.layer.data(name='x', type=paddle.data_type.dense_vector(13)) + y_predict = paddle.layer.fc(input=x, + param_attr=paddle.attr.Param(name='w'), + size=1, + act=paddle.activation.Linear(), + bias_attr=paddle.attr.Param(name='b')) + y = paddle.layer.data(name='y', type=paddle.data_type.dense_vector(1)) + cost = paddle.layer.regression_cost(input=y_predict, label=y) + + # create parameters + parameters = paddle.parameters.create(cost) + + # create optimizer + optimizer = paddle.optimizer.Momentum(momentum=0) + + trainer = paddle.trainer.SGD(cost=cost, + parameters=parameters, + update_equation=optimizer) + + # event_handler to print training and testing info + def event_handler(event): + if isinstance(event, paddle.event.EndIteration): + if event.batch_id % 100 == 0: + print "Pass %d, Batch %d, Cost %f, %s" % ( + event.pass_id, event.batch_id, event.cost, event.metrics) + + if isinstance(event, paddle.event.EndPass): + result = trainer.test( + reader=paddle.reader.batched( + uci_housing.test(), batch_size=2), + reader_dict={'x': 0, + 'y': 1}) + if event.pass_id % 10 == 0: + print "Test %d, Cost %f, %s" % (event.pass_id, event.cost, + result.metrics) + + # training + trainer.train( + reader=paddle.reader.batched( + paddle.reader.shuffle( + uci_housing.train(), buf_size=500), + batch_size=2), + reader_dict={'x': 0, + 'y': 1}, + event_handler=event_handler, + num_passes=30) + + +if __name__ == '__main__': + main() diff --git a/python/paddle/v2/dataset/__init__.py b/python/paddle/v2/dataset/__init__.py 
index 9647e98503..0058e1df11 100644 --- a/python/paddle/v2/dataset/__init__.py +++ b/python/paddle/v2/dataset/__init__.py @@ -1,3 +1,4 @@ import mnist +import uci_housing -__all__ = ['mnist'] +__all__ = ['mnist', 'uci_housing'] diff --git a/python/paddle/v2/dataset/uci_housing.py b/python/paddle/v2/dataset/uci_housing.py new file mode 100644 index 0000000000..b5a0537af6 --- /dev/null +++ b/python/paddle/v2/dataset/uci_housing.py @@ -0,0 +1,86 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np +import os +from common import download + +__all__ = ['train', 'test'] + +URL = 'https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data' +MD5 = 'd4accdce7a25600298819f8e28e8d593' +feature_names = [ + 'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', + 'PTRATIO', 'B', 'LSTAT' +] + +UCI_TRAIN_DATA = None +UCI_TEST_DATA = None + + +def feature_range(maximums, minimums): + import matplotlib + matplotlib.use('Agg') + import matplotlib.pyplot as plt + fig, ax = plt.subplots() + feature_num = len(maximums) + ax.bar(range(feature_num), maximums - minimums, color='r', align='center') + ax.set_title('feature scale') + plt.xticks(range(feature_num), feature_names) + plt.xlim([-1, feature_num]) + fig.set_figheight(6) + fig.set_figwidth(10) + if not os.path.exists('./image'): + os.makedirs('./image') + fig.savefig('image/ranges.png', dpi=48) + plt.close(fig) + + +def load_data(filename, feature_num=14, ratio=0.8): + global UCI_TRAIN_DATA, UCI_TEST_DATA + if UCI_TRAIN_DATA is not None and UCI_TEST_DATA is not None: + return + + data = np.fromfile(filename, sep=' ') + data = data.reshape(data.shape[0] / feature_num, feature_num) + maximums, minimums, avgs = data.max(axis=0), data.min(axis=0), data.sum( + axis=0) / data.shape[0] + feature_range(maximums[:-1], minimums[:-1]) + for i in xrange(feature_num - 1): + data[:, i] = (data[:, i] - avgs[i]) / (maximums[i] - minimums[i]) + offset = int(data.shape[0] * ratio) + UCI_TRAIN_DATA = data[:offset] + UCI_TEST_DATA = data[offset:] + + +def train(): + global UCI_TRAIN_DATA + load_data(download(URL, 'uci_housing', MD5)) + + def reader(): + for d in UCI_TRAIN_DATA: + yield d[:-1], d[-1:] + + return reader + + +def test(): + global UCI_TEST_DATA + load_data(download(URL, 'uci_housing', MD5)) + + def reader(): + for d in UCI_TEST_DATA: + yield d[:-1], d[-1:] + + return reader From a05707ff324e59b032f8ac0c43d62f339ef78db5 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Thu, 2 Mar 2017 20:45:37 +0800 Subject: [PATCH 55/87] add test cost --- demo/mnist/api_train_v2.py | 25 +++++++++++++++++++------ python/paddle/v2/event.py | 3 ++- python/paddle/v2/trainer.py | 10 +++++++++- 3 files changed, 30 insertions(+), 8 deletions(-) diff --git a/demo/mnist/api_train_v2.py b/demo/mnist/api_train_v2.py index 6439d07ac6..341a7165da 100644 --- a/demo/mnist/api_train_v2.py +++ b/demo/mnist/api_train_v2.py @@ -63,6 +63,8 @@ def main(): label = paddle.layer.data( 
name='label', type=paddle.data_type.integer_value(10)) + # Here we can build the prediction network in different ways. Please + # choose one by uncomment corresponding line. predict = softmax_regression(images) #predict = multilayer_perceptron(images) #predict = convolutional_neural_network(images) @@ -80,14 +82,20 @@ def main(): parameters=parameters, update_equation=optimizer) + list = [] + def event_handler(event): if isinstance(event, paddle.event.EndIteration): if event.batch_id % 100 == 0: - result = trainer.test(reader=paddle.reader.batched( - paddle.dataset.mnist.test(), batch_size=128)) - print "Pass %d, Batch %d, Cost %f, %s, Testing metrics %s" % ( - event.pass_id, event.batch_id, event.cost, event.metrics, - result.metrics) + print "Pass %d, Batch %d, Cost %f, %s" % ( + event.pass_id, event.batch_id, event.cost, event.metrics) + if isinstance(event, paddle.event.EndPass): + result = trainer.test(reader=paddle.reader.batched( + paddle.dataset.mnist.test(), batch_size=128)) + print "Test with Pass %d, Cost %f, %s\n" % ( + event.pass_id, event.cost, result.metrics) + list.append((event.pass_id, event.cost, + result.metrics['classification_error_evaluator'])) trainer.train( reader=paddle.reader.batched( @@ -97,10 +105,15 @@ def main(): event_handler=event_handler, num_passes=100) + # find the best pass + best = sorted(list, key=lambda list: float(list[1]))[0] + print 'Best pass is %s, testing Avgcost is %s' % (best[0], best[1]) + print 'The classification accuracy is %.2f%%' % (100 - float(best[2]) * 100) + # output is a softmax layer. It returns probabilities. # Shape should be (100, 10) probs = paddle.infer( - output=inference, + output=predict, parameters=parameters, reader=paddle.reader.batched( paddle.reader.firstn( diff --git a/python/paddle/v2/event.py b/python/paddle/v2/event.py index a78bcf076c..6a7bcb8187 100644 --- a/python/paddle/v2/event.py +++ b/python/paddle/v2/event.py @@ -52,8 +52,9 @@ class EndPass(WithMetric): Event On One Pass Training Complete. """ - def __init__(self, pass_id, evaluator): + def __init__(self, pass_id, cost, evaluator): self.pass_id = pass_id + self.cost = cost WithMetric.__init__(self, evaluator) diff --git a/python/paddle/v2/trainer.py b/python/paddle/v2/trainer.py index e743a49523..a4ef0df597 100644 --- a/python/paddle/v2/trainer.py +++ b/python/paddle/v2/trainer.py @@ -107,6 +107,8 @@ class SGD(ITrainer): event_handler(v2_event.BeginPass(pass_id)) pass_evaluator.start() updater.startPass() + total_cost_sum = 0 + total_batch = 0 for batch_id, data_batch in enumerate(reader()): pass_type = updater.startBatch(len(data_batch)) self.__gradient_machine__.forwardBackward( @@ -127,6 +129,8 @@ class SGD(ITrainer): cost_vec = out_args.getSlotValue(0) cost_vec = cost_vec.copyToNumpyMat() cost = cost_vec.sum() / len(data_batch) + total_cost_sum += cost_vec.sum() + total_batch += len(data_batch) updater.finishBatch(cost) batch_evaluator.finish() event_handler( @@ -138,7 +142,11 @@ class SGD(ITrainer): updater.finishPass() pass_evaluator.finish() - event_handler(v2_event.EndPass(pass_id, evaluator=pass_evaluator)) + event_handler( + v2_event.EndPass( + pass_id, + cost=total_cost_sum / total_batch, + evaluator=pass_evaluator)) self.__gradient_machine__.finish() def default_reader_dict(self): From 5ce504b19b96d94dec5ceab2e2443a091b690cf0 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 2 Mar 2017 20:50:23 +0800 Subject: [PATCH 56/87] Fix duplicated forward/backward in trainer. 
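
Before this change the per-batch loop called `updater.startBatch` and `self.__gradient_machine__.forwardBackward` at the top of the loop and then again further down in the same loop (the duplication named in the subject), so each mini-batch was forwarded and backpropagated twice per parameter update; the hunk below removes the first, redundant call. A minimal sketch of the effect, using a hypothetical counting stub rather than the real GradientMachine API:

```python
# Hypothetical counting stub (not the real PaddlePaddle API), only meant to
# show how much work the duplicated call added per mini-batch.
class CountingGradientMachine(object):
    def __init__(self):
        self.passes = 0

    def forwardBackward(self, data_batch):
        self.passes += 1  # one full forward + backward computation


def train_one_batch(gm, data_batch, duplicated_call):
    if duplicated_call:
        gm.forwardBackward(data_batch)  # the redundant call removed by this patch
    # ... evaluator start / BeginIteration event would happen here ...
    gm.forwardBackward(data_batch)  # the call that is kept


gm = CountingGradientMachine()
train_one_batch(gm, range(32), duplicated_call=True)
print gm.passes  # 2: every mini-batch was computed twice before the fix

gm = CountingGradientMachine()
train_one_batch(gm, range(32), duplicated_call=False)
print gm.passes  # 1: one pass per mini-batch after the fix
```
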
--- python/paddle/v2/trainer.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/python/paddle/v2/trainer.py b/python/paddle/v2/trainer.py index 5003f55f3e..5cff75e39d 100644 --- a/python/paddle/v2/trainer.py +++ b/python/paddle/v2/trainer.py @@ -108,9 +108,6 @@ class SGD(ITrainer): pass_evaluator.start() updater.startPass() for batch_id, data_batch in enumerate(reader()): - pass_type = updater.startBatch(len(data_batch)) - self.__gradient_machine__.forwardBackward( - feeder(data_batch), out_args, pass_type) batch_evaluator.start() event_handler( v2_event.BeginIteration( From 172ac8af7abb0b54f47abb7eb067fbd538ab5b57 Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Thu, 2 Mar 2017 20:58:05 +0800 Subject: [PATCH 57/87] update --- demo/semantic_role_labeling/api_train_v2.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/demo/semantic_role_labeling/api_train_v2.py b/demo/semantic_role_labeling/api_train_v2.py index 8ce6faaa1b..c582724185 100644 --- a/demo/semantic_role_labeling/api_train_v2.py +++ b/demo/semantic_role_labeling/api_train_v2.py @@ -4,17 +4,12 @@ import numpy as np import paddle.v2 as paddle import paddle.v2.dataset.conll05 as conll05 -UNK_IDX = 0 - def db_lstm(): word_dict, verb_dict, label_dict = conll05.get_dict() word_dict_len = len(word_dict) label_dict_len = len(label_dict) pred_len = len(verb_dict) - print 'word_dict_len,', word_dict_len - print 'label_dict_len,', label_dict_len - print 'pred_len,', pred_len mark_dict_len = 2 word_dim = 32 From 495861f55923dcd9092e74e401e50aa8deeb0fd5 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Thu, 2 Mar 2017 21:30:47 +0800 Subject: [PATCH 58/87] add v2 demo to seqtoseq, fix __dfs_travel__ for v2 layers --- demo/seqToseq/api_train_v2.py | 106 +++++++++++++++++++++++++++++++ demo/seqToseq/seqToseq_net_v2.py | 90 ++++++++++++++++++++++++++ python/paddle/v2/layer.py | 2 +- python/paddle/v2/topology.py | 6 +- 4 files changed, 202 insertions(+), 2 deletions(-) create mode 100644 demo/seqToseq/api_train_v2.py create mode 100644 demo/seqToseq/seqToseq_net_v2.py diff --git a/demo/seqToseq/api_train_v2.py b/demo/seqToseq/api_train_v2.py new file mode 100644 index 0000000000..efbab8d7b9 --- /dev/null +++ b/demo/seqToseq/api_train_v2.py @@ -0,0 +1,106 @@ +import os + +import paddle.v2 as paddle + +from seqToseq_net_v2 import seqToseq_net_v2 + +### Data Definiation +data_dir = "./data/pre-wmt14" +src_lang_dict = os.path.join(data_dir, 'src.dict') +trg_lang_dict = os.path.join(data_dir, 'trg.dict') + +source_dict_dim = len(open(src_lang_dict, "r").readlines()) +target_dict_dim = len(open(trg_lang_dict, "r").readlines()) + + +def read_to_dict(dict_path): + with open(dict_path, "r") as fin: + out_dict = { + line.strip(): line_count + for line_count, line in enumerate(fin) + } + return out_dict + + +src_dict = read_to_dict(src_lang_dict) +trg_dict = read_to_dict(trg_lang_dict) + +train_list = os.path.join(data_dir, 'train.list') +test_list = os.path.join(data_dir, 'test.list') + +UNK_IDX = 2 +START = "" +END = "" + + +def _get_ids(s, dictionary): + words = s.strip().split() + return [dictionary[START]] + \ + [dictionary.get(w, UNK_IDX) for w in words] + \ + [dictionary[END]] + + +def train_reader(file_name): + def reader(): + with open(file_name, 'r') as f: + for line_count, line in enumerate(f): + line_split = line.strip().split('\t') + if len(line_split) != 2: + continue + src_seq = line_split[0] # one source sequence + src_ids = _get_ids(src_seq, src_dict) + + trg_seq = line_split[1] # one target sequence + trg_words = trg_seq.split() 
+ trg_ids = [trg_dict.get(w, UNK_IDX) for w in trg_words] + + # remove sequence whose length > 80 in training mode + if len(src_ids) > 80 or len(trg_ids) > 80: + continue + trg_ids_next = trg_ids + [trg_dict[END]] + trg_ids = [trg_dict[START]] + trg_ids + + yield src_ids, trg_ids, trg_ids_next + + return reader + + +def main(): + paddle.init(use_gpu=False, trainer_count=1) + + # reader = train_reader("data/pre-wmt14/train/train") + # define network topology + cost = seqToseq_net_v2(source_dict_dim, target_dict_dim) + parameters = paddle.parameters.create(cost) + optimizer = paddle.optimizer.Adam(batch_size=50, learning_rate=5e-4) + + def event_handler(event): + if isinstance(event, paddle.event.EndIteration): + if event.batch_id % 100 == 0: + print "Pass %d, Batch %d, Cost %f, %s" % ( + event.pass_id, event.batch_id, event.cost, event.metrics) + + trainer = paddle.trainer.SGD(cost=cost, + parameters=parameters, + update_equation=optimizer) + + reader_dict = { + 'source_language_word': 0, + 'target_language_word': 1, + 'target_language_next_word': 2 + } + + trn_reader = paddle.reader.batched( + paddle.reader.shuffle( + train_reader("data/pre-wmt14/train/train"), buf_size=8192), + batch_size=10) + + trainer.train( + reader=trn_reader, + event_handler=event_handler, + num_passes=10000, + reader_dict=reader_dict) + + +if __name__ == '__main__': + main() diff --git a/demo/seqToseq/seqToseq_net_v2.py b/demo/seqToseq/seqToseq_net_v2.py new file mode 100644 index 0000000000..7e057e2440 --- /dev/null +++ b/demo/seqToseq/seqToseq_net_v2.py @@ -0,0 +1,90 @@ +import paddle.v2.activation as activation +import paddle.v2.attr as attr +import paddle.v2.data_type as data_type +import paddle.v2.layer as layer +import paddle.v2.networks as networks + + +def seqToseq_net_v2(source_dict_dim, target_dict_dim): + ### Network Architecture + word_vector_dim = 512 # dimension of word vector + decoder_size = 512 # dimension of hidden unit in GRU Decoder network + encoder_size = 512 # dimension of hidden unit in GRU Encoder network + + #### Encoder + src_word_id = layer.data( + name='source_language_word', + type=data_type.dense_vector(source_dict_dim)) + src_embedding = layer.embedding( + input=src_word_id, + size=word_vector_dim, + param_attr=attr.ParamAttr(name='_source_language_embedding')) + src_forward = networks.simple_gru(input=src_embedding, size=encoder_size) + src_backward = networks.simple_gru( + input=src_embedding, size=encoder_size, reverse=True) + encoded_vector = layer.concat(input=[src_forward, src_backward]) + + #### Decoder + with layer.mixed(size=decoder_size) as encoded_proj: + encoded_proj += layer.full_matrix_projection(input=encoded_vector) + + backward_first = layer.first_seq(input=src_backward) + + with layer.mixed(size=decoder_size, act=activation.Tanh()) as decoder_boot: + decoder_boot += layer.full_matrix_projection(input=backward_first) + + def gru_decoder_with_attention(enc_vec, enc_proj, current_word): + + decoder_mem = layer.memory( + name='gru_decoder', size=decoder_size, boot_layer=decoder_boot) + + context = networks.simple_attention( + encoded_sequence=enc_vec, + encoded_proj=enc_proj, + decoder_state=decoder_mem) + + with layer.mixed(size=decoder_size * 3) as decoder_inputs: + decoder_inputs += layer.full_matrix_projection(input=context) + decoder_inputs += layer.full_matrix_projection(input=current_word) + + gru_step = layer.gru_step( + name='gru_decoder', + input=decoder_inputs, + output_mem=decoder_mem, + size=decoder_size) + + with layer.mixed( + size=target_dict_dim, 
bias_attr=True, + act=activation.Softmax()) as out: + out += layer.full_matrix_projection(input=gru_step) + return out + + decoder_group_name = "decoder_group" + group_input1 = layer.StaticInputV2(input=encoded_vector, is_seq=True) + group_input2 = layer.StaticInputV2(input=encoded_proj, is_seq=True) + group_inputs = [group_input1, group_input2] + + trg_embedding = layer.embedding( + input=layer.data( + name='target_language_word', + type=data_type.dense_vector(target_dict_dim)), + size=word_vector_dim, + param_attr=attr.ParamAttr(name='_target_language_embedding')) + group_inputs.append(trg_embedding) + + # For decoder equipped with attention mechanism, in training, + # target embeding (the groudtruth) is the data input, + # while encoded source sequence is accessed to as an unbounded memory. + # Here, the StaticInput defines a read-only memory + # for the recurrent_group. + decoder = layer.recurrent_group( + name=decoder_group_name, + step=gru_decoder_with_attention, + input=group_inputs) + + lbl = layer.data( + name='target_language_next_word', + type=data_type.dense_vector(target_dict_dim)) + cost = layer.classification_cost(input=decoder, label=lbl) + + return cost diff --git a/python/paddle/v2/layer.py b/python/paddle/v2/layer.py index 2f55611aaa..010773ddbd 100644 --- a/python/paddle/v2/layer.py +++ b/python/paddle/v2/layer.py @@ -262,7 +262,7 @@ class StaticInputV2(object): self.input = input self.is_seq = is_seq self.size = size - # TODO(qiaolongfei): add size + # TODO(add size check) # assert input.size is not None or size is not None diff --git a/python/paddle/v2/topology.py b/python/paddle/v2/topology.py index 4c21125431..f0679c5675 100644 --- a/python/paddle/v2/topology.py +++ b/python/paddle/v2/topology.py @@ -17,6 +17,7 @@ import collections from paddle.proto.ModelConfig_pb2 import ModelConfig import layer as v2_layer +from layer import WithExtraParent __all__ = ['Topology'] @@ -40,7 +41,10 @@ def __bfs_travel__(callback, *layers): __break__ = callback(each_layer) if __break__: return - __bfs_travel__(callback, *each_layer.__parent_layers__.values()) + __layers__ = each_layer.__parent_layers__.values() + if isinstance(each_layer, WithExtraParent): + __layers__ = __layers__ + each_layer.extra_parent() + __bfs_travel__(callback, *__layers__) class Topology(object): From 4a265b5200bb86ef81f08d9fce516330b2c2f41a Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Thu, 2 Mar 2017 21:42:11 +0800 Subject: [PATCH 59/87] Use reader in dataset imdb.py --- demo/sentiment/train_v2.py | 38 ++++++++------------------------ python/paddle/v2/dataset/imdb.py | 5 +++++ 2 files changed, 14 insertions(+), 29 deletions(-) diff --git a/demo/sentiment/train_v2.py b/demo/sentiment/train_v2.py index 3d595fad30..0fa7494853 100644 --- a/demo/sentiment/train_v2.py +++ b/demo/sentiment/train_v2.py @@ -2,10 +2,11 @@ import sys from os.path import join as join_path import paddle.trainer_config_helpers.attrs as attrs from paddle.trainer_config_helpers.poolings import MaxPooling -import paddle.v2 as paddle import paddle.v2.layer as layer import paddle.v2.activation as activation import paddle.v2.data_type as data_type +import paddle.v2.dataset.imdb as imdb +import paddle.v2 as paddle def sequence_conv_pool(input, @@ -189,36 +190,15 @@ def stacked_lstm_net(input_dim, return cost -def data_reader(data_file, dict_file): - def reader(): - with open(dict_file, 'r') as fdict, open(data_file, 'r') as fdata: - dictionary = dict() - for i, line in enumerate(fdict): - dictionary[line.split('\t')[0]] = i - - for 
line_count, line in enumerate(fdata): - label, comment = line.strip().split('\t\t') - label = int(label) - words = comment.split() - word_slot = [dictionary[w] for w in words if w in dictionary] - yield (word_slot, label) - - return reader - - if __name__ == '__main__': - # data file - train_file = "./data/pre-imdb/train_part_000" - test_file = "./data/pre-imdb/test_part_000" - dict_file = "./data/pre-imdb/dict.txt" - labels = "./data/pre-imdb/labels.list" - # init paddle.init(use_gpu=True, trainer_count=4) # network config - dict_dim = len(open(dict_file).readlines()) - class_dim = len(open(labels).readlines()) + print 'load dictionary...' + word_dict = imdb.word_dict() + dict_dim = len(word_dict) + class_dim = 2 # Please choose the way to build the network # by uncommenting the corresponding line. @@ -246,7 +226,7 @@ if __name__ == '__main__': if isinstance(event, paddle.event.EndPass): result = trainer.test( reader=paddle.reader.batched( - data_reader(test_file, dict_file), batch_size=128), + lambda: imdb.test(word_dict), batch_size=128), reader_dict={'word': 0, 'label': 1}) print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics) @@ -259,8 +239,8 @@ if __name__ == '__main__': trainer.train( reader=paddle.reader.batched( paddle.reader.shuffle( - data_reader(train_file, dict_file), buf_size=4096), - batch_size=128), + lambda: imdb.train(word_dict), buf_size=1000), + batch_size=100), event_handler=event_handler, reader_dict={'word': 0, 'label': 1}, diff --git a/python/paddle/v2/dataset/imdb.py b/python/paddle/v2/dataset/imdb.py index 433e37380f..db388be1e0 100644 --- a/python/paddle/v2/dataset/imdb.py +++ b/python/paddle/v2/dataset/imdb.py @@ -118,3 +118,8 @@ def test(word_idx): return reader_creator( re.compile("aclImdb/test/pos/.*\.txt$"), re.compile("aclImdb/test/neg/.*\.txt$"), word_idx, 1000) + + +def word_dict(): + return build_dict( + re.compile("aclImdb/((train)|(test))/((pos)|(neg))/.*\.txt$"), 150) From fa3b1c465b4e2e2cc8051b8f47ed4b8f860b616e Mon Sep 17 00:00:00 2001 From: Helin Wang Date: Thu, 2 Mar 2017 00:38:06 +0000 Subject: [PATCH 60/87] change argument name of data_type.integer_value/integer_value_sequence to ele_range, add documentation. --- python/paddle/trainer/PyDataProvider2.py | 16 ++++++++++++---- python/paddle/v2/tests/test_data_feeder.py | 15 ++++++++------- 2 files changed, 20 insertions(+), 11 deletions(-) diff --git a/python/paddle/trainer/PyDataProvider2.py b/python/paddle/trainer/PyDataProvider2.py index bd24c68b6f..afde7453a1 100644 --- a/python/paddle/trainer/PyDataProvider2.py +++ b/python/paddle/trainer/PyDataProvider2.py @@ -65,14 +65,18 @@ def sparse_value_slot(dim, seq_type=SequenceType.NO_SEQUENCE): return InputType(dim, seq_type, DataType.SparseValue) -def index_slot(dim, seq_type=SequenceType.NO_SEQUENCE): - return InputType(dim, seq_type, DataType.Index) +def index_slot(ele_range, seq_type=SequenceType.NO_SEQUENCE): + """Data type of integer. + :param ele_range: range of this integer. + """ + return InputType(ele_range, seq_type, DataType.Index) dense_vector = dense_slot sparse_binary_vector = sparse_non_value_slot sparse_vector = sparse_value_slot integer_value = index_slot +integer_value.__doc__ = index_slot.__doc__ def dense_vector_sequence(dim): @@ -99,8 +103,11 @@ def sparse_vector_sub_sequence(dim): return sparse_vector(dim, seq_type=SequenceType.SUB_SEQUENCE) -def integer_value_sequence(dim): - return integer_value(dim, seq_type=SequenceType.SEQUENCE) +def integer_value_sequence(ele_range): + """Data type of a sequence of integer. 
+ :param ele_range: range of each element. + """ + return integer_value(ele_range, seq_type=SequenceType.SEQUENCE) def integer_value_sub_sequence(dim): @@ -108,6 +115,7 @@ def integer_value_sub_sequence(dim): integer_sequence = integer_value_sequence +integer_sequence.__doc__ = integer_value_sequence.__doc__ class SingleSlotWrapper(object): diff --git a/python/paddle/v2/tests/test_data_feeder.py b/python/paddle/v2/tests/test_data_feeder.py index ab2bc5df76..1b1f5aef8b 100644 --- a/python/paddle/v2/tests/test_data_feeder.py +++ b/python/paddle/v2/tests/test_data_feeder.py @@ -110,14 +110,14 @@ class DataFeederTest(unittest.TestCase): self.assertAlmostEqual(value.all(), w[i].all()) def test_integer(self): - dim = 100 + ele_range = 100 batch_size = 32 index = [] for i in xrange(batch_size): each_sample = [] - each_sample.append(np.random.randint(dim)) + each_sample.append(np.random.randint(ele_range)) index.append(each_sample) - feeder = DataFeeder([('input', data_type.integer_value(dim))], + feeder = DataFeeder([('input', data_type.integer_value(ele_range))], {'input': 0}) arg = feeder(index) output = arg.getSlotIds(0).copyToNumpyArray() @@ -125,7 +125,7 @@ class DataFeederTest(unittest.TestCase): self.assertEqual(output.all(), index.flatten().all()) def test_integer_sequence(self): - dim = 10000 + ele_range = 10000 batch_size = 32 start = [0] data = [] @@ -133,11 +133,12 @@ class DataFeederTest(unittest.TestCase): each_sample = [] each_sample.append( self.sparse_binary_reader( - dim, 30, non_empty=True)) + ele_range, 30, non_empty=True)) data.append(each_sample) start.append(len(each_sample[0]) + start[-1]) - feeder = DataFeeder([('input', data_type.integer_value_sequence(dim))], - {'input': 0}) + feeder = DataFeeder( + [('input', data_type.integer_value_sequence(ele_range))], + {'input': 0}) arg = feeder(data) output_data = arg.getSlotIds(0).copyToNumpyArray() output_start = arg.getSlotSequenceStartPositions(0).copyToNumpyArray() From e87181db8d4d67dc3c35675adf0c24d399dce88e Mon Sep 17 00:00:00 2001 From: Helin Wang Date: Thu, 2 Mar 2017 19:47:17 +0000 Subject: [PATCH 61/87] use value_range instead of ele_range --- python/paddle/trainer/PyDataProvider2.py | 12 ++++++------ python/paddle/v2/tests/test_data_feeder.py | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/python/paddle/trainer/PyDataProvider2.py b/python/paddle/trainer/PyDataProvider2.py index afde7453a1..4e3c4db853 100644 --- a/python/paddle/trainer/PyDataProvider2.py +++ b/python/paddle/trainer/PyDataProvider2.py @@ -65,11 +65,11 @@ def sparse_value_slot(dim, seq_type=SequenceType.NO_SEQUENCE): return InputType(dim, seq_type, DataType.SparseValue) -def index_slot(ele_range, seq_type=SequenceType.NO_SEQUENCE): +def index_slot(value_range, seq_type=SequenceType.NO_SEQUENCE): """Data type of integer. - :param ele_range: range of this integer. + :param value_range: range of this integer. """ - return InputType(ele_range, seq_type, DataType.Index) + return InputType(value_range, seq_type, DataType.Index) dense_vector = dense_slot @@ -103,11 +103,11 @@ def sparse_vector_sub_sequence(dim): return sparse_vector(dim, seq_type=SequenceType.SUB_SEQUENCE) -def integer_value_sequence(ele_range): +def integer_value_sequence(value_range): """Data type of a sequence of integer. - :param ele_range: range of each element. + :param value_range: range of each element. 
""" - return integer_value(ele_range, seq_type=SequenceType.SEQUENCE) + return integer_value(value_range, seq_type=SequenceType.SEQUENCE) def integer_value_sub_sequence(dim): diff --git a/python/paddle/v2/tests/test_data_feeder.py b/python/paddle/v2/tests/test_data_feeder.py index 1b1f5aef8b..71eb3bf314 100644 --- a/python/paddle/v2/tests/test_data_feeder.py +++ b/python/paddle/v2/tests/test_data_feeder.py @@ -110,14 +110,14 @@ class DataFeederTest(unittest.TestCase): self.assertAlmostEqual(value.all(), w[i].all()) def test_integer(self): - ele_range = 100 + value_range = 100 batch_size = 32 index = [] for i in xrange(batch_size): each_sample = [] - each_sample.append(np.random.randint(ele_range)) + each_sample.append(np.random.randint(value_range)) index.append(each_sample) - feeder = DataFeeder([('input', data_type.integer_value(ele_range))], + feeder = DataFeeder([('input', data_type.integer_value(value_range))], {'input': 0}) arg = feeder(index) output = arg.getSlotIds(0).copyToNumpyArray() @@ -125,7 +125,7 @@ class DataFeederTest(unittest.TestCase): self.assertEqual(output.all(), index.flatten().all()) def test_integer_sequence(self): - ele_range = 10000 + value_range = 10000 batch_size = 32 start = [0] data = [] @@ -133,11 +133,11 @@ class DataFeederTest(unittest.TestCase): each_sample = [] each_sample.append( self.sparse_binary_reader( - ele_range, 30, non_empty=True)) + value_range, 30, non_empty=True)) data.append(each_sample) start.append(len(each_sample[0]) + start[-1]) feeder = DataFeeder( - [('input', data_type.integer_value_sequence(ele_range))], + [('input', data_type.integer_value_sequence(value_range))], {'input': 0}) arg = feeder(data) output_data = arg.getSlotIds(0).copyToNumpyArray() From 4cbbb23fa315eb3b58929f059c59ae7498cdbf45 Mon Sep 17 00:00:00 2001 From: Helin Wang Date: Wed, 1 Mar 2017 13:25:47 -0800 Subject: [PATCH 62/87] expose build_dict in imikolov dataset, fix bug that len(word_dict) is not bigger than all index in word_dict. 
--- python/paddle/v2/dataset/imikolov.py | 31 +++++++++---------- .../paddle/v2/dataset/tests/imikolov_test.py | 10 ++++-- 2 files changed, 22 insertions(+), 19 deletions(-) diff --git a/python/paddle/v2/dataset/imikolov.py b/python/paddle/v2/dataset/imikolov.py index 285d3eaca8..deb556942d 100644 --- a/python/paddle/v2/dataset/imikolov.py +++ b/python/paddle/v2/dataset/imikolov.py @@ -17,7 +17,7 @@ imikolov's simple dataset: http://www.fit.vutbr.cz/~imikolov/rnnlm/ import paddle.v2.dataset.common import tarfile -__all__ = ['train', 'test'] +__all__ = ['train', 'test', 'build_dict'] URL = 'http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz' MD5 = '30177ea32e27c525793142b6bf2c8e2d' @@ -37,7 +37,9 @@ def word_count(f, word_freq=None): return word_freq -def build_dict(train_filename, test_filename): +def build_dict(): + train_filename = './simple-examples/data/ptb.train.txt' + test_filename = './simple-examples/data/ptb.valid.txt' with tarfile.open( paddle.v2.dataset.common.download( paddle.v2.dataset.imikolov.URL, 'imikolov', @@ -45,27 +47,22 @@ def build_dict(train_filename, test_filename): trainf = tf.extractfile(train_filename) testf = tf.extractfile(test_filename) word_freq = word_count(testf, word_count(trainf)) + if '' in word_freq: + # remove for now, since we will set it as last index + del word_freq[''] TYPO_FREQ = 50 word_freq = filter(lambda x: x[1] > TYPO_FREQ, word_freq.items()) - dictionary = sorted(word_freq, key=lambda x: (-x[1], x[0])) - words, _ = list(zip(*dictionary)) + word_freq_sorted = sorted(word_freq, key=lambda x: (-x[1], x[0])) + words, _ = list(zip(*word_freq_sorted)) word_idx = dict(zip(words, xrange(len(words)))) word_idx[''] = len(words) return word_idx -word_idx = {} - - -def reader_creator(filename, n): - global word_idx - if len(word_idx) == 0: - word_idx = build_dict('./simple-examples/data/ptb.train.txt', - './simple-examples/data/ptb.valid.txt') - +def reader_creator(filename, word_idx, n): def reader(): with tarfile.open( paddle.v2.dataset.common.download( @@ -84,9 +81,9 @@ def reader_creator(filename, n): return reader -def train(n): - return reader_creator('./simple-examples/data/ptb.train.txt', n) +def train(word_idx, n): + return reader_creator('./simple-examples/data/ptb.train.txt', word_idx, n) -def test(n): - return reader_creator('./simple-examples/data/ptb.valid.txt', n) +def test(word_idx, n): + return reader_creator('./simple-examples/data/ptb.valid.txt', word_idx, n) diff --git a/python/paddle/v2/dataset/tests/imikolov_test.py b/python/paddle/v2/dataset/tests/imikolov_test.py index 9b1748eaaa..009e55243a 100644 --- a/python/paddle/v2/dataset/tests/imikolov_test.py +++ b/python/paddle/v2/dataset/tests/imikolov_test.py @@ -1,6 +1,8 @@ import paddle.v2.dataset.imikolov import unittest +WORD_DICT = paddle.v2.dataset.imikolov.build_dict() + class TestMikolov(unittest.TestCase): def check_reader(self, reader, n): @@ -9,11 +11,15 @@ class TestMikolov(unittest.TestCase): def test_train(self): n = 5 - self.check_reader(paddle.v2.dataset.imikolov.train(n), n) + self.check_reader(paddle.v2.dataset.imikolov.train(WORD_DICT, n), n) def test_test(self): n = 5 - self.check_reader(paddle.v2.dataset.imikolov.test(n), n) + self.check_reader(paddle.v2.dataset.imikolov.test(WORD_DICT, n), n) + + def test_total(self): + _, idx = zip(*WORD_DICT.items()) + self.assertEqual(sorted(idx)[-1], len(WORD_DICT) - 1) if __name__ == '__main__': From a8a25238bbde726834955db9cc9c2deeed36e33d Mon Sep 17 00:00:00 2001 From: Helin Wang Date: Wed, 1 Mar 2017 23:14:36 
+0000 Subject: [PATCH 63/87] remove ITrainer interface since the parameters for train are already getting out of sync. Currently there is no benefit to having an interface. --- python/paddle/v2/trainer.py | 23 ++--------------------- 1 file changed, 2 insertions(+), 21 deletions(-) diff --git a/python/paddle/v2/trainer.py b/python/paddle/v2/trainer.py index e743a49523..6652a1e8fe 100644 --- a/python/paddle/v2/trainer.py +++ b/python/paddle/v2/trainer.py @@ -8,7 +8,7 @@ from . import event as v2_event from . import optimizer as v2_optimizer from . import parameters as v2_parameters -__all__ = ['ITrainer', 'SGD'] +__all__ = ['SGD'] def default_event_handler(event): @@ -22,26 +22,7 @@ def default_event_handler(event): pass -class ITrainer(object): - """ - The interface of Trainer. The only exposed method is `train`. - """ - - def train(self, reader, topology, parameters, event_handler=None): - """ - train method. - - :param reader: - :param topology: - :param parameters: - :param event_handler: - :return: - """ - - raise NotImplementedError() - - -class SGD(ITrainer): +class SGD(): def __init__(self, cost, parameters, update_equation): """ Simple SGD Trainer. From 8ebfe554bda96e2ff64d64fe9ae6ca461938411a Mon Sep 17 00:00:00 2001 From: Helin Wang Date: Thu, 2 Mar 2017 15:38:48 -0800 Subject: [PATCH 64/87] add batch reader into reader design doc --- doc/design/reader/README.md | 91 +++++++++++++++++++++++++++---------- 1 file changed, 66 insertions(+), 25 deletions(-) diff --git a/doc/design/reader/README.md b/doc/design/reader/README.md index 17d52b9e20..03119fdd74 100644 --- a/doc/design/reader/README.md +++ b/doc/design/reader/README.md @@ -4,9 +4,10 @@ At training and testing time, PaddlePaddle programs need to read data. To ease t - A *reader* is a function that reads data (from file, network, random number generator, etc) and yields data items. - A *reader creator* is a function that returns a reader function. -- A *reader* decorator is a function, which accepts one or more readers, and returns a reader. +- A *reader decorator* is a function, which accepts one or more readers, and returns a reader. +- A *batch reader* is a function that reads data (from *reader*, file, network, random number generator, etc) and yields a batch of data items. -and provide frequently used reader creators and reader decorators. +and provide a function that converts a reader into a batch reader, as well as frequently used reader creators and reader decorators. ## Data Reader Interface @@ -37,9 +38,54 @@ def reader_creator_random_imageand_label(widht, height, label): return reader ``` +## Batch Reader Interface + +A *batch reader* can be any function with no parameters that creates an iterable (anything that can be used in `for x in iterable`). The output of the iterable should be a batch (list) of data items. Each item inside the list must be a tuple. + +Here are valid outputs: +```python +# a mini batch of three data items. Each data item consists of three columns of data. +[(1, 1, 1), +(2, 2, 2), +(3, 3, 3)] + +# a mini batch of three data items, each data item is a list (single column). +[([1,1,1],), +([2,2,2],), +([3,3,3],)] +``` + +Please note that each item inside the list must be a tuple; below is an invalid output: +```python + # wrong, [1,1,1] needs to be inside a tuple: ([1,1,1],). + # Otherwise it's ambiguous whether [1,1,1] means a single column of data [1, 1, 1], + # or three columns of data, each of which is 1.
+[[1,1,1], +[2,2,2], +[3,3,3]] +``` + +It's easy to convert a reader into a batch reader: +```python +mnist_train = paddle.dataset.mnist.train() +mnist_train_batch_reader = paddle.batch(mnist_train, 128) +``` + +It is also easy to create a custom batch reader: +```python +def custom_batch_reader(): + while True: + batch = [] + for i in xrange(128): + batch.append((numpy.random.uniform(-1, 1, 28*28),)) # note that it's a tuple being appended. + yield batch + +mnist_random_image_batch_reader = custom_batch_reader +``` + ## Usage -data reader, mapping from item(s) read to data layer, batch size and number of total pass will be passed into `paddle.train`: +A batch reader, the mapping from item(s) read to data layer, the batch size, and the number of total passes will be passed into `paddle.train`: ```python # two data layer is created: @@ -47,8 +93,8 @@ image_layer = paddle.layer.data("image", ...) label_layer = paddle.layer.data("label", ...) # ... - -paddle.train(paddle.dataset.mnist, {"image":0, "label":1}, 128, 10, ...) +batch_reader = paddle.batch(paddle.dataset.mnist.train(), 128) +paddle.train(batch_reader, {"image":0, "label":1}, 128, 10, ...) ``` ## Data Reader Decorator @@ -64,7 +110,7 @@ Since reading data may take time and training can not proceed without data. It i Use `paddle.reader.buffered` to prefetch data: ```python -buffered_reader = paddle.reader.buffered(paddle.dataset.mnist, 100) +buffered_reader = paddle.reader.buffered(paddle.dataset.mnist.train(), 100) ``` `buffered_reader` will try to buffer (prefetch) `100` data entries. @@ -91,10 +137,10 @@ def reader_creator_bool(t): true_reader = reader_creator_bool(True) false_reader = reader_creator_bool(False) -reader = paddle.reader.compose(paddle.dataset.mnist, data_reader_creator_random_image(20, 20), true_reader, false_reader) -# Skipped 1 because paddle.dataset.mnist produces two items per data entry. +reader = paddle.reader.compose(paddle.dataset.mnist.train(), data_reader_creator_random_image(20, 20), true_reader, false_reader) +# Skipped 1 because paddle.dataset.mnist.train() produces two items per data entry. # And we don't care second item at this time. -paddle.train(reader, {"true_image":0, "fake_image": 2, "true_label": 3, "false_label": 4}, ...) +paddle.train(paddle.batch(reader, 128), {"true_image":0, "fake_image": 2, "true_label": 3, "false_label": 4}, ...) ``` ### Shuffle @@ -103,16 +149,20 @@ Given shuffle buffer size `n`, `paddle.reader.shuffle` will return a data reader Example: ```python -reader = paddle.reader.shuffle(paddle.dataset.mnist, 512) +reader = paddle.reader.shuffle(paddle.dataset.mnist.train(), 512) ``` ## Q & A -### Why return only a single entry, but not a mini batch? +### Why does a reader return only a single entry, rather than a mini batch? + +Always returning a single entry makes it much easier to reuse existing data readers (e.g., if an existing reader returns 3 entries instead of a single entry, the training code becomes more complex because it needs to handle cases like a batch size of 2). + +We provide the function `paddle.batch` to turn a (single entry) reader into a batch reader. -If a mini batch is returned, data reader need to take care of batch size. But batch size is a concept for training, it makes more sense for user to specify batch size as a parameter for `train`. +### Why do we need a batch reader? Isn't passing a reader and batch_size to train sufficient?
-Practically, always return a single entry make reusing existing data readers much easier (e.g., if existing reader return not a single entry but 3 entries, training code will be more complex because it need to handle cases like batch size 2). +In most cases, having train take a reader and batch_size as arguments would be sufficient. However, sometimes the user wants to customize the order of data entries inside a mini batch, or even change the batch size dynamically. ### Why use a dictionary but not a list to provide mapping? @@ -137,7 +187,7 @@ def image_reader_creator(image_path, label_path, n): # images_reader_creator creates a reader reader = image_reader_creator("/path/to/image_file", "/path/to/label_file", 1024) -paddle.train(reader, {"image":0, "label":1}, ...) +paddle.train(paddle.batch(reader, 128), {"image":0, "label":1}, ...) ``` ### How is `paddle.train` implemented @@ -145,17 +195,8 @@ paddle.train(reader, {"image":0, "label":1}, ...) An example implementation of paddle.train could be: ```python -def make_minibatch(reader, minibatch_size): - def ret(): - r = reader() - buf = [r.next() for x in xrange(minibatch_size)] - while len(buf) > 0: - yield buf - buf = [r.next() for x in xrange(minibatch_size)] - return ret - -def train(reader, mapping, batch_size, total_pass): +def train(batch_reader, mapping, batch_size, total_pass): for pass_idx in range(total_pass): - for mini_batch in make_minibatch(reader): # this loop will never end in online learning. + for mini_batch in batch_reader(): # this loop will never end in online learning. do_forward_backward(mini_batch, mapping) ``` From f9ea339dd03bdf8df068e6936801db82ffd39bcd Mon Sep 17 00:00:00 2001 From: Helin Wang Date: Thu, 2 Mar 2017 22:14:14 +0000 Subject: [PATCH 65/87] remove osx build from CI We don't officially support OSX, and Travis CI takes a lot of time to build on OSX (jobs spend a long time in the pending state, probably because Travis CI doesn't have enough OSX machines). --- .travis.yml | 9 -------- paddle/scripts/travis/before_install.osx.sh | 4 ---- paddle/scripts/travis/build_and_test.sh | 23 +++++++-------------- 3 files changed, 8 insertions(+), 28 deletions(-) delete mode 100755 paddle/scripts/travis/before_install.osx.sh diff --git a/.travis.yml b/.travis.yml index 28d1f51be7..5a7f45a748 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,22 +4,14 @@ cache: - $HOME/third_party - $HOME/.ccache - $HOME/.cache/pip - - $HOME/Library/Caches/Homebrew sudo: required dist: trusty os: - linux - - osx env: - JOB=DOCS - JOB=BUILD_AND_TEST - JOB=PRE_COMMIT -matrix: - exclude: - - os: osx - env: JOB=DOCS # Only generate documentation in linux. - - os: osx - env: JOB=PRE_COMMIT # Only check pre-commit hook in linux addons: apt: @@ -53,7 +45,6 @@ before_install: fi fi fi - - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then paddle/scripts/travis/before_install.osx.sh; fi - if [[ "$JOB" == "PRE_COMMIT" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi # Paddle is using protobuf 3.1 currently. Protobuf 3.2 breaks the compatibility. So we specify the python # protobuf version.
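The reader design doc above relies on `paddle.batch` to turn a (single entry) reader into a batch reader but does not spell it out; a minimal sketch of such a conversion, written only against the interfaces described there (an assumption for illustration, not the actual implementation), could look like this:

```python
def batch(reader, batch_size):
    # Wrap a single-entry reader into a batch reader: collect batch_size
    # items (each a tuple) into a list and yield the list as one mini batch.
    def batch_reader():
        minibatch = []
        for item in reader():
            minibatch.append(item)
            if len(minibatch) == batch_size:
                yield minibatch
                minibatch = []
        if minibatch:
            # the final, possibly smaller, mini batch
            yield minibatch
    return batch_reader

# usage, mirroring the design doc:
# mnist_train_batch_reader = batch(paddle.dataset.mnist.train(), 128)
```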
diff --git a/paddle/scripts/travis/before_install.osx.sh b/paddle/scripts/travis/before_install.osx.sh deleted file mode 100755 index 80f031a74e..0000000000 --- a/paddle/scripts/travis/before_install.osx.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -brew update -brew tap homebrew/science -brew install openblas swig md5sha1sum diff --git a/paddle/scripts/travis/build_and_test.sh b/paddle/scripts/travis/build_and_test.sh index 5e6350b574..7deb3e62e8 100755 --- a/paddle/scripts/travis/build_and_test.sh +++ b/paddle/scripts/travis/build_and_test.sh @@ -2,18 +2,11 @@ source ./common.sh NPROC=1 -if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then - export PYTHONPATH=/opt/python/2.7.12/lib/python2.7/site-packages - export PYTHONHOME=/opt/python/2.7.12 - export PATH=/opt/python/2.7.12/bin:${PATH} - cmake .. -DCMAKE_Fortran_COMPILER=/usr/bin/gfortran-4.8 -DON_TRAVIS=ON -DON_COVERALLS=ON -DCOVERALLS_UPLOAD=ON ${EXTRA_CMAKE_OPTS} - NRPOC=`nproc` - make -j $NPROC - make coveralls - sudo make install -elif [[ "$TRAVIS_OS_NAME" == "osx" ]]; then - export PYTHONPATH=/usr/local/lib/python2.7/site-packages - cmake .. -DON_TRAVIS=ON -DON_COVERALLS=ON -DCOVERALLS_UPLOAD=ON ${EXTRA_CMAKE_OPTS} - NPROC=`sysctl -n hw.ncpu` - make -j $NPROC -fi +export PYTHONPATH=/opt/python/2.7.12/lib/python2.7/site-packages +export PYTHONHOME=/opt/python/2.7.12 +export PATH=/opt/python/2.7.12/bin:${PATH} +cmake .. -DCMAKE_Fortran_COMPILER=/usr/bin/gfortran-4.8 -DON_TRAVIS=ON -DON_COVERALLS=ON -DCOVERALLS_UPLOAD=ON ${EXTRA_CMAKE_OPTS} +NRPOC=`nproc` +make -j $NPROC +make coveralls +sudo make install From 4157e009c1fb2c0e12e806ce52da0f9e4890084d Mon Sep 17 00:00:00 2001 From: Helin Wang Date: Fri, 3 Mar 2017 01:48:35 +0000 Subject: [PATCH 66/87] dataset: print download progress --- python/paddle/v2/dataset/common.py | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/python/paddle/v2/dataset/common.py b/python/paddle/v2/dataset/common.py index 397c9e66d4..0944fe973d 100644 --- a/python/paddle/v2/dataset/common.py +++ b/python/paddle/v2/dataset/common.py @@ -16,6 +16,7 @@ import requests import hashlib import os import shutil +import sys __all__ = ['DATA_HOME', 'download', 'md5file'] @@ -35,6 +36,7 @@ def md5file(fname): def download(url, module_name, md5sum): + print "downloading %s" % url dirname = os.path.join(DATA_HOME, module_name) if not os.path.exists(dirname): os.makedirs(dirname) @@ -42,8 +44,22 @@ def download(url, module_name, md5sum): filename = os.path.join(dirname, url.split('/')[-1]) if not (os.path.exists(filename) and md5file(filename) == md5sum): r = requests.get(url, stream=True) - with open(filename, 'w') as f: - shutil.copyfileobj(r.raw, f) + total_length = r.headers.get('content-length') + + if total_length is None: + with open(filename, 'w') as f: + shutil.copyfileobj(r.raw, f) + else: + with open(filename, 'w') as f: + dl = 0 + total_length = int(total_length) + for data in r.iter_content(chunk_size=4096): + dl += len(data) + f.write(data) + done = int(50 * dl / total_length) + sys.stdout.write("\r[%s%s]" % ('=' * done, + ' ' * (50 - done))) + sys.stdout.flush() return filename From 1996d4de74d328bbb6fa2f9544955763a4eb2d2e Mon Sep 17 00:00:00 2001 From: Helin Wang Date: Fri, 3 Mar 2017 03:38:43 +0000 Subject: [PATCH 67/87] fix according to comment --- python/paddle/v2/dataset/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/v2/dataset/common.py b/python/paddle/v2/dataset/common.py index 0944fe973d..3021b68ddb 100644 --- 
a/python/paddle/v2/dataset/common.py +++ b/python/paddle/v2/dataset/common.py @@ -36,13 +36,13 @@ def md5file(fname): def download(url, module_name, md5sum): - print "downloading %s" % url dirname = os.path.join(DATA_HOME, module_name) if not os.path.exists(dirname): os.makedirs(dirname) filename = os.path.join(dirname, url.split('/')[-1]) if not (os.path.exists(filename) and md5file(filename) == md5sum): + print "Cache file %s not found, downloading %s" % (filename, url) r = requests.get(url, stream=True) total_length = r.headers.get('content-length') From e76264f97e09c47edc78ef811cff57f8382df51c Mon Sep 17 00:00:00 2001 From: liaogang Date: Fri, 3 Mar 2017 13:24:35 +0800 Subject: [PATCH 68/87] redirect code coverage to /dev/null --- cmake/coverallsGcovJsons.cmake | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/cmake/coverallsGcovJsons.cmake b/cmake/coverallsGcovJsons.cmake index ae3530c3a0..7dfe0c8bf9 100644 --- a/cmake/coverallsGcovJsons.cmake +++ b/cmake/coverallsGcovJsons.cmake @@ -110,14 +110,15 @@ endmacro() # Get the coverage data. file(GLOB_RECURSE GCDA_FILES "${COV_PATH}" "*.gcda") -message("GCDA files:") +message("Process GCDA files:") +message("===============================") # Get a list of all the object directories needed by gcov # (The directories the .gcda files and .o files are found in) # and run gcov on those. foreach(GCDA ${GCDA_FILES}) - message("Process: ${GCDA}") - message("------------------------------------------------------------------------------") + # message("Process: ${GCDA}") + # message("------------------------------------------------------------------------------") get_filename_component(GCDA_DIR ${GCDA} PATH) # @@ -135,7 +136,7 @@ foreach(GCDA ${GCDA_FILES}) # If -p is not specified then the file is named only "the_file.c.gcov" # execute_process( - COMMAND ${GCOV_EXECUTABLE} -p -o ${GCDA_DIR} ${GCDA} + COMMAND "${GCOV_EXECUTABLE} -p -o ${GCDA_DIR} ${GCDA} >/dev/null" WORKING_DIRECTORY ${GCDA_DIR} ) endforeach() @@ -383,7 +384,7 @@ foreach(NOT_COVERED_SRC ${COVERAGE_SRCS_REMAINING}) set(GCOV_FILE_COVERAGE "${GCOV_FILE_COVERAGE}]") # Generate the final JSON for this file. 
- message("Generate JSON for non-gcov file: ${NOT_COVERED_SRC}...") + # message("Generate JSON for non-gcov file: ${NOT_COVERED_SRC}...") string(CONFIGURE ${SRC_FILE_TEMPLATE} FILE_JSON) set(JSON_GCOV_FILES "${JSON_GCOV_FILES}${FILE_JSON}, ") endforeach() From 19ab464b8b89d0373c6e7729756a27cf8ca94148 Mon Sep 17 00:00:00 2001 From: liaogang Date: Fri, 3 Mar 2017 13:33:57 +0800 Subject: [PATCH 69/87] Disable test_ProtoServer on Travis CI --- paddle/pserver/test/CMakeLists.txt | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/paddle/pserver/test/CMakeLists.txt b/paddle/pserver/test/CMakeLists.txt index 64654f67d0..6e8f9c37f6 100644 --- a/paddle/pserver/test/CMakeLists.txt +++ b/paddle/pserver/test/CMakeLists.txt @@ -10,9 +10,11 @@ add_test(NAME socket_test add_unittest_without_exec(test_ProtoServer test_ProtoServer.cpp) -add_test(NAME test_ProtoServer - COMMAND ${PROJ_ROOT}/paddle/.set_port.sh -p port - ${CMAKE_CURRENT_BINARY_DIR}/test_ProtoServer) +IF(NOT ON_TRAVIS) + add_test(NAME test_ProtoServer + COMMAND ${PROJ_ROOT}/paddle/.set_port.sh -p port + ${CMAKE_CURRENT_BINARY_DIR}/test_ProtoServer) +ENDIF(NOT ON_TRAVIS) # TODO(yuyang18): Run test_ProtoServer when with rdma # add_test(NAME test_ProtoServerRDMA From a6f25f3d2a27e2ef434bd399fd9e0c8c1971638d Mon Sep 17 00:00:00 2001 From: wen-bo-yang Date: Sat, 25 Feb 2017 04:09:20 +0800 Subject: [PATCH 70/87] add sentiment data package --- paddle/setup.py.in | 2 + paddle/v2/data_set/config.py | 8 ++++ paddle/v2/data_set/sentiment.py | 81 +++++++++++++++++++++++++++++++++ 3 files changed, 91 insertions(+) create mode 100644 paddle/v2/data_set/config.py create mode 100644 paddle/v2/data_set/sentiment.py diff --git a/paddle/setup.py.in b/paddle/setup.py.in index 38621af065..8dc3ff6acd 100644 --- a/paddle/setup.py.in +++ b/paddle/setup.py.in @@ -72,6 +72,8 @@ setup(name="py_paddle", packages=['py_paddle'], include_dirs = include_dirs, install_requires = [ + 'h5py', + 'nltk', 'numpy>=1.8.0', # The numpy is required. 
'protobuf>=3.0.0' # The paddle protobuf version ], diff --git a/paddle/v2/data_set/config.py b/paddle/v2/data_set/config.py new file mode 100644 index 0000000000..69e96d65ef --- /dev/null +++ b/paddle/v2/data_set/config.py @@ -0,0 +1,8 @@ +import os + +__all__ = ['DATA_HOME'] + +DATA_HOME = os.path.expanduser('~/.cache/paddle_data_set') + +if not os.path.exists(DATA_HOME): + os.makedirs(DATA_HOME) diff --git a/paddle/v2/data_set/sentiment.py b/paddle/v2/data_set/sentiment.py new file mode 100644 index 0000000000..323fc214dd --- /dev/null +++ b/paddle/v2/data_set/sentiment.py @@ -0,0 +1,81 @@ +import random +import nltk +import numpy as np +from nltk.corpus import movie_reviews +from config import DATA_HOME + +__all__ = ['train', 'test', 'get_label_dict', 'get_word_dict'] +SPLIT_NUM = 800 +TOTAL_DATASET_NUM = 1000 + + +def get_label_dict(): + label_dict = {'neg': 0, 'pos': 1} + return label_dict + + +def is_download_data(): + try: + nltk.data.path.append(DATA_HOME) + movie_reviews.categories() + except LookupError: + print "dd" + nltk.download('movie_reviews', download_dir=DATA_HOME) + nltk.data.path.append(DATA_HOME) + + +def get_word_dict(): + words_freq_sorted = list() + is_download_data() + words_freq = nltk.FreqDist(w.lower() for w in movie_reviews.words()) + words_sort_list = words_freq.items() + words_sort_list.sort(cmp=lambda a, b: b[1] - a[1]) + print words_sort_list + for index, word in enumerate(words_sort_list): + words_freq_sorted.append(word[0]) + return words_freq_sorted + + +def load_sentiment_data(): + label_dict = get_label_dict() + is_download_data() + words_freq = nltk.FreqDist(w.lower() for w in movie_reviews.words()) + data_set = [([words_freq[word] + for word in movie_reviews.words(fileid)], label_dict[category]) + for category in movie_reviews.categories() + for fileid in movie_reviews.fileids(category)] + random.shuffle(data_set) + return data_set + + +data_set = load_sentiment_data() + + +def reader_creator(data_type): + if data_type == 'train': + for each in data_set[0:SPLIT_NUM]: + train_sentences = np.array(each[0], dtype=np.int32) + train_label = np.array(each[1], dtype=np.int8) + yield train_sentences, train_label + else: + for each in data_set[SPLIT_NUM:]: + test_sentences = np.array(each[0], dtype=np.int32) + test_label = np.array(each[1], dtype=np.int8) + yield test_sentences, test_label + + +def train(): + return reader_creator('train') + + +def test(): + return reader_creator('test') + + +if __name__ == '__main__': + for train in train(): + print "train" + print train + for test in test(): + print "test" + print test From 55d19fc4f029105661ddd30aa838e1100d03ee54 Mon Sep 17 00:00:00 2001 From: wen-bo-yang Date: Sun, 26 Feb 2017 21:41:42 +0800 Subject: [PATCH 71/87] fix bugs --- paddle/setup.py.in | 1 - paddle/v2/data_set/sentiment.py | 81 -------------- paddle/v2/{data_set => dataset}/config.py | 2 +- paddle/v2/dataset/sentiment.py | 127 ++++++++++++++++++++++ 4 files changed, 128 insertions(+), 83 deletions(-) delete mode 100644 paddle/v2/data_set/sentiment.py rename paddle/v2/{data_set => dataset}/config.py (62%) create mode 100644 paddle/v2/dataset/sentiment.py diff --git a/paddle/setup.py.in b/paddle/setup.py.in index 8dc3ff6acd..d44f1145df 100644 --- a/paddle/setup.py.in +++ b/paddle/setup.py.in @@ -72,7 +72,6 @@ setup(name="py_paddle", packages=['py_paddle'], include_dirs = include_dirs, install_requires = [ - 'h5py', 'nltk', 'numpy>=1.8.0', # The numpy is required. 
'protobuf>=3.0.0' # The paddle protobuf version diff --git a/paddle/v2/data_set/sentiment.py b/paddle/v2/data_set/sentiment.py deleted file mode 100644 index 323fc214dd..0000000000 --- a/paddle/v2/data_set/sentiment.py +++ /dev/null @@ -1,81 +0,0 @@ -import random -import nltk -import numpy as np -from nltk.corpus import movie_reviews -from config import DATA_HOME - -__all__ = ['train', 'test', 'get_label_dict', 'get_word_dict'] -SPLIT_NUM = 800 -TOTAL_DATASET_NUM = 1000 - - -def get_label_dict(): - label_dict = {'neg': 0, 'pos': 1} - return label_dict - - -def is_download_data(): - try: - nltk.data.path.append(DATA_HOME) - movie_reviews.categories() - except LookupError: - print "dd" - nltk.download('movie_reviews', download_dir=DATA_HOME) - nltk.data.path.append(DATA_HOME) - - -def get_word_dict(): - words_freq_sorted = list() - is_download_data() - words_freq = nltk.FreqDist(w.lower() for w in movie_reviews.words()) - words_sort_list = words_freq.items() - words_sort_list.sort(cmp=lambda a, b: b[1] - a[1]) - print words_sort_list - for index, word in enumerate(words_sort_list): - words_freq_sorted.append(word[0]) - return words_freq_sorted - - -def load_sentiment_data(): - label_dict = get_label_dict() - is_download_data() - words_freq = nltk.FreqDist(w.lower() for w in movie_reviews.words()) - data_set = [([words_freq[word] - for word in movie_reviews.words(fileid)], label_dict[category]) - for category in movie_reviews.categories() - for fileid in movie_reviews.fileids(category)] - random.shuffle(data_set) - return data_set - - -data_set = load_sentiment_data() - - -def reader_creator(data_type): - if data_type == 'train': - for each in data_set[0:SPLIT_NUM]: - train_sentences = np.array(each[0], dtype=np.int32) - train_label = np.array(each[1], dtype=np.int8) - yield train_sentences, train_label - else: - for each in data_set[SPLIT_NUM:]: - test_sentences = np.array(each[0], dtype=np.int32) - test_label = np.array(each[1], dtype=np.int8) - yield test_sentences, test_label - - -def train(): - return reader_creator('train') - - -def test(): - return reader_creator('test') - - -if __name__ == '__main__': - for train in train(): - print "train" - print train - for test in test(): - print "test" - print test diff --git a/paddle/v2/data_set/config.py b/paddle/v2/dataset/config.py similarity index 62% rename from paddle/v2/data_set/config.py rename to paddle/v2/dataset/config.py index 69e96d65ef..304c4bc5cd 100644 --- a/paddle/v2/data_set/config.py +++ b/paddle/v2/dataset/config.py @@ -2,7 +2,7 @@ import os __all__ = ['DATA_HOME'] -DATA_HOME = os.path.expanduser('~/.cache/paddle_data_set') +DATA_HOME = os.path.expanduser('~/.cache/paddle/dataset') if not os.path.exists(DATA_HOME): os.makedirs(DATA_HOME) diff --git a/paddle/v2/dataset/sentiment.py b/paddle/v2/dataset/sentiment.py new file mode 100644 index 0000000000..83581eadf2 --- /dev/null +++ b/paddle/v2/dataset/sentiment.py @@ -0,0 +1,127 @@ +# /usr/bin/env python +# -*- coding:utf-8 -*- + +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +""" +The script fetch and preprocess movie_reviews data set + +that provided by NLTK +""" + + +import nltk +import numpy as np +from nltk.corpus import movie_reviews +from config import DATA_HOME + +__all__ = ['train', 'test', 'get_label_dict', 'get_word_dict'] +NUM_TRAINING_INSTANCES = 1600 +NUM_TOTAL_INSTANCES = 2000 + + +def get_label_dict(): + """ + Define the labels dict for dataset + """ + label_dict = {'neg': 0, 'pos': 1} + return label_dict + + +def download_data_if_not_yet(): + """ + Download the data set, if the data set is not download. + """ + try: + # make sure that nltk can find the data + nltk.data.path.append(DATA_HOME) + movie_reviews.categories() + except LookupError: + print "Downloading movie_reviews data set, please wait....." + nltk.download('movie_reviews', download_dir=DATA_HOME) + print "Download data set success......" + # make sure that nltk can find the data + nltk.data.path.append(DATA_HOME) + + +def get_word_dict(): + """ + Sorted the words by the frequency of words which occur in sample + :return: + words_freq_sorted + """ + words_freq_sorted = list() + download_data_if_not_yet() + words_freq = nltk.FreqDist(w.lower() for w in movie_reviews.words()) + words_sort_list = words_freq.items() + words_sort_list.sort(cmp=lambda a, b: b[1] - a[1]) + for index, word in enumerate(words_sort_list): + words_freq_sorted.append(word[0]) + return words_freq_sorted + + +def load_sentiment_data(): + """ + Load the data set + :return: + data_set + """ + label_dict = get_label_dict() + download_data_if_not_yet() + words_freq = nltk.FreqDist(w.lower() for w in movie_reviews.words()) + data_set = [([words_freq[word.lower()] + for word in movie_reviews.words(fileid)], + label_dict[category]) + for category in movie_reviews.categories() + for fileid in movie_reviews.fileids(category)] + return data_set + + +data_set = load_sentiment_data() + + +def reader_creator(data): + """ + Reader creator, it format data set to numpy + :param data: + train data set or test data set + """ + for each in data: + sentences = np.array(each[0], dtype=np.int32) + labels = np.array(each[1], dtype=np.int8) + yield sentences, labels + + +def train(): + """ + Default train set reader creator + """ + return reader_creator(data_set[0:NUM_TRAINING_INSTANCES]) + + +def test(): + """ + Default test set reader creator + """ + return reader_creator(data_set[NUM_TRAINING_INSTANCES:]) + + +def unittest(): + assert len(data_set) == NUM_TOTAL_INSTANCES + assert len(list(train())) == NUM_TRAINING_INSTANCES + assert len(list(test())) == NUM_TOTAL_INSTANCES - NUM_TRAINING_INSTANCES + + +if __name__ == '__main__': + unittest() From 812e21f3c4c14b8cf215fb1221b74814b132f301 Mon Sep 17 00:00:00 2001 From: wen-bo-yang Date: Mon, 27 Feb 2017 17:43:28 +0800 Subject: [PATCH 72/87] add cross reading sample files and fix bugs --- paddle/setup.py.in | 2 +- paddle/v2/dataset/config.py | 8 ---- .../paddle}/v2/dataset/sentiment.py | 42 +++++++++++++------ 3 files changed, 30 insertions(+), 22 deletions(-) delete mode 100644 paddle/v2/dataset/config.py rename {paddle => python/paddle}/v2/dataset/sentiment.py (73%) diff --git a/paddle/setup.py.in b/paddle/setup.py.in index d44f1145df..382d5be6ec 100644 --- a/paddle/setup.py.in +++ b/paddle/setup.py.in @@ -72,7 +72,7 @@ setup(name="py_paddle", packages=['py_paddle'], include_dirs = include_dirs, install_requires = [ - 'nltk', + 'nltk>=3.2.2', 'numpy>=1.8.0', # The numpy is 
required. 'protobuf>=3.0.0' # The paddle protobuf version ], diff --git a/paddle/v2/dataset/config.py b/paddle/v2/dataset/config.py deleted file mode 100644 index 304c4bc5cd..0000000000 --- a/paddle/v2/dataset/config.py +++ /dev/null @@ -1,8 +0,0 @@ -import os - -__all__ = ['DATA_HOME'] - -DATA_HOME = os.path.expanduser('~/.cache/paddle/dataset') - -if not os.path.exists(DATA_HOME): - os.makedirs(DATA_HOME) diff --git a/paddle/v2/dataset/sentiment.py b/python/paddle/v2/dataset/sentiment.py similarity index 73% rename from paddle/v2/dataset/sentiment.py rename to python/paddle/v2/dataset/sentiment.py index 83581eadf2..9825d2ef96 100644 --- a/paddle/v2/dataset/sentiment.py +++ b/python/paddle/v2/dataset/sentiment.py @@ -20,9 +20,9 @@ The script fetch and preprocess movie_reviews data set that provided by NLTK """ - import nltk import numpy as np +from itertools import chain from nltk.corpus import movie_reviews from config import DATA_HOME @@ -50,9 +50,10 @@ def download_data_if_not_yet(): except LookupError: print "Downloading movie_reviews data set, please wait....." nltk.download('movie_reviews', download_dir=DATA_HOME) - print "Download data set success......" # make sure that nltk can find the data nltk.data.path.append(DATA_HOME) + print "Download data set success....." + print "Path is " + nltk.data.find('corpora/movie_reviews').path def get_word_dict(): @@ -67,24 +68,39 @@ def get_word_dict(): words_sort_list = words_freq.items() words_sort_list.sort(cmp=lambda a, b: b[1] - a[1]) for index, word in enumerate(words_sort_list): - words_freq_sorted.append(word[0]) + words_freq_sorted.append((word[0], index + 1)) return words_freq_sorted +def sort_files(): + """ + Sorted the sample for cross reading the sample + :return: + files_list + """ + files_list = list() + download_data_if_not_yet() + neg_file_list = movie_reviews.fileids('neg') + pos_file_list = movie_reviews.fileids('pos') + files_list = list(chain.from_iterable(zip(neg_file_list, pos_file_list))) + return files_list + + def load_sentiment_data(): """ Load the data set :return: data_set """ - label_dict = get_label_dict() + data_set = list() download_data_if_not_yet() - words_freq = nltk.FreqDist(w.lower() for w in movie_reviews.words()) - data_set = [([words_freq[word.lower()] - for word in movie_reviews.words(fileid)], - label_dict[category]) - for category in movie_reviews.categories() - for fileid in movie_reviews.fileids(category)] + words_ids = dict(get_word_dict()) + for sample_file in sort_files(): + words_list = list() + category = 0 if 'neg' in sample_file else 1 + for word in movie_reviews.words(sample_file): + words_list.append(words_ids[word.lower()]) + data_set.append((words_list, category)) return data_set @@ -98,9 +114,9 @@ def reader_creator(data): train data set or test data set """ for each in data: - sentences = np.array(each[0], dtype=np.int32) - labels = np.array(each[1], dtype=np.int8) - yield sentences, labels + list_of_int = np.array(each[0], dtype=np.int32) + label = each[1] + yield list_of_int, label def train(): From 6115fcc5a73497157718eadb3bd596311ea83a55 Mon Sep 17 00:00:00 2001 From: wen-bo-yang Date: Thu, 2 Mar 2017 04:11:11 +0000 Subject: [PATCH 73/87] format by yapf --- python/paddle/v2/dataset/sentiment.py | 51 ++++++------------ .../paddle/v2/dataset/tests/test_sentiment.py | 52 +++++++++++++++++++ 2 files changed, 69 insertions(+), 34 deletions(-) create mode 100644 python/paddle/v2/dataset/tests/test_sentiment.py diff --git a/python/paddle/v2/dataset/sentiment.py 
b/python/paddle/v2/dataset/sentiment.py index 9825d2ef96..1e7f222f4d 100644 --- a/python/paddle/v2/dataset/sentiment.py +++ b/python/paddle/v2/dataset/sentiment.py @@ -20,38 +20,30 @@ The script fetch and preprocess movie_reviews data set that provided by NLTK """ +import paddle.v2.dataset.common as common +import collections import nltk import numpy as np from itertools import chain from nltk.corpus import movie_reviews -from config import DATA_HOME -__all__ = ['train', 'test', 'get_label_dict', 'get_word_dict'] +__all__ = ['train', 'test', 'get_word_dict'] NUM_TRAINING_INSTANCES = 1600 NUM_TOTAL_INSTANCES = 2000 -def get_label_dict(): - """ - Define the labels dict for dataset - """ - label_dict = {'neg': 0, 'pos': 1} - return label_dict - - def download_data_if_not_yet(): """ Download the data set, if the data set is not download. """ try: # make sure that nltk can find the data - nltk.data.path.append(DATA_HOME) + if common.DATA_HOME not in nltk.data.path: + nltk.data.path.append(common.DATA_HOME) movie_reviews.categories() except LookupError: print "Downloading movie_reviews data set, please wait....." - nltk.download('movie_reviews', download_dir=DATA_HOME) - # make sure that nltk can find the data - nltk.data.path.append(DATA_HOME) + nltk.download('movie_reviews', download_dir=common.DATA_HOME) print "Download data set success....." print "Path is " + nltk.data.find('corpora/movie_reviews').path @@ -63,12 +55,17 @@ def get_word_dict(): words_freq_sorted """ words_freq_sorted = list() + word_freq_dict = collections.defaultdict(int) download_data_if_not_yet() - words_freq = nltk.FreqDist(w.lower() for w in movie_reviews.words()) - words_sort_list = words_freq.items() + + for category in movie_reviews.categories(): + for field in movie_reviews.fileids(category): + for words in movie_reviews.words(field): + word_freq_dict[words] += 1 + words_sort_list = word_freq_dict.items() words_sort_list.sort(cmp=lambda a, b: b[1] - a[1]) for index, word in enumerate(words_sort_list): - words_freq_sorted.append((word[0], index + 1)) + words_freq_sorted.append((word[0], index)) return words_freq_sorted @@ -79,7 +76,6 @@ def sort_files(): files_list """ files_list = list() - download_data_if_not_yet() neg_file_list = movie_reviews.fileids('neg') pos_file_list = movie_reviews.fileids('pos') files_list = list(chain.from_iterable(zip(neg_file_list, pos_file_list))) @@ -104,9 +100,6 @@ def load_sentiment_data(): return data_set -data_set = load_sentiment_data() - - def reader_creator(data): """ Reader creator, it format data set to numpy @@ -114,15 +107,14 @@ def reader_creator(data): train data set or test data set """ for each in data: - list_of_int = np.array(each[0], dtype=np.int32) - label = each[1] - yield list_of_int, label + yield each[0], each[1] def train(): """ Default train set reader creator """ + data_set = load_sentiment_data() return reader_creator(data_set[0:NUM_TRAINING_INSTANCES]) @@ -130,14 +122,5 @@ def test(): """ Default test set reader creator """ + data_set = load_sentiment_data() return reader_creator(data_set[NUM_TRAINING_INSTANCES:]) - - -def unittest(): - assert len(data_set) == NUM_TOTAL_INSTANCES - assert len(list(train())) == NUM_TRAINING_INSTANCES - assert len(list(test())) == NUM_TOTAL_INSTANCES - NUM_TRAINING_INSTANCES - - -if __name__ == '__main__': - unittest() diff --git a/python/paddle/v2/dataset/tests/test_sentiment.py b/python/paddle/v2/dataset/tests/test_sentiment.py new file mode 100644 index 0000000000..48a14aad2a --- /dev/null +++ 
b/python/paddle/v2/dataset/tests/test_sentiment.py @@ -0,0 +1,52 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import nltk +import paddle.v2.dataset.sentiment as st +from nltk.corpus import movie_reviews + + +class TestSentimentMethods(unittest.TestCase): + def test_get_word_dict(self): + word_dict = st.get_word_dict()[0:10] + test_word_list = [(u',', 0), (u'the', 1), (u'.', 2), (u'a', 3), + (u'and', 4), (u'of', 5), (u'to', 6), (u"'", 7), + (u'is', 8), (u'in', 9)] + for idx, each in enumerate(word_dict): + self.assertEqual(each, test_word_list[idx]) + self.assertTrue("/root/.cache/paddle/dataset" in nltk.data.path) + + def test_sort_files(self): + last_label = '' + for sample_file in st.sort_files(): + current_label = sample_file.split("/")[0] + self.assertNotEqual(current_label, last_label) + last_label = current_label + + def test_data_set(self): + data_set = st.load_sentiment_data() + last_label = -1 + for each in st.test(): + self.assertNotEqual(each[1], last_label) + last_label = each[1] + self.assertEqual(len(data_set), st.NUM_TOTAL_INSTANCES) + self.assertEqual(len(list(st.train())), st.NUM_TRAINING_INSTANCES) + self.assertEqual( + len(list(st.test())), + (st.NUM_TOTAL_INSTANCES - st.NUM_TRAINING_INSTANCES)) + + +if __name__ == '__main__': + unittest.main() From a846ef664002a91c949f383d273e0539768198e6 Mon Sep 17 00:00:00 2001 From: wen-bo-yang Date: Thu, 2 Mar 2017 08:13:13 +0000 Subject: [PATCH 74/87] modify doc --- python/paddle/v2/dataset/sentiment.py | 2 +- python/paddle/v2/dataset/tests/test_sentiment.py | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/python/paddle/v2/dataset/sentiment.py b/python/paddle/v2/dataset/sentiment.py index 1e7f222f4d..137aa6aea7 100644 --- a/python/paddle/v2/dataset/sentiment.py +++ b/python/paddle/v2/dataset/sentiment.py @@ -102,7 +102,7 @@ def load_sentiment_data(): def reader_creator(data): """ - Reader creator, it format data set to numpy + Reader creator, generate an iterator for data set :param data: train data set or test data set """ diff --git a/python/paddle/v2/dataset/tests/test_sentiment.py b/python/paddle/v2/dataset/tests/test_sentiment.py index 48a14aad2a..4074052907 100644 --- a/python/paddle/v2/dataset/tests/test_sentiment.py +++ b/python/paddle/v2/dataset/tests/test_sentiment.py @@ -1,3 +1,6 @@ +# /usr/bin/env python +# -*- coding:utf-8 -*- + # Copyright (c) 2016 PaddlePaddle Authors. 
All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); From 3feebce2095ebe7002e86081de0a76d573319129 Mon Sep 17 00:00:00 2001 From: wen-bo-yang Date: Fri, 3 Mar 2017 05:45:59 +0000 Subject: [PATCH 75/87] add sentiment in __init__.py --- python/paddle/v2/dataset/__init__.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/python/paddle/v2/dataset/__init__.py b/python/paddle/v2/dataset/__init__.py index a1b21bab3b..e823f56316 100644 --- a/python/paddle/v2/dataset/__init__.py +++ b/python/paddle/v2/dataset/__init__.py @@ -18,5 +18,8 @@ import imdb import cifar import movielens import conll05 +import sentiment -__all__ = ['mnist', 'imikolov', 'imdb', 'cifar', 'movielens', 'conll05'] +__all__ = [ + 'mnist', 'imikolov', 'imdb', 'cifar', 'movielens', 'conll05', 'sentiment' +] From 615e41240c0083de10d938f28834d0933ea27bdc Mon Sep 17 00:00:00 2001 From: liaogang Date: Fri, 3 Mar 2017 14:55:16 +0800 Subject: [PATCH 76/87] remove comment messages --- cmake/coverallsGcovJsons.cmake | 3 --- 1 file changed, 3 deletions(-) diff --git a/cmake/coverallsGcovJsons.cmake b/cmake/coverallsGcovJsons.cmake index 7dfe0c8bf9..ad9a10cb86 100644 --- a/cmake/coverallsGcovJsons.cmake +++ b/cmake/coverallsGcovJsons.cmake @@ -117,8 +117,6 @@ message("===============================") # (The directories the .gcda files and .o files are found in) # and run gcov on those. foreach(GCDA ${GCDA_FILES}) - # message("Process: ${GCDA}") - # message("------------------------------------------------------------------------------") get_filename_component(GCDA_DIR ${GCDA} PATH) # @@ -384,7 +382,6 @@ foreach(NOT_COVERED_SRC ${COVERAGE_SRCS_REMAINING}) set(GCOV_FILE_COVERAGE "${GCOV_FILE_COVERAGE}]") # Generate the final JSON for this file. - # message("Generate JSON for non-gcov file: ${NOT_COVERED_SRC}...") string(CONFIGURE ${SRC_FILE_TEMPLATE} FILE_JSON) set(JSON_GCOV_FILES "${JSON_GCOV_FILES}${FILE_JSON}, ") endforeach() From c9f379ed805d7459fd5333706c0496dee662112d Mon Sep 17 00:00:00 2001 From: wen-bo-yang Date: Fri, 3 Mar 2017 07:05:37 +0000 Subject: [PATCH 77/87] modify code --- python/paddle/v2/dataset/sentiment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/v2/dataset/sentiment.py b/python/paddle/v2/dataset/sentiment.py index 137aa6aea7..cbd08fa736 100644 --- a/python/paddle/v2/dataset/sentiment.py +++ b/python/paddle/v2/dataset/sentiment.py @@ -20,7 +20,7 @@ The script fetch and preprocess movie_reviews data set that provided by NLTK """ -import paddle.v2.dataset.common as common +import common import collections import nltk import numpy as np From 5b24583f63ff25f9f652bd1a1d9e9bc3f930f263 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Fri, 3 Mar 2017 15:12:42 +0800 Subject: [PATCH 78/87] fix merge conflict --- python/paddle/v2/event.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/python/paddle/v2/event.py b/python/paddle/v2/event.py index 8cbf9b9b1f..a429e36b63 100644 --- a/python/paddle/v2/event.py +++ b/python/paddle/v2/event.py @@ -53,9 +53,8 @@ class EndPass(WithMetric): Event On One Pass Training Complete. 
""" - def __init__(self, pass_id, cost, evaluator): + def __init__(self, pass_id, evaluator): self.pass_id = pass_id - self.cost = cost WithMetric.__init__(self, evaluator) From e60d94b3d89233d8272a98a67cd980f6b837c40b Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Fri, 3 Mar 2017 15:13:45 +0800 Subject: [PATCH 79/87] correct data_type --- demo/seqToseq/api_train_v2.py | 12 ++++++------ demo/seqToseq/seqToseq_net_v2.py | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/demo/seqToseq/api_train_v2.py b/demo/seqToseq/api_train_v2.py index efbab8d7b9..bdcf3a5af0 100644 --- a/demo/seqToseq/api_train_v2.py +++ b/demo/seqToseq/api_train_v2.py @@ -4,7 +4,8 @@ import paddle.v2 as paddle from seqToseq_net_v2 import seqToseq_net_v2 -### Data Definiation +# Data Definiation. +# TODO:This code should be merged to dataset package. data_dir = "./data/pre-wmt14" src_lang_dict = os.path.join(data_dir, 'src.dict') trg_lang_dict = os.path.join(data_dir, 'trg.dict') @@ -68,15 +69,14 @@ def train_reader(file_name): def main(): paddle.init(use_gpu=False, trainer_count=1) - # reader = train_reader("data/pre-wmt14/train/train") # define network topology cost = seqToseq_net_v2(source_dict_dim, target_dict_dim) parameters = paddle.parameters.create(cost) - optimizer = paddle.optimizer.Adam(batch_size=50, learning_rate=5e-4) + optimizer = paddle.optimizer.Adam(learning_rate=1e-4) def event_handler(event): if isinstance(event, paddle.event.EndIteration): - if event.batch_id % 100 == 0: + if event.batch_id % 10 == 0: print "Pass %d, Batch %d, Cost %f, %s" % ( event.pass_id, event.batch_id, event.cost, event.metrics) @@ -93,12 +93,12 @@ def main(): trn_reader = paddle.reader.batched( paddle.reader.shuffle( train_reader("data/pre-wmt14/train/train"), buf_size=8192), - batch_size=10) + batch_size=10000) trainer.train( reader=trn_reader, event_handler=event_handler, - num_passes=10000, + num_passes=10, reader_dict=reader_dict) diff --git a/demo/seqToseq/seqToseq_net_v2.py b/demo/seqToseq/seqToseq_net_v2.py index 7e057e2440..1ac95686b4 100644 --- a/demo/seqToseq/seqToseq_net_v2.py +++ b/demo/seqToseq/seqToseq_net_v2.py @@ -14,7 +14,7 @@ def seqToseq_net_v2(source_dict_dim, target_dict_dim): #### Encoder src_word_id = layer.data( name='source_language_word', - type=data_type.dense_vector(source_dict_dim)) + type=data_type.integer_value_sequence(source_dict_dim)) src_embedding = layer.embedding( input=src_word_id, size=word_vector_dim, @@ -67,7 +67,7 @@ def seqToseq_net_v2(source_dict_dim, target_dict_dim): trg_embedding = layer.embedding( input=layer.data( name='target_language_word', - type=data_type.dense_vector(target_dict_dim)), + type=data_type.integer_value_sequence(target_dict_dim)), size=word_vector_dim, param_attr=attr.ParamAttr(name='_target_language_embedding')) group_inputs.append(trg_embedding) @@ -84,7 +84,7 @@ def seqToseq_net_v2(source_dict_dim, target_dict_dim): lbl = layer.data( name='target_language_next_word', - type=data_type.dense_vector(target_dict_dim)) + type=data_type.integer_value_sequence(target_dict_dim)) cost = layer.classification_cost(input=decoder, label=lbl) return cost From 61619580355d37c1ef817c98995dfce8a6556fc0 Mon Sep 17 00:00:00 2001 From: wenboyang Date: Fri, 3 Mar 2017 15:56:43 +0800 Subject: [PATCH 80/87] update __init__.py I use pre-commit check all file and report is the passed. 
But it is not passed in Travis, so I update __init__.py --- python/paddle/v2/dataset/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/v2/dataset/__init__.py b/python/paddle/v2/dataset/__init__.py index fba76b202e..82f11a7c41 100644 --- a/python/paddle/v2/dataset/__init__.py +++ b/python/paddle/v2/dataset/__init__.py @@ -24,4 +24,4 @@ import sentiment __all__ = [ 'mnist', 'imikolov', 'imdb', 'cifar', 'movielens', 'conll05', 'sentiment' 'uci_housing' -] \ No newline at end of file +] From a6364f9e6a84f3d29876c5ad286acc1e86f7fe1f Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Fri, 3 Mar 2017 16:27:29 +0800 Subject: [PATCH 81/87] change batch_size to 5 --- demo/seqToseq/api_train_v2.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/demo/seqToseq/api_train_v2.py b/demo/seqToseq/api_train_v2.py index bdcf3a5af0..74ae1cf9ec 100644 --- a/demo/seqToseq/api_train_v2.py +++ b/demo/seqToseq/api_train_v2.py @@ -93,12 +93,12 @@ def main(): trn_reader = paddle.reader.batched( paddle.reader.shuffle( train_reader("data/pre-wmt14/train/train"), buf_size=8192), - batch_size=10000) + batch_size=5) trainer.train( reader=trn_reader, event_handler=event_handler, - num_passes=10, + num_passes=10000, reader_dict=reader_dict) From 1d2025c99cd7b21f3c6657244b7cdce8b8465380 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Fri, 3 Mar 2017 18:00:26 +0800 Subject: [PATCH 82/87] Use the sequence_conv_pool define inside the networks.py --- demo/sentiment/train_v2.py | 195 ++++++++++---------------------- python/paddle/v2/config_base.py | 2 + 2 files changed, 59 insertions(+), 138 deletions(-) diff --git a/demo/sentiment/train_v2.py b/demo/sentiment/train_v2.py index 0fa7494853..3a266e74ea 100644 --- a/demo/sentiment/train_v2.py +++ b/demo/sentiment/train_v2.py @@ -1,126 +1,40 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import sys -from os.path import join as join_path import paddle.trainer_config_helpers.attrs as attrs from paddle.trainer_config_helpers.poolings import MaxPooling -import paddle.v2.layer as layer -import paddle.v2.activation as activation -import paddle.v2.data_type as data_type -import paddle.v2.dataset.imdb as imdb import paddle.v2 as paddle -def sequence_conv_pool(input, - input_size, - context_len, - hidden_size, - name=None, - context_start=None, - pool_type=None, - context_proj_layer_name=None, - context_proj_param_attr=False, - fc_layer_name=None, - fc_param_attr=None, - fc_bias_attr=None, - fc_act=None, - pool_bias_attr=None, - fc_attr=None, - context_attr=None, - pool_attr=None): - """ - Text convolution pooling layers helper. - - Text input => Context Projection => FC Layer => Pooling => Output. - - :param name: name of output layer(pooling layer name) - :type name: basestring - :param input: name of input layer - :type input: LayerOutput - :param context_len: context projection length. See - context_projection's document. 
- :type context_len: int - :param hidden_size: FC Layer size. - :type hidden_size: int - :param context_start: context projection length. See - context_projection's context_start. - :type context_start: int or None - :param pool_type: pooling layer type. See pooling_layer's document. - :type pool_type: BasePoolingType. - :param context_proj_layer_name: context projection layer name. - None if user don't care. - :type context_proj_layer_name: basestring - :param context_proj_param_attr: context projection parameter attribute. - None if user don't care. - :type context_proj_param_attr: ParameterAttribute or None. - :param fc_layer_name: fc layer name. None if user don't care. - :type fc_layer_name: basestring - :param fc_param_attr: fc layer parameter attribute. None if user don't care. - :type fc_param_attr: ParameterAttribute or None - :param fc_bias_attr: fc bias parameter attribute. False if no bias, - None if user don't care. - :type fc_bias_attr: ParameterAttribute or None - :param fc_act: fc layer activation type. None means tanh - :type fc_act: BaseActivation - :param pool_bias_attr: pooling layer bias attr. None if don't care. - False if no bias. - :type pool_bias_attr: ParameterAttribute or None. - :param fc_attr: fc layer extra attribute. - :type fc_attr: ExtraLayerAttribute - :param context_attr: context projection layer extra attribute. - :type context_attr: ExtraLayerAttribute - :param pool_attr: pooling layer extra attribute. - :type pool_attr: ExtraLayerAttribute - :return: output layer name. - :rtype: LayerOutput - """ - # Set Default Value to param - context_proj_layer_name = "%s_conv_proj" % name \ - if context_proj_layer_name is None else context_proj_layer_name - - with layer.mixed( - name=context_proj_layer_name, - size=input_size * context_len, - act=activation.Linear(), - layer_attr=context_attr) as m: - m += layer.context_projection( - input=input, - context_len=context_len, - context_start=context_start, - padding_attr=context_proj_param_attr) - - fc_layer_name = "%s_conv_fc" % name \ - if fc_layer_name is None else fc_layer_name - fl = layer.fc(name=fc_layer_name, - input=m, - size=hidden_size, - act=fc_act, - layer_attr=fc_attr, - param_attr=fc_param_attr, - bias_attr=fc_bias_attr) - - return layer.pooling( - name=name, - input=fl, - pooling_type=pool_type, - bias_attr=pool_bias_attr, - layer_attr=pool_attr) - - def convolution_net(input_dim, class_dim=2, emb_dim=128, hid_dim=128, is_predict=False): - data = layer.data("word", data_type.integer_value_sequence(input_dim)) - emb = layer.embedding(input=data, size=emb_dim) - conv_3 = sequence_conv_pool( - input=emb, input_size=emb_dim, context_len=3, hidden_size=hid_dim) - conv_4 = sequence_conv_pool( - input=emb, input_size=emb_dim, context_len=4, hidden_size=hid_dim) - output = layer.fc(input=[conv_3, conv_4], - size=class_dim, - act=activation.Softmax()) - lbl = layer.data("label", data_type.integer_value(2)) - cost = layer.classification_cost(input=output, label=lbl) + data = paddle.layer.data("word", + paddle.data_type.integer_value_sequence(input_dim)) + emb = paddle.layer.embedding(input=data, size=emb_dim) + conv_3 = paddle.networks.sequence_conv_pool( + input=emb, context_len=3, hidden_size=hid_dim) + conv_4 = paddle.networks.sequence_conv_pool( + input=emb, context_len=4, hidden_size=hid_dim) + output = paddle.layer.fc(input=[conv_3, conv_4], + size=class_dim, + act=paddle.activation.Softmax()) + lbl = paddle.layer.data("label", paddle.data_type.integer_value(2)) + cost = 
paddle.layer.classification_cost(input=output, label=lbl) return cost @@ -152,24 +66,28 @@ def stacked_lstm_net(input_dim, lstm_para_attr = attrs.ParameterAttribute(initial_std=0., learning_rate=1.) para_attr = [fc_para_attr, lstm_para_attr] bias_attr = attrs.ParameterAttribute(initial_std=0., l2_rate=0.) - relu = activation.Relu() - linear = activation.Linear() - - data = layer.data("word", data_type.integer_value_sequence(input_dim)) - emb = layer.embedding(input=data, size=emb_dim) - - fc1 = layer.fc(input=emb, size=hid_dim, act=linear, bias_attr=bias_attr) - lstm1 = layer.lstmemory( + relu = paddle.activation.Relu() + linear = paddle.activation.Linear() + + data = paddle.layer.data("word", + paddle.data_type.integer_value_sequence(input_dim)) + emb = paddle.layer.embedding(input=data, size=emb_dim) + + fc1 = paddle.layer.fc(input=emb, + size=hid_dim, + act=linear, + bias_attr=bias_attr) + lstm1 = paddle.layer.lstmemory( input=fc1, act=relu, bias_attr=bias_attr, layer_attr=layer_attr) inputs = [fc1, lstm1] for i in range(2, stacked_num + 1): - fc = layer.fc(input=inputs, - size=hid_dim, - act=linear, - param_attr=para_attr, - bias_attr=bias_attr) - lstm = layer.lstmemory( + fc = paddle.layer.fc(input=inputs, + size=hid_dim, + act=linear, + param_attr=para_attr, + bias_attr=bias_attr) + lstm = paddle.layer.lstmemory( input=fc, reverse=(i % 2) == 0, act=relu, @@ -177,16 +95,16 @@ def stacked_lstm_net(input_dim, layer_attr=layer_attr) inputs = [fc, lstm] - fc_last = layer.pooling(input=inputs[0], pooling_type=MaxPooling()) - lstm_last = layer.pooling(input=inputs[1], pooling_type=MaxPooling()) - output = layer.fc(input=[fc_last, lstm_last], - size=class_dim, - act=activation.Softmax(), - bias_attr=bias_attr, - param_attr=para_attr) + fc_last = paddle.layer.pooling(input=inputs[0], pooling_type=MaxPooling()) + lstm_last = paddle.layer.pooling(input=inputs[1], pooling_type=MaxPooling()) + output = paddle.layer.fc(input=[fc_last, lstm_last], + size=class_dim, + act=paddle.activation.Softmax(), + bias_attr=bias_attr, + param_attr=para_attr) - lbl = layer.data("label", data_type.integer_value(2)) - cost = layer.classification_cost(input=output, label=lbl) + lbl = paddle.layer.data("label", paddle.data_type.integer_value(2)) + cost = paddle.layer.classification_cost(input=output, label=lbl) return cost @@ -196,7 +114,7 @@ if __name__ == '__main__': # network config print 'load dictionary...' 
- word_dict = imdb.word_dict() + word_dict = paddle.dataset.imdb.word_dict() dict_dim = len(word_dict) class_dim = 2 @@ -226,7 +144,8 @@ if __name__ == '__main__': if isinstance(event, paddle.event.EndPass): result = trainer.test( reader=paddle.reader.batched( - lambda: imdb.test(word_dict), batch_size=128), + lambda: paddle.dataset.imdb.test(word_dict), + batch_size=128), reader_dict={'word': 0, 'label': 1}) print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics) @@ -239,7 +158,7 @@ if __name__ == '__main__': trainer.train( reader=paddle.reader.batched( paddle.reader.shuffle( - lambda: imdb.train(word_dict), buf_size=1000), + lambda: paddle.dataset.imdb.train(word_dict), buf_size=1000), batch_size=100), event_handler=event_handler, reader_dict={'word': 0, diff --git a/python/paddle/v2/config_base.py b/python/paddle/v2/config_base.py index fa2ccec6c3..0fc711e99a 100644 --- a/python/paddle/v2/config_base.py +++ b/python/paddle/v2/config_base.py @@ -93,6 +93,8 @@ def __convert_to_v2__(method_name, parent_names, is_default_name=True): name = kwargs.get('name', None) super(V2LayerImpl, self).__init__(name, parent_layers) + if kwargs.has_key('size'): + self.size = kwargs['size'] self.__other_kwargs__ = other_kwargs if wrapper is not None: From 84ce706750ba4e75e55fe75cb13467c5545ad945 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Fri, 3 Mar 2017 18:14:11 +0800 Subject: [PATCH 83/87] Restore config_base.py --- python/paddle/v2/config_base.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/python/paddle/v2/config_base.py b/python/paddle/v2/config_base.py index 0fc711e99a..fa2ccec6c3 100644 --- a/python/paddle/v2/config_base.py +++ b/python/paddle/v2/config_base.py @@ -93,8 +93,6 @@ def __convert_to_v2__(method_name, parent_names, is_default_name=True): name = kwargs.get('name', None) super(V2LayerImpl, self).__init__(name, parent_layers) - if kwargs.has_key('size'): - self.size = kwargs['size'] self.__other_kwargs__ = other_kwargs if wrapper is not None: From 6b0961a3c17f3f9a32a7faf063731e05732f23a1 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 3 Mar 2017 14:41:45 +0800 Subject: [PATCH 84/87] auto deployment paddle documentation * Change build process to generate v2 documentation * Add directory for v2 api * Correct sphinx index soft-link --- cmake/FindSphinx.cmake | 2 +- doc/api/index_cn.rst | 39 +---------- doc/api/index_en.rst | 29 +------- .../data_provider/dataprovider_cn.rst | 0 .../data_provider/dataprovider_en.rst | 0 .../data_provider/pydataprovider2_cn.rst | 0 .../data_provider/pydataprovider2_en.rst | 0 .../data_provider/src/mnist_config.py | 0 .../data_provider/src/mnist_provider.dict.py | 0 .../data_provider/src/mnist_train.txt | 0 .../data_provider/src/sentimental_config.py | 0 .../data_provider/src/sentimental_provider.py | 0 .../data_provider/src/sentimental_train.txt | 0 doc/api/{ => v1}/data_provider/src/train.list | 0 doc/api/v1/index_cn.rst | 37 ++++++++++ doc/api/v1/index_en.rst | 37 ++++++++++ .../{ => v1}/predict/src/predict_sample.py | 0 .../{ => v1}/predict/swig_py_paddle_cn.rst | 0 .../{ => v1}/predict/swig_py_paddle_en.rst | 0 .../trainer_config_helpers/activations.rst | 0 .../{ => v1}/trainer_config_helpers/attrs.rst | 0 .../trainer_config_helpers/data_sources.rst | 0 .../trainer_config_helpers/evaluators.rst | 0 .../trainer_config_helpers/layers.rst | 0 .../trainer_config_helpers/networks.rst | 0 .../trainer_config_helpers/optimizers.rst | 0 .../trainer_config_helpers/poolings.rst | 0 doc/api/v2/model_configs.rst | 6 ++ doc/templates/conf.py.cn.in | 8 
++- doc/templates/conf.py.en.in | 10 ++- doc/tutorials/quick_start/index_en.md | 8 +-- paddle/scripts/travis/docs.sh | 46 +++++++++---- python/paddle/v2/layer.py | 69 +++++-------------- 33 files changed, 152 insertions(+), 139 deletions(-) rename doc/api/{ => v1}/data_provider/dataprovider_cn.rst (100%) rename doc/api/{ => v1}/data_provider/dataprovider_en.rst (100%) rename doc/api/{ => v1}/data_provider/pydataprovider2_cn.rst (100%) rename doc/api/{ => v1}/data_provider/pydataprovider2_en.rst (100%) rename doc/api/{ => v1}/data_provider/src/mnist_config.py (100%) rename doc/api/{ => v1}/data_provider/src/mnist_provider.dict.py (100%) rename doc/api/{ => v1}/data_provider/src/mnist_train.txt (100%) rename doc/api/{ => v1}/data_provider/src/sentimental_config.py (100%) rename doc/api/{ => v1}/data_provider/src/sentimental_provider.py (100%) rename doc/api/{ => v1}/data_provider/src/sentimental_train.txt (100%) rename doc/api/{ => v1}/data_provider/src/train.list (100%) create mode 100644 doc/api/v1/index_cn.rst create mode 100644 doc/api/v1/index_en.rst rename doc/api/{ => v1}/predict/src/predict_sample.py (100%) rename doc/api/{ => v1}/predict/swig_py_paddle_cn.rst (100%) rename doc/api/{ => v1}/predict/swig_py_paddle_en.rst (100%) rename doc/api/{ => v1}/trainer_config_helpers/activations.rst (100%) rename doc/api/{ => v1}/trainer_config_helpers/attrs.rst (100%) rename doc/api/{ => v1}/trainer_config_helpers/data_sources.rst (100%) rename doc/api/{ => v1}/trainer_config_helpers/evaluators.rst (100%) rename doc/api/{ => v1}/trainer_config_helpers/layers.rst (100%) rename doc/api/{ => v1}/trainer_config_helpers/networks.rst (100%) rename doc/api/{ => v1}/trainer_config_helpers/optimizers.rst (100%) rename doc/api/{ => v1}/trainer_config_helpers/poolings.rst (100%) create mode 100644 doc/api/v2/model_configs.rst diff --git a/cmake/FindSphinx.cmake b/cmake/FindSphinx.cmake index d319442ef1..1c29cb22a3 100644 --- a/cmake/FindSphinx.cmake +++ b/cmake/FindSphinx.cmake @@ -72,7 +72,7 @@ function( Sphinx_add_target target_name builder conf cache source destination ) ${source} ${destination} COMMENT "Generating sphinx documentation: ${builder}" - COMMAND ln -sf ${destination}/index_*.html ${destination}/index.html + COMMAND cd ${destination} && ln -s ./index_*.html index.html ) set_property( diff --git a/doc/api/index_cn.rst b/doc/api/index_cn.rst index 3718cd73a2..874dd9cb22 100644 --- a/doc/api/index_cn.rst +++ b/doc/api/index_cn.rst @@ -1,37 +1,2 @@ -API中文手册 -============ - -DataProvider API ----------------- - -.. toctree:: - :maxdepth: 1 - - data_provider/dataprovider_cn.rst - data_provider/pydataprovider2_cn.rst - -.. _api_trainer_config: - -Model Config API ----------------- - -.. toctree:: - :maxdepth: 1 - - trainer_config_helpers/optimizers.rst - trainer_config_helpers/data_sources.rst - trainer_config_helpers/layers.rst - trainer_config_helpers/activations.rst - trainer_config_helpers/poolings.rst - trainer_config_helpers/networks.rst - trainer_config_helpers/evaluators.rst - trainer_config_helpers/attrs.rst - - -Applications API ----------------- - -.. toctree:: - :maxdepth: 1 - - predict/swig_py_paddle_cn.rst +API +=== \ No newline at end of file diff --git a/doc/api/index_en.rst b/doc/api/index_en.rst index 10c297a71d..b7f470e1f8 100644 --- a/doc/api/index_en.rst +++ b/doc/api/index_en.rst @@ -1,37 +1,10 @@ API === -DataProvider API ----------------- - -.. toctree:: - :maxdepth: 1 - - data_provider/dataprovider_en.rst - data_provider/pydataprovider2_en.rst - -.. 
_api_trainer_config: - Model Config API ---------------- .. toctree:: :maxdepth: 1 - trainer_config_helpers/optimizers.rst - trainer_config_helpers/data_sources.rst - trainer_config_helpers/layers.rst - trainer_config_helpers/activations.rst - trainer_config_helpers/poolings.rst - trainer_config_helpers/networks.rst - trainer_config_helpers/evaluators.rst - trainer_config_helpers/attrs.rst - - -Applications API ----------------- - -.. toctree:: - :maxdepth: 1 - - predict/swig_py_paddle_en.rst + v2/model_configs.rst \ No newline at end of file diff --git a/doc/api/data_provider/dataprovider_cn.rst b/doc/api/v1/data_provider/dataprovider_cn.rst similarity index 100% rename from doc/api/data_provider/dataprovider_cn.rst rename to doc/api/v1/data_provider/dataprovider_cn.rst diff --git a/doc/api/data_provider/dataprovider_en.rst b/doc/api/v1/data_provider/dataprovider_en.rst similarity index 100% rename from doc/api/data_provider/dataprovider_en.rst rename to doc/api/v1/data_provider/dataprovider_en.rst diff --git a/doc/api/data_provider/pydataprovider2_cn.rst b/doc/api/v1/data_provider/pydataprovider2_cn.rst similarity index 100% rename from doc/api/data_provider/pydataprovider2_cn.rst rename to doc/api/v1/data_provider/pydataprovider2_cn.rst diff --git a/doc/api/data_provider/pydataprovider2_en.rst b/doc/api/v1/data_provider/pydataprovider2_en.rst similarity index 100% rename from doc/api/data_provider/pydataprovider2_en.rst rename to doc/api/v1/data_provider/pydataprovider2_en.rst diff --git a/doc/api/data_provider/src/mnist_config.py b/doc/api/v1/data_provider/src/mnist_config.py similarity index 100% rename from doc/api/data_provider/src/mnist_config.py rename to doc/api/v1/data_provider/src/mnist_config.py diff --git a/doc/api/data_provider/src/mnist_provider.dict.py b/doc/api/v1/data_provider/src/mnist_provider.dict.py similarity index 100% rename from doc/api/data_provider/src/mnist_provider.dict.py rename to doc/api/v1/data_provider/src/mnist_provider.dict.py diff --git a/doc/api/data_provider/src/mnist_train.txt b/doc/api/v1/data_provider/src/mnist_train.txt similarity index 100% rename from doc/api/data_provider/src/mnist_train.txt rename to doc/api/v1/data_provider/src/mnist_train.txt diff --git a/doc/api/data_provider/src/sentimental_config.py b/doc/api/v1/data_provider/src/sentimental_config.py similarity index 100% rename from doc/api/data_provider/src/sentimental_config.py rename to doc/api/v1/data_provider/src/sentimental_config.py diff --git a/doc/api/data_provider/src/sentimental_provider.py b/doc/api/v1/data_provider/src/sentimental_provider.py similarity index 100% rename from doc/api/data_provider/src/sentimental_provider.py rename to doc/api/v1/data_provider/src/sentimental_provider.py diff --git a/doc/api/data_provider/src/sentimental_train.txt b/doc/api/v1/data_provider/src/sentimental_train.txt similarity index 100% rename from doc/api/data_provider/src/sentimental_train.txt rename to doc/api/v1/data_provider/src/sentimental_train.txt diff --git a/doc/api/data_provider/src/train.list b/doc/api/v1/data_provider/src/train.list similarity index 100% rename from doc/api/data_provider/src/train.list rename to doc/api/v1/data_provider/src/train.list diff --git a/doc/api/v1/index_cn.rst b/doc/api/v1/index_cn.rst new file mode 100644 index 0000000000..3718cd73a2 --- /dev/null +++ b/doc/api/v1/index_cn.rst @@ -0,0 +1,37 @@ +API中文手册 +============ + +DataProvider API +---------------- + +.. 
toctree:: + :maxdepth: 1 + + data_provider/dataprovider_cn.rst + data_provider/pydataprovider2_cn.rst + +.. _api_trainer_config: + +Model Config API +---------------- + +.. toctree:: + :maxdepth: 1 + + trainer_config_helpers/optimizers.rst + trainer_config_helpers/data_sources.rst + trainer_config_helpers/layers.rst + trainer_config_helpers/activations.rst + trainer_config_helpers/poolings.rst + trainer_config_helpers/networks.rst + trainer_config_helpers/evaluators.rst + trainer_config_helpers/attrs.rst + + +Applications API +---------------- + +.. toctree:: + :maxdepth: 1 + + predict/swig_py_paddle_cn.rst diff --git a/doc/api/v1/index_en.rst b/doc/api/v1/index_en.rst new file mode 100644 index 0000000000..10c297a71d --- /dev/null +++ b/doc/api/v1/index_en.rst @@ -0,0 +1,37 @@ +API +=== + +DataProvider API +---------------- + +.. toctree:: + :maxdepth: 1 + + data_provider/dataprovider_en.rst + data_provider/pydataprovider2_en.rst + +.. _api_trainer_config: + +Model Config API +---------------- + +.. toctree:: + :maxdepth: 1 + + trainer_config_helpers/optimizers.rst + trainer_config_helpers/data_sources.rst + trainer_config_helpers/layers.rst + trainer_config_helpers/activations.rst + trainer_config_helpers/poolings.rst + trainer_config_helpers/networks.rst + trainer_config_helpers/evaluators.rst + trainer_config_helpers/attrs.rst + + +Applications API +---------------- + +.. toctree:: + :maxdepth: 1 + + predict/swig_py_paddle_en.rst diff --git a/doc/api/predict/src/predict_sample.py b/doc/api/v1/predict/src/predict_sample.py similarity index 100% rename from doc/api/predict/src/predict_sample.py rename to doc/api/v1/predict/src/predict_sample.py diff --git a/doc/api/predict/swig_py_paddle_cn.rst b/doc/api/v1/predict/swig_py_paddle_cn.rst similarity index 100% rename from doc/api/predict/swig_py_paddle_cn.rst rename to doc/api/v1/predict/swig_py_paddle_cn.rst diff --git a/doc/api/predict/swig_py_paddle_en.rst b/doc/api/v1/predict/swig_py_paddle_en.rst similarity index 100% rename from doc/api/predict/swig_py_paddle_en.rst rename to doc/api/v1/predict/swig_py_paddle_en.rst diff --git a/doc/api/trainer_config_helpers/activations.rst b/doc/api/v1/trainer_config_helpers/activations.rst similarity index 100% rename from doc/api/trainer_config_helpers/activations.rst rename to doc/api/v1/trainer_config_helpers/activations.rst diff --git a/doc/api/trainer_config_helpers/attrs.rst b/doc/api/v1/trainer_config_helpers/attrs.rst similarity index 100% rename from doc/api/trainer_config_helpers/attrs.rst rename to doc/api/v1/trainer_config_helpers/attrs.rst diff --git a/doc/api/trainer_config_helpers/data_sources.rst b/doc/api/v1/trainer_config_helpers/data_sources.rst similarity index 100% rename from doc/api/trainer_config_helpers/data_sources.rst rename to doc/api/v1/trainer_config_helpers/data_sources.rst diff --git a/doc/api/trainer_config_helpers/evaluators.rst b/doc/api/v1/trainer_config_helpers/evaluators.rst similarity index 100% rename from doc/api/trainer_config_helpers/evaluators.rst rename to doc/api/v1/trainer_config_helpers/evaluators.rst diff --git a/doc/api/trainer_config_helpers/layers.rst b/doc/api/v1/trainer_config_helpers/layers.rst similarity index 100% rename from doc/api/trainer_config_helpers/layers.rst rename to doc/api/v1/trainer_config_helpers/layers.rst diff --git a/doc/api/trainer_config_helpers/networks.rst b/doc/api/v1/trainer_config_helpers/networks.rst similarity index 100% rename from doc/api/trainer_config_helpers/networks.rst rename to 
doc/api/v1/trainer_config_helpers/networks.rst diff --git a/doc/api/trainer_config_helpers/optimizers.rst b/doc/api/v1/trainer_config_helpers/optimizers.rst similarity index 100% rename from doc/api/trainer_config_helpers/optimizers.rst rename to doc/api/v1/trainer_config_helpers/optimizers.rst diff --git a/doc/api/trainer_config_helpers/poolings.rst b/doc/api/v1/trainer_config_helpers/poolings.rst similarity index 100% rename from doc/api/trainer_config_helpers/poolings.rst rename to doc/api/v1/trainer_config_helpers/poolings.rst diff --git a/doc/api/v2/model_configs.rst b/doc/api/v2/model_configs.rst new file mode 100644 index 0000000000..a9f33b33ef --- /dev/null +++ b/doc/api/v2/model_configs.rst @@ -0,0 +1,6 @@ +====== +Layers +====== + +.. automodule:: paddle.v2.layer + :members: diff --git a/doc/templates/conf.py.cn.in b/doc/templates/conf.py.cn.in index 418d718fbd..6dc48704bc 100644 --- a/doc/templates/conf.py.cn.in +++ b/doc/templates/conf.py.cn.in @@ -15,13 +15,19 @@ import sys import os, subprocess import shlex from recommonmark import parser, transform +try: + import py_paddle + import paddle + import paddle.v2 +except ImportError: + print("Must install paddle python package before generating documentation") + sys.exit(1) MarkdownParser = parser.CommonMarkParser AutoStructify = transform.AutoStructify # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, '@PROJ_ROOT@/python') templates_path = ["@PROJ_ROOT@/doc_theme/templates"] # -- General configuration ------------------------------------------------ diff --git a/doc/templates/conf.py.en.in b/doc/templates/conf.py.en.in index e96c25cb75..b477f0120c 100644 --- a/doc/templates/conf.py.en.in +++ b/doc/templates/conf.py.en.in @@ -15,14 +15,20 @@ import sys import os, subprocess import shlex from recommonmark import parser, transform +try: + import py_paddle + import paddle + import paddle.v2 +except ImportError: + print("Must install paddle python package before generating documentation") + sys.exit(1) + MarkdownParser = parser.CommonMarkParser AutoStructify = transform.AutoStructify # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, '@PROJ_ROOT@/python') - templates_path = ["@PROJ_ROOT@/doc_theme/templates"] # -- General configuration ------------------------------------------------ diff --git a/doc/tutorials/quick_start/index_en.md b/doc/tutorials/quick_start/index_en.md index 70dec2eb2a..ca110431cf 100644 --- a/doc/tutorials/quick_start/index_en.md +++ b/doc/tutorials/quick_start/index_en.md @@ -156,14 +156,14 @@ define_py_data_sources2(train_list='data/train.list', obj="process", args={"dictionary": word_dict}) ``` -You can refer to the following link for more detailed examples and data formats: PyDataProvider2. +You can refer to the following link for more detailed examples and data formats: PyDataProvider2. ## Network Architecture We will describe four kinds of network architectures in this section.
![](./src/PipelineNetwork_en.jpg)
First, you will build a logistic regression model. Later, you will also get chance to build other more powerful network architectures. -For more detailed documentation, you could refer to: layer documentation. All configuration files are in `demo/quick_start` directory. +For more detailed documentation, you could refer to: layer documentation. All configuration files are in `demo/quick_start` directory. ### Logistic Regression The architecture is illustrated in the following picture: @@ -366,7 +366,7 @@ You can use single layer LSTM model with Dropout for our text classification pro
## Optimization Algorithm -Optimization algorithms include Momentum, RMSProp, AdaDelta, AdaGrad, Adam, and Adamax. You can use Adam optimization method here, with L2 regularization and gradient clipping, because Adam has been proved to work very well for training recurrent neural network. +Optimization algorithms include Momentum, RMSProp, AdaDelta, AdaGrad, Adam, and Adamax. You can use Adam optimization method here, with L2 regularization and gradient clipping, because Adam has been proved to work very well for training recurrent neural network. ```python settings(batch_size=128, @@ -407,7 +407,7 @@ paddle train \ --init_model_path=./output/pass-0000x ``` -We will give an example of performing prediction using Recurrent model on a dataset with no labels. You can refer to Python Prediction API tutorial,or other demo for the prediction process using Python. You can also use the following script for inference or evaluation. +We will give an example of performing prediction using Recurrent model on a dataset with no labels. You can refer to Python Prediction API tutorial,or other demo for the prediction process using Python. You can also use the following script for inference or evaluation. inference script (predict.sh): diff --git a/paddle/scripts/travis/docs.sh b/paddle/scripts/travis/docs.sh index 6b43cad20b..53e998ef6c 100755 --- a/paddle/scripts/travis/docs.sh +++ b/paddle/scripts/travis/docs.sh @@ -2,8 +2,12 @@ # Add set -e, cd to directory. source ./common.sh - # Compile Documentation only. +cmake .. -DCMAKE_BUILD_TYPE=Debug -DCMAKE_Fortran_COMPILER=/usr/bin/gfortran-4.8 -DWITH_GPU=OFF -DWITH_DOC=OFF -DWITH_STYLE_CHECK=OFF ${EXTRA_CMAKE_OPTS} +mkdir output +make DESTDIR=./output install -j `nproc` +pip install ./output/usr/local/opt/paddle/share/wheels/* +rm -rf * cmake .. -DCMAKE_BUILD_TYPE=Debug -DCMAKE_Fortran_COMPILER=/usr/bin/gfortran-4.8 -DWITH_GPU=OFF -DWITH_DOC=ON ${EXTRA_CMAKE_OPTS} make paddle_docs paddle_docs_cn @@ -25,26 +29,41 @@ TARGET_BRANCH="gh-pages" # Only deploy master branch to build latest documentation. SOURCE_BRANCH="master" -# If is not a Github pull request, and in master branch. -if [ "$TRAVIS_PULL_REQUEST" != "false" -o "$TRAVIS_BRANCH" != "$SOURCE_BRANCH" ]; then - exit 0 -fi - # Clone the repo to output directory git clone $REPO output cd output -# checkout github page branch -git checkout $TARGET_BRANCH || git checkout --orphan $TARGET_BRANCH +function deploy_docs() { + SOURCE_BRANCH=$1 + DIR=$2 + # If is not a Github pull request + if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then + exit 0 + fi + # If it is not watched branch. + if [ "$TRAVIS_BRANCH" != "$SOURCE_BRANCH" ]; then + return + fi -# remove old docs. mv new docs. -rm -rf doc doc_cn -mv ../doc/cn/html doc_cn -mv ../doc/en/html doc + # checkout github page branch + git checkout $TARGET_BRANCH || git checkout --orphan $TARGET_BRANCH + + mkdir -p ${DIR} + # remove old docs. mv new docs. + set +e + rm -rf ${DIR}/doc ${DIR}/doc_cn + set -e + mv ../doc/cn/html ${DIR}/doc_cn + mv ../doc/en/html ${DIR}/doc + git add . +} + +deploy_docs "master" "." +deploy_docs "develop" "./develop/" # Check is there anything changed. set +e -git diff --exit-code >/dev/null +git diff --cached --exit-code >/dev/null if [ $? -eq 0 ]; then echo "No changes to the output on this push; exiting." 
exit 0 @@ -57,7 +76,6 @@ if [ -n $SSL_KEY ]; then # Only push updated docs for github.com/PaddlePaddle/P git config user.name "Travis CI" git config user.email "paddle-dev@baidu.com" git commit -m "Deploy to GitHub Pages: ${SHA}" - # Set ssh private key openssl aes-256-cbc -K $SSL_KEY -iv $SSL_IV -in ../../paddle/scripts/travis/deploy_key.enc -out deploy_key -d chmod 600 deploy_key diff --git a/python/paddle/v2/layer.py b/python/paddle/v2/layer.py index 67111f1315..0aa5391910 100644 --- a/python/paddle/v2/layer.py +++ b/python/paddle/v2/layer.py @@ -12,58 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. """ -Before this new package paddle.v2.layer, users would need to use functions -in paddle.trainer_config_helpers.layers to configure networks. - -The Old Way: -========= -This old way requires that the creation of a network be defined in a Python -function, say network_config, and that this Python function being passed to -paddle.trainer_config_helpers.parse_network_config for the creation of -protobuf message description of this network. - -```python -def network_config(): - img = paddle.trainer_config_helpers.data_layer(name="pixel", size=784) - inference = paddle.trainer_config_helpers.fc_layer( - input=img, - size=10, - act=paddle.trainer_config_helpers.SoftmaxActivation()) - cost = paddle.trainer_config_helpers.classification_cost( - input=inference, - label=paddle.trainer_config_helpers.data_layer(name="label", size=10)) - -proto_desc = parse_network_config(network_config) -``` - -When parse_network_config executes network_config, those layer definition -functions like data_layer and fc_layer would change some Python global variables, -so that after the execution, parse_network_config could collect information from -these global variables and generates the protobuf message. - - - -The New Way: -========= -In this PR, we define a function in paddle.v2.layer which creates a Python -class for each layer creation function in paddle.trainer_config_helpers.layers. -Users can use create a network as follows: - -```python -img = paddle.v2.layer.data(name="pixel", size=784) -inference = paddle.v2.layer.fc(input=img, size=10, act=paddle.v2.layer.Softmax()) -cost = paddle.v2.layer.classification( - input=inference, - label=paddle.v2.layer.data(name="label", size=10)) - -parameters = paddle.v2.parameters.create(cost) -``` - -This new way doesn't require those invocations to layer definition functions -to be in a Python function but could be anywhere. - -Also, the creation of a protobuf message is hidden in the invocation of -paddle.v2.parameters.create, no longer exposed to users. +`paddle.v2.layer` is a part of model config packages in paddle.v2. In API v2, +we want to make Paddle a plain Python package. The model config package defined +the way how to configure a neural network topology in Paddle Python code. + +The primary usage shows below. + +.. code-block:: python + + import paddle.v2 as paddle + + img = paddle.layer.data(name='img', type=paddle.data_type.dense_vector(784)) + hidden = paddle.layer.fc(input=img, size=200) + prediction = paddle.layer.fc(input=hidden, size=10, + act=paddle.activation.Softmax()) + + # use prediction instance where needed. 
+ parameters = paddle.v2.parameters.create(cost) """ from config_base import Layer, __convert_to_v2__ import paddle.trainer_config_helpers as conf_helps From 3219c831a140303e7e03de70e38ae7ebda2f2c26 Mon Sep 17 00:00:00 2001 From: Helin Wang Date: Thu, 2 Mar 2017 22:27:46 +0000 Subject: [PATCH 85/87] Rename Argument::sumCost to Argument::cost since Argument should not know about cost. cost is Argument, but argument does not have to be cost. --- paddle/api/Arguments.cpp | 4 +--- paddle/api/PaddleAPI.h | 2 +- paddle/api/test/testArguments.py | 2 +- paddle/gserver/tests/LayerGradUtil.cpp | 6 +++--- paddle/parameter/Argument.h | 2 +- paddle/trainer/Tester.cpp | 2 +- paddle/trainer/Trainer.cpp | 8 ++++---- paddle/trainer/TrainerInternal.cpp | 2 +- 8 files changed, 13 insertions(+), 15 deletions(-) diff --git a/paddle/api/Arguments.cpp b/paddle/api/Arguments.cpp index a3f4bfffc9..d49b189e25 100644 --- a/paddle/api/Arguments.cpp +++ b/paddle/api/Arguments.cpp @@ -144,9 +144,7 @@ void Arguments::setSlotSequenceDim(size_t idx, IVector* vec) throw(RangeError) { a.cpuSequenceDims = m->cast(vec->getSharedPtr()); } -float Arguments::sumCosts() const { - return paddle::Argument::sumCosts(m->outputs); -} +float Arguments::sum() const { return paddle::Argument::sum(m->outputs); } int64_t Arguments::getBatchSize(size_t idx) const throw(RangeError) { auto& a = m->getArg(idx); diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index 762f86ac79..c4f5dca26c 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -453,7 +453,7 @@ public: IVector* vec) throw(RangeError); void setSlotSequenceDim(size_t idx, IVector* vec) throw(RangeError); - float sumCosts() const; + float sum() const; private: static Arguments* createByPaddleArgumentVector(void* ptr); diff --git a/paddle/api/test/testArguments.py b/paddle/api/test/testArguments.py index a04a805d7a..9fe44de94e 100644 --- a/paddle/api/test/testArguments.py +++ b/paddle/api/test/testArguments.py @@ -22,7 +22,7 @@ class TestArguments(unittest.TestCase): args = swig_paddle.Arguments.createArguments(1) args.setSlotValue(0, m) - self.assertAlmostEqual(27.0, args.sumCosts()) + self.assertAlmostEqual(27.0, args.sum()) mat = args.getSlotValue(0) assert isinstance(mat, swig_paddle.Matrix) diff --git a/paddle/gserver/tests/LayerGradUtil.cpp b/paddle/gserver/tests/LayerGradUtil.cpp index ae016e74ea..7617af10ba 100644 --- a/paddle/gserver/tests/LayerGradUtil.cpp +++ b/paddle/gserver/tests/LayerGradUtil.cpp @@ -24,7 +24,7 @@ real getCostSum(LayerPtr& testLayer, MatrixPtr weights) { if (weights) { outArgs[0].value->dotMul(*outArgs[0].value, *weights); } - return Argument::sumCosts(outArgs); + return Argument::sum(outArgs); } real getDiffAndPrint(real newCost1, @@ -241,7 +241,7 @@ void testBatchState(LayerPtr testLayer, std::vector args; args.push_back(out); - EXPECT_EQ(0, Argument::sumCosts(args)) << "testBatchState failed"; + EXPECT_EQ(0, Argument::sum(args)) << "testBatchState failed"; for (size_t seqId = 0; seqId < numSequences; ++seqId) { start[seqId] += seqLens[seqId]; } @@ -672,7 +672,7 @@ void testLayerGradKernel(TestConfig testConf, outArgs[0].value->dotMul(*testLayer->getOutput().value, *weights); } - real cost = Argument::sumCosts(outArgs); + real cost = Argument::sum(outArgs); LOG(INFO) << " cost " << cost; EXPECT_FALSE(std::isnan(cost)); diff --git a/paddle/parameter/Argument.h b/paddle/parameter/Argument.h index 178c068b93..9ef44be0cb 100644 --- a/paddle/parameter/Argument.h +++ b/paddle/parameter/Argument.h @@ -163,7 +163,7 @@ struct Argument 
{ : sequenceStartPositions->getData(false); } - static inline real sumCosts(const std::vector& arguments) { + static inline real sum(const std::vector& arguments) { real cost = 0; for (auto& arg : arguments) { if (arg.value) { diff --git a/paddle/trainer/Tester.cpp b/paddle/trainer/Tester.cpp index 13aa28ae5d..80664fa877 100644 --- a/paddle/trainer/Tester.cpp +++ b/paddle/trainer/Tester.cpp @@ -208,7 +208,7 @@ real Tester::forwardOneBatch(const DataBatch& dataBatch, return 0.0; // In this case, there is no meaning to calculate cost } - return Argument::sumCosts(outArgs); + return Argument::sum(outArgs); } void Tester::testOnePassBatch(int passId) { diff --git a/paddle/trainer/Trainer.cpp b/paddle/trainer/Trainer.cpp index bd84545375..b68e29cd5e 100644 --- a/paddle/trainer/Trainer.cpp +++ b/paddle/trainer/Trainer.cpp @@ -310,7 +310,7 @@ real Trainer::checkGradient() { std::vector outArgs; trainerInternal_.getGradientMachine()->forward(inArgs, &outArgs, PASS_GC); - real cost = Argument::sumCosts(outArgs); + real cost = Argument::sum(outArgs); LOG(INFO) << "original cost=" << cost; trainerInternal_.getGradientMachine()->backward(); @@ -340,7 +340,7 @@ real Trainer::checkGradient() { parameter->getBuf(PARAMETER_VALUE)->copyFrom(newPara); parameter->setValueUpdated(); trainerInternal_.getGradientMachine()->forward(inArgs, &outArgs, PASS_GC); - real newCost1 = Argument::sumCosts(outArgs); + real newCost1 = Argument::sum(outArgs); for (size_t i = 0; i < dim; ++i) { newp[i] = oldp[i] - step * d[i]; @@ -349,7 +349,7 @@ real Trainer::checkGradient() { parameter->getBuf(PARAMETER_VALUE)->copyFrom(newPara); parameter->setValueUpdated(); trainerInternal_.getGradientMachine()->forward(inArgs, &outArgs, PASS_GC); - real newCost2 = Argument::sumCosts(outArgs); + real newCost2 = Argument::sum(outArgs); real trueDelta = 0.5 * (newCost1 - newCost2); real diff = (1e-20 + trueDelta) / (1e-20 + delta) - 1; @@ -575,7 +575,7 @@ real Trainer::calcGradient(const DataBatch& dataBatch, trainerInternal_.getGradientMachine()->forwardBackward( inArgs, &outArgs, PASS_TRAIN); - real cost = Argument::sumCosts(outArgs); + real cost = Argument::sum(outArgs); offset = 0; for (auto& para : parameters) { diff --git a/paddle/trainer/TrainerInternal.cpp b/paddle/trainer/TrainerInternal.cpp index f3b465b444..4c5d4a0913 100644 --- a/paddle/trainer/TrainerInternal.cpp +++ b/paddle/trainer/TrainerInternal.cpp @@ -134,7 +134,7 @@ void TrainerInternal::trainOneBatch(int64_t batchId, real cost = 0; { REGISTER_TIMER("sumCost"); - cost = Argument::sumCosts(*outArgs); + cost = Argument::sum(*outArgs); } if (batchId % intconfig_->log_period == 0) { From 61b4c7170a7f55bdd2c6db57b58e9d797fc3522f Mon Sep 17 00:00:00 2001 From: Helin Wang Date: Thu, 2 Mar 2017 22:01:42 +0000 Subject: [PATCH 86/87] remove inferencer and Inference from __All__, since infer is sufficent, they are implementation detail. rename inferencer.py to inference.py --- python/paddle/v2/__init__.py | 6 +++--- python/paddle/v2/{inferencer.py => inference.py} | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) rename python/paddle/v2/{inferencer.py => inference.py} (98%) diff --git a/python/paddle/v2/__init__.py b/python/paddle/v2/__init__.py index 8ab8cd2f85..f663ef735d 100644 --- a/python/paddle/v2/__init__.py +++ b/python/paddle/v2/__init__.py @@ -25,14 +25,14 @@ from . import dataset from . 
import reader import attr import pooling -import inferencer +import inference import networks import py_paddle.swig_paddle as api __all__ = [ 'optimizer', 'layer', 'activation', 'parameters', 'init', 'trainer', 'event', 'data_type', 'attr', 'pooling', 'data_feeder', 'dataset', 'reader', - 'topology', 'networks', 'inferencer', 'infer' + 'topology', 'networks', 'infer' ] @@ -44,4 +44,4 @@ def init(**kwargs): api.initPaddle(*args) -infer = inferencer.infer +infer = inference.infer diff --git a/python/paddle/v2/inferencer.py b/python/paddle/v2/inference.py similarity index 98% rename from python/paddle/v2/inferencer.py rename to python/paddle/v2/inference.py index ac03b016c9..476fd3fa45 100644 --- a/python/paddle/v2/inferencer.py +++ b/python/paddle/v2/inference.py @@ -5,7 +5,7 @@ from data_feeder import DataFeeder import itertools import numpy -__all__ = ['Inference', 'infer'] +__all__ = ['infer'] class Inference(object): From d6c6a996b1d872a4ee845ee662364a7974635f73 Mon Sep 17 00:00:00 2001 From: Helin Wang Date: Sat, 4 Mar 2017 02:28:53 +0000 Subject: [PATCH 87/87] dataset reader for wmt14 example usage: import paddle.v2 as paddle if __name__ == '__main__': dict_en, dict_fr = paddle.dataset.wmt14.build_dict() train = paddle.dataset.wmt14.train(dict_en, dict_fr) test = paddle.dataset.wmt14.test(dict_en, dict_fr) total_train = 0 for i in train(): total_train += 1 total_test = 0 for i in test(): total_test += 1 print total_train, total_test --- python/paddle/v2/dataset/__init__.py | 3 +- python/paddle/v2/dataset/wmt14.py | 142 +++++++++++++++++++++++++++ 2 files changed, 144 insertions(+), 1 deletion(-) create mode 100644 python/paddle/v2/dataset/wmt14.py diff --git a/python/paddle/v2/dataset/__init__.py b/python/paddle/v2/dataset/__init__.py index 82f11a7c41..6c371d3c9b 100644 --- a/python/paddle/v2/dataset/__init__.py +++ b/python/paddle/v2/dataset/__init__.py @@ -20,8 +20,9 @@ import movielens import conll05 import uci_housing import sentiment +import wmt14 __all__ = [ 'mnist', 'imikolov', 'imdb', 'cifar', 'movielens', 'conll05', 'sentiment' - 'uci_housing' + 'uci_housing', 'wmt14' ] diff --git a/python/paddle/v2/dataset/wmt14.py b/python/paddle/v2/dataset/wmt14.py new file mode 100644 index 0000000000..9904848b5d --- /dev/null +++ b/python/paddle/v2/dataset/wmt14.py @@ -0,0 +1,142 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +wmt14 dataset +""" +import paddle.v2.dataset.common +import tarfile +import os.path +import itertools + +__all__ = ['train', 'test', 'build_dict'] + +URL_DEV_TEST = 'http://www-lium.univ-lemans.fr/~schwenk/cslm_joint_paper/data/dev+test.tgz' +MD5_DEV_TEST = '7d7897317ddd8ba0ae5c5fa7248d3ff5' +URL_TRAIN = 'http://localhost:8000/train.tgz' +MD5_TRAIN = '72de99da2830ea5a3a2c4eb36092bbc7' + + +def word_count(f, word_freq=None): + add = paddle.v2.dataset.common.dict_add + if word_freq == None: + word_freq = {} + + for l in f: + for w in l.strip().split(): + add(word_freq, w) + add(word_freq, '') + add(word_freq, '') + + return word_freq + + +def get_word_dix(word_freq): + TYPO_FREQ = 50 + word_freq = filter(lambda x: x[1] > TYPO_FREQ, word_freq.items()) + word_freq_sorted = sorted(word_freq, key=lambda x: (-x[1], x[0])) + words, _ = list(zip(*word_freq_sorted)) + word_idx = dict(zip(words, xrange(len(words)))) + word_idx[''] = len(words) + return word_idx + + +def get_word_freq(train, dev): + word_freq = word_count(train, word_count(dev)) + if '' in word_freq: + # remove for now, since we will set it as last index + del word_freq[''] + return word_freq + + +def build_dict(): + base_dir = './wmt14-data' + train_en_filename = base_dir + '/train/train.en' + train_fr_filename = base_dir + '/train/train.fr' + dev_en_filename = base_dir + '/dev/ntst1213.en' + dev_fr_filename = base_dir + '/dev/ntst1213.fr' + + if not os.path.exists(train_en_filename) or not os.path.exists( + train_fr_filename): + with tarfile.open( + paddle.v2.dataset.common.download(URL_TRAIN, 'wmt14', + MD5_TRAIN)) as tf: + tf.extractall(base_dir) + + if not os.path.exists(dev_en_filename) or not os.path.exists( + dev_fr_filename): + with tarfile.open( + paddle.v2.dataset.common.download(URL_DEV_TEST, 'wmt14', + MD5_DEV_TEST)) as tf: + tf.extractall(base_dir) + + f_en = open(train_en_filename) + f_fr = open(train_fr_filename) + f_en_dev = open(dev_en_filename) + f_fr_dev = open(dev_fr_filename) + + word_freq_en = get_word_freq(f_en, f_en_dev) + word_freq_fr = get_word_freq(f_fr, f_fr_dev) + + f_en.close() + f_fr.close() + f_en_dev.close() + f_fr_dev.close() + + return get_word_dix(word_freq_en), get_word_dix(word_freq_fr) + + +def reader_creator(directory, path_en, path_fr, URL, MD5, dict_en, dict_fr): + def reader(): + if not os.path.exists(path_en) or not os.path.exists(path_fr): + with tarfile.open( + paddle.v2.dataset.common.download(URL, 'wmt14', MD5)) as tf: + tf.extractall(directory) + + f_en = open(path_en) + f_fr = open(path_fr) + UNK_en = dict_en[''] + UNK_fr = dict_fr[''] + + for en, fr in itertools.izip(f_en, f_fr): + src_ids = [dict_en.get(w, UNK_en) for w in en.strip().split()] + tar_ids = [ + dict_fr.get(w, UNK_fr) + for w in [''] + fr.strip().split() + [''] + ] + + # remove sequence whose length > 80 in training mode + if len(src_ids) == 0 or len(tar_ids) <= 1 or len( + src_ids) > 80 or len(tar_ids) > 80: + continue + + yield src_ids, tar_ids[:-1], tar_ids[1:] + + f_en.close() + f_fr.close() + + return reader + + +def train(dict_en, dict_fr): + directory = './wmt14-data' + return reader_creator(directory, directory + '/train/train.en', + directory + '/train/train.fr', URL_TRAIN, MD5_TRAIN, + dict_en, dict_fr) + + +def test(dict_en, dict_fr): + directory = './wmt14-data' + return reader_creator(directory, directory + '/dev/ntst1213.en', + directory + '/dev/ntst1213.fr', URL_DEV_TEST, + MD5_DEV_TEST, dict_en, dict_fr)