Merge branch 'master' of https://github.com/PaddlePaddle/Paddle into develop
commit 94e2dcb148
@@ -0,0 +1,52 @@

import paddle.v2 as paddle
import numpy as np

# init paddle
paddle.init(use_gpu=False)

# network config
x = paddle.layer.data(name='x', type=paddle.data_type.dense_vector(2))
y_predict = paddle.layer.fc(input=x, size=1, act=paddle.activation.Linear())
y = paddle.layer.data(name='y', type=paddle.data_type.dense_vector(1))
cost = paddle.layer.mse_cost(input=y_predict, label=y)

# create parameters
parameters = paddle.parameters.create(cost)

# create optimizer
optimizer = paddle.optimizer.Momentum(momentum=0)

# create trainer
trainer = paddle.trainer.SGD(cost=cost,
                             parameters=parameters,
                             update_equation=optimizer)


# event_handler to print training info
def event_handler(event):
    if isinstance(event, paddle.event.EndIteration):
        if event.batch_id % 1 == 0:
            print "Pass %d, Batch %d, Cost %f" % (event.pass_id, event.batch_id,
                                                  event.cost)


# define training dataset reader
def train_reader():
    train_x = np.array([[1, 1], [1, 2], [3, 4], [5, 2]])
    train_y = np.array([-2, -3, -7, -7])

    def reader():
        for i in xrange(train_y.shape[0]):
            yield train_x[i], train_y[i]

    return reader


# define feeding map
feeding = {'x': 0, 'y': 1}

# training
trainer.train(
    reader=paddle.batch(
        train_reader(), batch_size=1),
    feeding=feeding,
    event_handler=event_handler,
    num_passes=100)
@@ -1,68 +0,0 @@

graph pp_topology {
    rankdir=BT;
    subgraph cluster_node0 {
        style=filled;
        color=lightgrey;
        node [style=filled, color=white, shape=box];
        label = "Machine 0"

        pserver0 [label="Parameter \n Server 0"]
        trainer0 [label="Trainer 0"]
    }
    subgraph cluster_node1 {
        style=filled;
        color=lightgrey;
        node [style=filled, color=white, shape=box];
        label = "Machine 1"

        pserver1 [label="Parameter \n Server 1"]
        trainer1 [label="Trainer 1"]
    }

    subgraph cluster_node2 {
        style=filled;
        color=lightgrey;
        node [style=filled, color=white, shape=box];
        label = "Machine 2"

        pserver2 [label="Parameter \n Server 2"]
        trainer2 [label="Trainer 2"]
    }

    subgraph cluster_node3 {
        style=filled;
        color=lightgrey;
        node [style=filled, color=white, shape=box];
        label = "Machine 3"

        pserver3 [label="Parameter \n Server 3"]
        trainer3 [label="Trainer 3"]
    }

    data [label="Data", shape=hexagon]

    trainer0 -- pserver0
    trainer0 -- pserver1
    trainer0 -- pserver2
    trainer0 -- pserver3

    trainer1 -- pserver0
    trainer1 -- pserver1
    trainer1 -- pserver2
    trainer1 -- pserver3

    trainer2 -- pserver0
    trainer2 -- pserver1
    trainer2 -- pserver2
    trainer2 -- pserver3

    trainer3 -- pserver0
    trainer3 -- pserver1
    trainer3 -- pserver2
    trainer3 -- pserver3

    data -- trainer0
    data -- trainer1
    data -- trainer2
    data -- trainer3
}
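
The graph above sketches a four-machine data-parallel topology: each machine hosts one trainer and one parameter server, every trainer exchanges parameters with every parameter server, and the training data is fed to all trainers. To turn the dot source into an image, something like the following works, assuming the source is saved as pp_topology.dot and the `graphviz` Python package is installed (both names are assumptions, not part of the repo).

# Hypothetical rendering step: read the dot file and emit pp_topology.png.
from graphviz import Source

Source.from_file('pp_topology.dot').render('pp_topology', format='png', cleanup=True)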
@@ -1,29 +0,0 @@

from paddle.trainer_config_helpers import *

define_py_data_sources2(
    train_list='train.list',
    test_list='test.list',
    module='provider',
    obj='process')

settings(
    batch_size=128,
    learning_rate=1e-3,
    learning_method=AdamOptimizer(),
    regularization=L2Regularization(0.5))

img = data_layer(name='pixel', size=28 * 28)

hidden1 = simple_img_conv_pool(
    input=img, filter_size=3, num_filters=32, pool_size=3, num_channel=1)

hidden2 = fc_layer(
    input=hidden1,
    size=200,
    act=TanhActivation(),
    layer_attr=ExtraAttr(drop_rate=0.5))
predict = fc_layer(input=hidden2, size=10, act=SoftmaxActivation())

outputs(
    classification_cost(
        input=predict, label=data_layer(
            name='label', size=10)))
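
The config refers to a provider module with a process object via define_py_data_sources2, but that file is not part of this diff. Below is a minimal sketch of what such a provider.py might look like under the legacy PyDataProvider2 API; the field names mirror the 'pixel' and 'label' layers above, and the body only yields placeholder data, so the real parsing of the entries listed in train.list / test.list still has to be filled in.

# Hypothetical provider.py sketch for the config above.
from paddle.trainer.PyDataProvider2 import *


@provider(input_types={'pixel': dense_vector(28 * 28),
                       'label': integer_value(10)})
def process(settings, filename):
    # `filename` is one line from train.list / test.list; parse the real
    # image data here. Placeholder: a single all-zero image labelled 0.
    yield {'pixel': [0.0] * (28 * 28), 'label': 0}

A config like this is typically launched with the legacy `paddle train --config=<config file>` command line rather than the v2 Python trainer shown in the first file.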