commit e7c239893a
@@ -0,0 +1,74 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.v2 as paddle

__all__ = ['resnet_cifar10']


def conv_bn_layer(input,
                  ch_out,
                  filter_size,
                  stride,
                  padding,
                  active_type=paddle.activation.Relu(),
                  ch_in=None):
    tmp = paddle.layer.img_conv(
        input=input,
        filter_size=filter_size,
        num_channels=ch_in,
        num_filters=ch_out,
        stride=stride,
        padding=padding,
        act=paddle.activation.Linear(),
        bias_attr=False)
    return paddle.layer.batch_norm(input=tmp, act=active_type)


def shortcut(ipt, n_in, n_out, stride):
    # Project with a 1x1 conv only when the shape changes; otherwise identity.
    if n_in != n_out:
        return conv_bn_layer(ipt, n_out, 1, stride, 0,
                             paddle.activation.Linear())
    else:
        return ipt


def basicblock(ipt, ch_in, ch_out, stride):
    tmp = conv_bn_layer(ipt, ch_out, 3, stride, 1)
    tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, paddle.activation.Linear())
    short = shortcut(ipt, ch_in, ch_out, stride)
    return paddle.layer.addto(input=[tmp, short], act=paddle.activation.Relu())


def layer_warp(block_func, ipt, ch_in, features, count, stride):
    # Only the first block of a stage may change width or stride;
    # the remaining blocks keep the shape, so their shortcut is identity.
    tmp = block_func(ipt, ch_in, features, stride)
    for i in range(1, count):
        tmp = block_func(tmp, features, features, 1)
    return tmp


def resnet_cifar10(ipt, depth=32):
    # depth should be one of 20, 32, 44, 56, 110, 1202, i.e. 6n + 2
    assert (depth - 2) % 6 == 0
    n = (depth - 2) / 6
    conv1 = conv_bn_layer(
        ipt, ch_in=3, ch_out=16, filter_size=3, stride=1, padding=1)
    res1 = layer_warp(basicblock, conv1, 16, 16, n, 1)
    res2 = layer_warp(basicblock, res1, 16, 32, n, 2)
    res3 = layer_warp(basicblock, res2, 32, 64, n, 2)
    pool = paddle.layer.img_pool(
        input=res3, pool_size=8, stride=1, pool_type=paddle.pooling.Avg())
    return pool
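The 6n + 2 arithmetic that the assertion in resnet_cifar10 encodes is easy to sanity-check outside Paddle. A minimal sketch in plain Python (no Paddle required; the layer bookkeeping in the comment is my reading of the code above):

# Every supported depth is 6n + 2: three stages of n basic blocks,
# two 3x3 convs per block, plus the stem conv and the final classifier.
for depth in (20, 32, 44, 56, 110, 1202):
    assert (depth - 2) % 6 == 0
    n = (depth - 2) / 6
    print "depth %4d -> %3d blocks per stage" % (depth, n)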
@@ -0,0 +1,91 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import paddle.v2 as paddle
from api_v2_vgg import vgg_bn_drop
from api_v2_resnet import resnet_cifar10


def main():
    datadim = 3 * 32 * 32
    classdim = 10

    # PaddlePaddle init
    paddle.init(use_gpu=True, trainer_count=1)

    image = paddle.layer.data(
        name="image", type=paddle.data_type.dense_vector(datadim))

    # Add neural network config
    # option 1. resnet
    net = resnet_cifar10(image, depth=32)
    # option 2. vgg
    # net = vgg_bn_drop(image)

    out = paddle.layer.fc(input=net,
                          size=classdim,
                          act=paddle.activation.Softmax())

    lbl = paddle.layer.data(
        name="label", type=paddle.data_type.integer_value(classdim))
    cost = paddle.layer.classification_cost(input=out, label=lbl)

    # Create parameters
    parameters = paddle.parameters.create(cost)

    # Create optimizer
    momentum_optimizer = paddle.optimizer.Momentum(
        momentum=0.9,
        regularization=paddle.optimizer.L2Regularization(rate=0.0002 * 128),
        learning_rate=0.1 / 128.0,
        learning_rate_decay_a=0.1,
        learning_rate_decay_b=50000 * 100,
        learning_rate_schedule='discexp',
        batch_size=128)

    # End batch and end pass event handler
    def event_handler(event):
        if isinstance(event, paddle.event.EndIteration):
            if event.batch_id % 100 == 0:
                print "\nPass %d, Batch %d, Cost %f, %s" % (
                    event.pass_id, event.batch_id, event.cost, event.metrics)
            else:
                sys.stdout.write('.')
                sys.stdout.flush()
        if isinstance(event, paddle.event.EndPass):
            result = trainer.test(
                reader=paddle.reader.batched(
                    paddle.dataset.cifar.test10(), batch_size=128),
                reader_dict={'image': 0,
                             'label': 1})
            print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics)

    # Create trainer
    trainer = paddle.trainer.SGD(cost=cost,
                                 parameters=parameters,
                                 update_equation=momentum_optimizer)
    trainer.train(
        reader=paddle.reader.batched(
            paddle.reader.shuffle(
                paddle.dataset.cifar.train10(), buf_size=50000),
            batch_size=128),
        num_passes=5,
        event_handler=event_handler,
        reader_dict={'image': 0,
                     'label': 1})


if __name__ == '__main__':
    main()
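For reference, 'discexp' is the discrete exponential schedule; my reading (an assumption, not stated in this diff) is that the effective rate is learning_rate * decay_a ** floor(num_samples / decay_b). A standalone sketch of that curve:

def discexp_lr(base_lr, decay_a, decay_b, num_samples):
    # Discrete exponential decay: multiply by decay_a once per
    # decay_b training samples seen (assumed semantics of 'discexp').
    return base_lr * decay_a ** (num_samples // decay_b)

# With the settings above, the rate would drop 10x every 50000 * 100 samples.
print discexp_lr(0.1 / 128.0, 0.1, 50000 * 100, 0)
print discexp_lr(0.1 / 128.0, 0.1, 50000 * 100, 50000 * 100)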
@@ -0,0 +1,47 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.v2 as paddle

__all__ = ['vgg_bn_drop']


def vgg_bn_drop(input):
    def conv_block(ipt, num_filter, groups, dropouts, num_channels=None):
        return paddle.networks.img_conv_group(
            input=ipt,
            num_channels=num_channels,
            pool_size=2,
            pool_stride=2,
            conv_num_filter=[num_filter] * groups,
            conv_filter_size=3,
            conv_act=paddle.activation.Relu(),
            conv_with_batchnorm=True,
            conv_batchnorm_drop_rate=dropouts,
            pool_type=paddle.pooling.Max())

    conv1 = conv_block(input, 64, 2, [0.3, 0], 3)
    conv2 = conv_block(conv1, 128, 2, [0.4, 0])
    conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0])
    conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0])
    conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0])

    drop = paddle.layer.dropout(input=conv5, dropout_rate=0.5)
    fc1 = paddle.layer.fc(input=drop, size=512, act=paddle.activation.Linear())
    bn = paddle.layer.batch_norm(
        input=fc1,
        act=paddle.activation.Relu(),
        layer_attr=paddle.attr.Extra(drop_rate=0.5))
    fc2 = paddle.layer.fc(input=bn, size=512, act=paddle.activation.Linear())
    return fc2
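The five conv_block calls above give the 13 convolution layers of a VGG16-style stack (2 + 2 + 3 + 3 + 3), followed by two 512-wide fully connected layers. A short sketch of wiring it into a config, mirroring "option 2" of the training script earlier in this commit (the CIFAR-10 dimensions repeat that script's settings):

import paddle.v2 as paddle
from api_v2_vgg import vgg_bn_drop

# assumes paddle.init(...) has already been called, as in main() above
image = paddle.layer.data(
    name="image", type=paddle.data_type.dense_vector(3 * 32 * 32))
net = vgg_bn_drop(image)
out = paddle.layer.fc(input=net, size=10, act=paddle.activation.Softmax())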
File diff suppressed because it is too large
@@ -1,12 +1,16 @@
-add_test(NAME test_v2_api
-    COMMAND bash ${PROJ_ROOT}/python/paddle/v2/tests/run_tests.sh ${PYTHON_EXECUTABLE})
 
 add_test(NAME test_v2_layer
     COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/
         ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/python/paddle/v2/tests/test_layer.py
     WORKING_DIRECTORY ${PROJ_ROOT}/python/paddle)
 
+add_test(NAME test_v2_api
+    COMMAND bash ${PROJ_ROOT}/python/paddle/v2/tests/run_tests.sh ${PYTHON_EXECUTABLE})
+add_test(NAME test_v2_rnn_layer
+    COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/
+        ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/python/paddle/v2/tests/test_rnn_layer.py)
+
-add_test(NAME topology_test
+add_test(NAME test_topology
     COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/
         ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/python/paddle/v2/tests/test_topology.py
     WORKING_DIRECTORY ${PROJ_ROOT}/python/paddle)
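After configuring the build, the renamed targets can be run individually with standard CTest, e.g. ctest -R test_topology or ctest -R test_v2_rnn_layer.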
@@ -0,0 +1,155 @@
# Copyright PaddlePaddle contributors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import difflib
import unittest

import paddle.trainer_config_helpers as conf_helps
import paddle.v2.activation as activation
import paddle.v2.data_type as data_type
import paddle.v2.layer as layer
from paddle.trainer_config_helpers.config_parser_utils import \
    parse_network_config as parse_network


class RNNTest(unittest.TestCase):
    def test_simple_rnn(self):
        dict_dim = 10
        word_dim = 8
        hidden_dim = 8

        def parse_old_rnn():
            def step(y):
                mem = conf_helps.memory(name="rnn_state", size=hidden_dim)
                out = conf_helps.fc_layer(
                    input=[y, mem],
                    size=hidden_dim,
                    act=activation.Tanh(),
                    bias_attr=True,
                    name="rnn_state")
                return out

            def test():
                data = conf_helps.data_layer(name="word", size=dict_dim)
                embd = conf_helps.embedding_layer(input=data, size=word_dim)
                conf_helps.recurrent_group(name="rnn", step=step, input=embd)

            return str(parse_network(test))

        def parse_new_rnn():
            def new_step(y):
                mem = layer.memory(name="rnn_state", size=hidden_dim)
                out = layer.fc(input=[y, mem],
                               size=hidden_dim,
                               act=activation.Tanh(),
                               bias_attr=True,
                               name="rnn_state")
                return out

            data = layer.data(
                name="word", type=data_type.integer_value(dict_dim))
            embd = layer.embedding(input=data, size=word_dim)
            rnn_layer = layer.recurrent_group(
                name="rnn", step=new_step, input=embd)
            return str(layer.parse_network(rnn_layer))

        diff = difflib.unified_diff(parse_old_rnn().splitlines(1),
                                    parse_new_rnn().splitlines(1))
        print ''.join(diff)

    def test_sequence_rnn_multi_input(self):
        dict_dim = 10
        word_dim = 8
        hidden_dim = 8
        label_dim = 3

        def parse_old_rnn():
            def test():
                data = conf_helps.data_layer(name="word", size=dict_dim)
                label = conf_helps.data_layer(name="label", size=label_dim)
                emb = conf_helps.embedding_layer(input=data, size=word_dim)
                boot_layer = conf_helps.data_layer(name="boot", size=10)
                boot_layer = conf_helps.fc_layer(
                    name='boot_fc', input=boot_layer, size=10)

                def step(y, wid):
                    z = conf_helps.embedding_layer(input=wid, size=word_dim)
                    mem = conf_helps.memory(
                        name="rnn_state",
                        size=hidden_dim,
                        boot_layer=boot_layer)
                    out = conf_helps.fc_layer(
                        input=[y, z, mem],
                        size=hidden_dim,
                        act=conf_helps.TanhActivation(),
                        bias_attr=True,
                        name="rnn_state")
                    return out

                out = conf_helps.recurrent_group(
                    name="rnn", step=step, input=[emb, data])

                rep = conf_helps.last_seq(input=out)
                prob = conf_helps.fc_layer(
                    size=label_dim,
                    input=rep,
                    act=conf_helps.SoftmaxActivation(),
                    bias_attr=True)

                conf_helps.outputs(
                    conf_helps.classification_cost(
                        input=prob, label=label))

            return str(parse_network(test))

        def parse_new_rnn():
            # The old config feeds integer word and label ids, so the v2
            # data layers must be integer-valued as well.
            data = layer.data(
                name="word", type=data_type.integer_value(dict_dim))
            label = layer.data(
                name="label", type=data_type.integer_value(label_dim))
            emb = layer.embedding(input=data, size=word_dim)
            boot_layer = layer.data(
                name="boot", type=data_type.dense_vector(10))
            boot_layer = layer.fc(name='boot_fc', input=boot_layer, size=10)

            def step(y, wid):
                z = layer.embedding(input=wid, size=word_dim)
                mem = layer.memory(
                    name="rnn_state", size=hidden_dim, boot_layer=boot_layer)
                out = layer.fc(input=[y, z, mem],
                               size=hidden_dim,
                               act=activation.Tanh(),
                               bias_attr=True,
                               name="rnn_state")
                return out

            out = layer.recurrent_group(
                name="rnn", step=step, input=[emb, data])

            rep = layer.last_seq(input=out)
            prob = layer.fc(size=label_dim,
                            input=rep,
                            act=activation.Softmax(),
                            bias_attr=True)

            cost = layer.classification_cost(input=prob, label=label)

            return str(layer.parse_network(cost))

        diff = difflib.unified_diff(parse_old_rnn().splitlines(1),
                                    parse_new_rnn().splitlines(1))
        print ''.join(diff)


if __name__ == '__main__':
    unittest.main()
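Both tests print the unified diff of the two parsed configs but never fail on a mismatch. If byte-identical configs are the intended contract (my assumption), the same comparison could be made self-checking inside either test body with plain unittest:

old, new = parse_old_rnn(), parse_new_rnn()
diff = ''.join(difflib.unified_diff(old.splitlines(1), new.splitlines(1)))
self.assertEqual(old, new, 'network configs differ:\n' + diff)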