Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into prior_box
commit f7c0ad9d35

@@ -0,0 +1,66 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.v2.dataset.wmt16
import unittest


class TestWMT16(unittest.TestCase):
    def checkout_one_sample(self, sample):
        # train data has 3 fields: source language word indices,
        # target language word indices, and target next-word indices.
        self.assertEqual(len(sample), 3)

        # test the start mark and end mark in source word indices.
        self.assertEqual(sample[0][0], 0)
        self.assertEqual(sample[0][-1], 1)

        # test the start mark in target word indices.
        self.assertEqual(sample[1][0], 0)

        # test the end mark in target next word indices.
        self.assertEqual(sample[2][-1], 1)

    def test_train(self):
        for idx, sample in enumerate(
                paddle.v2.dataset.wmt16.train(
                    src_dict_size=100000, trg_dict_size=100000)()):
            if idx >= 10: break
            self.checkout_one_sample(sample)

    def test_test(self):
        for idx, sample in enumerate(
                paddle.v2.dataset.wmt16.test(
                    src_dict_size=1000, trg_dict_size=1000)()):
            if idx >= 10: break
            self.checkout_one_sample(sample)

    def test_val(self):
        for idx, sample in enumerate(
                paddle.v2.dataset.wmt16.validation(
                    src_dict_size=1000, trg_dict_size=1000)()):
            if idx >= 10: break
            self.checkout_one_sample(sample)

    def test_get_dict(self):
        dict_size = 1000
        word_dict = paddle.v2.dataset.wmt16.get_dict("en", dict_size, True)
        self.assertEqual(len(word_dict), dict_size)
        self.assertEqual(word_dict[0], "<s>")
        self.assertEqual(word_dict[1], "<e>")
        self.assertEqual(word_dict[2], "<unk>")


if __name__ == "__main__":
    unittest.main()
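For context, each reader under test is a no-argument callable that returns a generator of 3-tuples; a minimal sketch of consuming one directly, assuming the paddle.v2.dataset.wmt16 API exercised above:

import paddle.v2.dataset.wmt16 as wmt16

# Each sample is (source indices, target indices, target next-word indices);
# per the assertions above, the start mark <s> maps to index 0 and the end
# mark <e> to index 1.
sample = next(iter(wmt16.train(src_dict_size=1000, trg_dict_size=1000)()))
src_ids, trg_ids, trg_next_ids = sample
assert src_ids[0] == 0 and src_ids[-1] == 1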
File diff suppressed because it is too large
@@ -0,0 +1,11 @@
file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py")
string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")

list(REMOVE_ITEM TEST_OPS test_memopt_image_classification_train)
py_test(test_memopt_image_classification_train_resnet SRCS test_memopt_image_classification_train.py ARGS resnet)
py_test(test_memopt_image_classification_train_vgg SRCS test_memopt_image_classification_train.py ARGS vgg)

# default test
foreach(src ${TEST_OPS})
    py_test(${src} SRCS ${src}.py)
endforeach()
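The two py_test registrations above run the same script twice, once per network; the network name is passed through ARGS and picked up from sys.argv[1] in test_memopt_image_classification_train.py further down in this diff.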
@@ -0,0 +1,44 @@
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid as fluid

x = fluid.layers.data(name='x', shape=[13], dtype='float32')

y_predict = fluid.layers.fc(input=x, size=1, act=None)

y = fluid.layers.data(name='y', shape=[1], dtype='float32')

cost = fluid.layers.square_error_cost(input=y_predict, label=y)
avg_cost = fluid.layers.mean(x=cost)

sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.1)
sgd_optimizer.minimize(avg_cost)

# memopt_program = fluid.default_main_program()
memopt_program = fluid.memory_optimize(fluid.default_main_program())

BATCH_SIZE = 200

train_reader = paddle.batch(
    paddle.reader.shuffle(
        paddle.dataset.uci_housing.train(), buf_size=500),
    batch_size=BATCH_SIZE)

place = fluid.CPUPlace()
feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
exe = fluid.Executor(place)

exe.run(fluid.default_startup_program())

PASS_NUM = 100
for pass_id in range(PASS_NUM):
    # exercise checkpointing: save and reload persistable variables each pass
    fluid.io.save_persistables(exe, "./fit_a_line.model/")
    fluid.io.load_persistables(exe, "./fit_a_line.model/")
    for data in train_reader():
        avg_loss_value, = exe.run(memopt_program,
                                  feed=feeder.feed(data),
                                  fetch_list=[avg_cost])

        if avg_loss_value[0] < 10.0:
            exit(0)  # if the avg cost is less than 10.0, we think our code is good.
exit(1)
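A note on the flow above: fluid.memory_optimize transforms the main program so that intermediate variables can reuse memory (the commented-out line keeps the unoptimized program for comparison), and the script exits with status 0 as soon as the average loss drops below 10.0, signalling that training under the optimized program still converges.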
@@ -0,0 +1,133 @@
from __future__ import print_function

import sys

import paddle.v2 as paddle
import paddle.v2.fluid as fluid


def resnet_cifar10(input, depth=32):
    def conv_bn_layer(input, ch_out, filter_size, stride, padding, act='relu'):
        tmp = fluid.layers.conv2d(
            input=input,
            filter_size=filter_size,
            num_filters=ch_out,
            stride=stride,
            padding=padding,
            act=None,
            bias_attr=False)
        return fluid.layers.batch_norm(input=tmp, act=act)

    def shortcut(input, ch_in, ch_out, stride):
        if ch_in != ch_out:
            return conv_bn_layer(input, ch_out, 1, stride, 0, None)
        else:
            return input

    def basicblock(input, ch_in, ch_out, stride):
        tmp = conv_bn_layer(input, ch_out, 3, stride, 1)
        tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None)
        short = shortcut(input, ch_in, ch_out, stride)
        return fluid.layers.elementwise_add(x=tmp, y=short, act='relu')

    def layer_warp(block_func, input, ch_in, ch_out, count, stride):
        tmp = block_func(input, ch_in, ch_out, stride)
        for i in range(1, count):
            tmp = block_func(tmp, ch_out, ch_out, 1)
        return tmp

    assert (depth - 2) % 6 == 0
    n = (depth - 2) // 6  # integer division keeps range() valid on Python 3
    conv1 = conv_bn_layer(
        input=input, ch_out=16, filter_size=3, stride=1, padding=1)
    res1 = layer_warp(basicblock, conv1, 16, 16, n, 1)
    res2 = layer_warp(basicblock, res1, 16, 32, n, 2)
    res3 = layer_warp(basicblock, res2, 32, 64, n, 2)
    pool = fluid.layers.pool2d(
        input=res3, pool_size=8, pool_type='avg', pool_stride=1)
    return pool


def vgg16_bn_drop(input):
    def conv_block(input, num_filter, groups, dropouts):
        return fluid.nets.img_conv_group(
            input=input,
            pool_size=2,
            pool_stride=2,
            conv_num_filter=[num_filter] * groups,
            conv_filter_size=3,
            conv_act='relu',
            conv_with_batchnorm=True,
            conv_batchnorm_drop_rate=dropouts,
            pool_type='max')

    conv1 = conv_block(input, 64, 2, [0.3, 0])
    conv2 = conv_block(conv1, 128, 2, [0.4, 0])
    conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0])
    conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0])
    conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0])

    drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5)
    fc1 = fluid.layers.fc(input=drop, size=512, act=None)
    bn = fluid.layers.batch_norm(input=fc1, act='relu')
    drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5)
    fc2 = fluid.layers.fc(input=drop2, size=512, act=None)
    return fc2


classdim = 10
data_shape = [3, 32, 32]

images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')

net_type = "vgg"
if len(sys.argv) >= 2:
    net_type = sys.argv[1]

if net_type == "vgg":
    print("train vgg net")
    net = vgg16_bn_drop(images)
elif net_type == "resnet":
    print("train resnet")
    net = resnet_cifar10(images, 32)
else:
    raise ValueError("%s network is not supported" % net_type)

predict = fluid.layers.fc(input=net, size=classdim, act='softmax')
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)

optimizer = fluid.optimizer.Adam(learning_rate=0.001)
opts = optimizer.minimize(avg_cost)

accuracy = fluid.evaluator.Accuracy(input=predict, label=label)

# memopt_program = fluid.default_main_program()
memopt_program = fluid.memory_optimize(fluid.default_main_program())

BATCH_SIZE = 128
PASS_NUM = 1

train_reader = paddle.batch(
    paddle.reader.shuffle(
        paddle.dataset.cifar.train10(), buf_size=128 * 10),
    batch_size=BATCH_SIZE)

place = fluid.CPUPlace()
exe = fluid.Executor(place)
feeder = fluid.DataFeeder(place=place, feed_list=[images, label])
exe.run(fluid.default_startup_program())

for pass_id in range(PASS_NUM):
    accuracy.reset(exe)
    for data in train_reader():
        loss, acc = exe.run(memopt_program,
                            feed=feeder.feed(data),
                            fetch_list=[avg_cost] + accuracy.metrics)
        pass_acc = accuracy.eval(exe)
        print("loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" + str(
            pass_acc))
        # This model is slow, so if we can train two mini-batches, we think
        # it works properly.
        exit(0)
exit(1)
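As wired up in the CMakeLists.txt above, this script is registered once per network, which is effectively equivalent to running:

python test_memopt_image_classification_train.py vgg
python test_memopt_image_classification_train.py resnet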
Some files were not shown because too many files have changed in this diff.