commit
f54efd027e
@ -0,0 +1,103 @@
|
|||||||
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
import argparse
|
||||||
|
import time
|
||||||
|
import math
|
||||||
|
|
||||||
|
import paddle
|
||||||
|
import paddle.fluid as fluid
|
||||||
|
import paddle.fluid.profiler as profiler
|
||||||
|
from paddle.fluid import core
|
||||||
|
import unittest
|
||||||
|
from multiprocessing import Process
|
||||||
|
import os
|
||||||
|
import signal
|
||||||
|
from functools import reduce
|
||||||
|
from test_dist_base import TestDistRunnerBase, runtime_main
|
||||||
|
|
||||||
|
# Data type used for the image input layer of this test model.
DTYPE = "float32"
# Pre-download the MNIST dataset at import time (presumably so the spawned
# trainer/pserver processes don't each trigger a download — confirm against
# test_dist_base's process launching).
paddle.dataset.mnist.fetch()

# Fix seed for test
fluid.default_startup_program().random_seed = 1
fluid.default_main_program().random_seed = 1
|
||||||
|
|
||||||
|
|
||||||
|
def cnn_model(data):
    """Build the two-stage conv net used by the distributed MNIST test.

    Args:
        data: input image variable (the ``'pixel'`` data layer).

    Returns:
        The softmax prediction variable over the 10 digit classes.
    """

    def _conv_pool(feat, n_filters):
        # One conv+pool stage; constant-initialized weights keep the
        # test deterministic across trainers.
        return fluid.nets.simple_img_conv_pool(
            input=feat,
            filter_size=5,
            num_filters=n_filters,
            pool_size=2,
            pool_stride=2,
            act="relu",
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant()))

    stage1 = _conv_pool(data, 20)
    stage2 = _conv_pool(stage1, 50)

    n_classes = 10
    feat_shape = stage2.shape
    # Flattened feature size feeding the final FC layer.
    flat_dim = reduce(lambda a, b: a * b, feat_shape[1:], 1)
    fc_param_shape = [flat_dim] + [n_classes]
    # Scaled-normal initialization for the FC weights; seed pinned so every
    # process builds identical parameters.
    init_scale = (2.0 / (fc_param_shape[0]**2 * n_classes))**0.5

    return fluid.layers.fc(
        input=stage2,
        size=n_classes,
        act="softmax",
        param_attr=fluid.param_attr.ParamAttr(
            initializer=fluid.initializer.NormalInitializer(
                loc=0.0, scale=init_scale, seed=1)))
|
||||||
|
|
||||||
|
|
||||||
|
class TestDistMnist2x2(TestDistRunnerBase):
    """Distributed 2x2 MNIST model definition driven by runtime_main."""

    def get_model(self, batch_size=2):
        """Assemble the MNIST training and inference programs.

        Returns the tuple expected by TestDistRunnerBase:
        (inference_program, avg_cost, train_reader, test_reader,
        batch_acc, predict).
        """
        # Input layers.
        images = fluid.layers.data(
            name='pixel', shape=[1, 28, 28], dtype=DTYPE)
        label = fluid.layers.data(name='label', shape=[1], dtype='int64')

        # Forward pass and training loss.
        predict = cnn_model(images)
        ce_loss = fluid.layers.cross_entropy(input=predict, label=label)
        avg_cost = fluid.layers.mean(x=ce_loss)

        # Accuracy metric; the helper tensor receives the effective batch
        # size seen by the accuracy op.
        n_samples = fluid.layers.create_tensor(dtype='int64')
        batch_acc = fluid.layers.accuracy(
            input=predict, label=label, total=n_samples)

        # Clone for inference BEFORE the optimizer adds backward/update ops
        # to the main program.
        inference_program = fluid.default_main_program().clone()

        # Optimization.
        optimizer = fluid.optimizer.AdamOptimizer(
            learning_rate=0.001, beta1=0.9, beta2=0.999)

        # Batched dataset readers.
        train_reader = paddle.batch(
            paddle.dataset.mnist.train(), batch_size=batch_size)
        test_reader = paddle.batch(
            paddle.dataset.mnist.test(), batch_size=batch_size)
        optimizer.minimize(avg_cost)

        return (inference_program, avg_cost, train_reader, test_reader,
                batch_acc, predict)
|
||||||
|
|
||||||
|
|
||||||
|
# Entry point: runtime_main (from test_dist_base) runs this model class;
# presumably it selects the trainer/pserver role from the environment —
# confirm in test_dist_base.
if __name__ == "__main__":
    runtime_main(TestDistMnist2x2)
|
@ -0,0 +1,119 @@
|
|||||||
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
import argparse
|
||||||
|
import time
|
||||||
|
import math
|
||||||
|
import paddle
|
||||||
|
import paddle.fluid as fluid
|
||||||
|
import paddle.fluid.profiler as profiler
|
||||||
|
from paddle.fluid import core
|
||||||
|
import unittest
|
||||||
|
from multiprocessing import Process
|
||||||
|
import os
|
||||||
|
import signal
|
||||||
|
from test_dist_base import TestDistRunnerBase, runtime_main
|
||||||
|
|
||||||
|
# Passed as is_sparse= to the embedding layers below.
IS_SPARSE = True
# Word embedding dimensionality.
EMBED_SIZE = 32
# Width of the hidden fully-connected layer.
HIDDEN_SIZE = 256
# Window size handed to the imikolov readers (paddle.dataset.imikolov).
N = 5

# Fix seed for test
fluid.default_startup_program().random_seed = 1
fluid.default_main_program().random_seed = 1
|
||||||
|
|
||||||
|
|
||||||
|
class TestDistWord2vec2x2(TestDistRunnerBase):
    """Distributed 2x2 word2vec (n-gram LM) model driven by runtime_main."""

    def get_model(self, batch_size=2):
        """Assemble the imikolov word2vec training and inference programs.

        Returns the tuple expected by TestDistRunnerBase:
        (inference_program, avg_cost, train_reader, test_reader,
        None, predict_word) — this model reports no accuracy metric.
        """

        def build_network(words):
            # The first four slots are context words; each one is looked up
            # in the SAME embedding table (param name 'shared_w').
            context_vecs = [
                fluid.layers.embedding(
                    input=word,
                    size=[dict_size, EMBED_SIZE],
                    dtype='float32',
                    is_sparse=IS_SPARSE,
                    param_attr=fluid.ParamAttr(
                        name='shared_w',
                        initializer=fluid.initializer.Constant()))
                for word in words[:4]
            ]

            merged = fluid.layers.concat(input=context_vecs, axis=1)
            hidden = fluid.layers.fc(
                input=merged,
                size=HIDDEN_SIZE,
                act='sigmoid',
                param_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Constant()))
            prediction = fluid.layers.fc(
                input=hidden,
                size=dict_size,
                act='softmax',
                param_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Constant()))
            # The fifth slot is the target (next) word.
            loss = fluid.layers.cross_entropy(
                input=prediction, label=words[4])
            return fluid.layers.mean(loss), prediction

        word_dict = paddle.dataset.imikolov.build_dict()
        dict_size = len(word_dict)

        # Data-layer names are part of the feed interface — keep as-is.
        slot_names = ['firstw', 'secondw', 'thirdw', 'forthw', 'nextw']
        word_vars = [
            fluid.layers.data(name=slot, shape=[1], dtype='int64')
            for slot in slot_names
        ]
        avg_cost, predict_word = build_network(word_vars)

        # Clone for inference BEFORE the optimizer adds backward/update ops
        # to the main program.
        inference_program = paddle.fluid.default_main_program().clone()

        sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
        sgd_optimizer.minimize(avg_cost)

        train_reader = paddle.batch(
            paddle.dataset.imikolov.train(word_dict, N), batch_size)
        test_reader = paddle.batch(
            paddle.dataset.imikolov.test(word_dict, N), batch_size)

        return (inference_program, avg_cost, train_reader, test_reader,
                None, predict_word)
|
||||||
|
|
||||||
|
|
||||||
|
# Entry point: runtime_main (from test_dist_base) runs this model class;
# presumably it selects the trainer/pserver role from the environment —
# confirm in test_dist_base.
if __name__ == "__main__":
    runtime_main(TestDistWord2vec2x2)
|
Loading…
Reference in new issue