commit
d5c697e603
@ -0,0 +1,100 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from py_paddle import swig_paddle
from py_paddle import DataProviderConverter
import data_type

__all__ = ['DataFeeder']


class DataFeeder(DataProviderConverter):
    """
    DataFeeder converts the data returned by paddle.reader into a data
    structure of Arguments, which is defined in the API. The paddle.reader
    usually returns a list of mini-batch data entries. Each data entry in
    the list is one sample. Each sample is a list or a tuple with one
    feature or multiple features. DataFeeder converts these mini-batch data
    entries into Arguments in order to feed them to the C++ interface.

    Example usage:

        data_types = [('image', paddle.data_type.dense_vector(784)),
                      ('label', paddle.data_type.integer_value(10))]
        reader_dict = {'image': 0, 'label': 1}
        feeder = DataFeeder(data_types=data_types, reader_dict=reader_dict)
        minibatch_data = [
            ([1.0, 2.0, 3.0, 4.0], 5, [6, 7, 8]),  # first sample
            ([1.0, 2.0, 3.0, 4.0], 5, [6, 7, 8])   # second sample
        ]
        # or minibatch_data = [
        #     [[1.0, 2.0, 3.0, 4.0], 5, [6, 7, 8]],  # first sample
        #     [[1.0, 2.0, 3.0, 4.0], 5, [6, 7, 8]]   # second sample
        # ]
        arg = feeder(minibatch_data)
    """

    def __init__(self, data_types, reader_dict):
        """
        :param data_types: A list specifying the name and type of each input.
                           Each item is a tuple of (data_name, data_type).
                           For example:
                           [('image', paddle.data_type.dense_vector(784)),
                            ('label', paddle.data_type.integer_value(10))]
        :type data_types: list of tuple
        :param reader_dict: A dictionary mapping each data name to its
                            position in the input data.
        :type reader_dict: dict
        """
        self.input_names = []
        input_types = []
        self.reader_dict = reader_dict
        for each in data_types:
            self.input_names.append(each[0])
            assert isinstance(each[1], data_type.InputType)
            input_types.append(each[1])
        DataProviderConverter.__init__(self, input_types)

    def convert(self, dat, argument=None):
        """
        :param dat: A list of mini-batch data. Each sample is a list or a
                    tuple with one feature or multiple features.
                    For example:
                    [
                        ([0.2, 0.2], ),  # first sample
                        ([0.8, 0.3], ),  # second sample
                    ]
                    or,
                    [
                        [[0.2, 0.2], ],  # first sample
                        [[0.8, 0.3], ],  # second sample
                    ]
        :type dat: list
        :param argument: An Arguments object that holds this mini-batch data
                         with one or multiple features. The Arguments
                         definition is in the API.
        :type argument: swig_paddle.Arguments
        """

        def reorder_data(data):
            retv = []
            for each in data:
                reorder = []
                for name in self.input_names:
                    reorder.append(each[self.reader_dict[name]])
                retv.append(reorder)
            return retv

        return DataProviderConverter.convert(self, reorder_data(dat), argument)
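The reorder step in convert() is easy to trace standalone. Below is a minimal sketch of the same lookup logic in plain Python; the sample values are made up for illustration and are not part of the commit:

    # input_names comes from the data_types order; reader_dict maps each
    # name to its column position within a raw sample from the reader.
    input_names = ['image', 'label']
    reader_dict = {'image': 0, 'label': 1}

    minibatch = [
        ([0.1, 0.2, 0.3], 7),  # first sample: (image, label)
        ([0.4, 0.5, 0.6], 3),  # second sample
    ]

    # Same idea as the inner reorder_data helper: pick each declared
    # feature out of every sample, in data_types order.
    reordered = [[sample[reader_dict[name]] for name in input_names]
                 for sample in minibatch]
    # reordered == [[[0.1, 0.2, 0.3], 7], [[0.4, 0.5, 0.6], 3]]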
@ -0,0 +1,8 @@
import os

__all__ = ['DATA_HOME']

DATA_HOME = os.path.expanduser('~/.cache/paddle_data_set')

if not os.path.exists(DATA_HOME):
    os.makedirs(DATA_HOME)
@ -0,0 +1,39 @@
import sklearn.datasets.mldata
import sklearn.model_selection
import numpy
from config import DATA_HOME

__all__ = ['train_creator', 'test_creator']


def __mnist_reader_creator__(data, target):
    def reader():
        n_samples = data.shape[0]
        for i in xrange(n_samples):
            yield (data[i] / 255.0).astype(numpy.float32), int(target[i])

    return reader


TEST_SIZE = 10000

data = sklearn.datasets.mldata.fetch_mldata(
    "MNIST original", data_home=DATA_HOME)
X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
    data.data, data.target, test_size=TEST_SIZE, random_state=0)


def train_creator():
    return __mnist_reader_creator__(X_train, y_train)


def test_creator():
    return __mnist_reader_creator__(X_test, y_test)


def unittest():
    assert len(list(test_creator()())) == TEST_SIZE


if __name__ == '__main__':
    unittest()
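Since train_creator() returns a reader function rather than data, consuming it is a two-step call. A hedged sketch of wiring one mini-batch into DataFeeder follows; the batching via itertools.islice is an assumption for illustration, not part of this commit:

    import itertools

    from paddle.v2 import data_type
    from paddle.v2.data_feeder import DataFeeder

    samples = train_creator()()  # generator of (image, label) tuples
    minibatch = list(itertools.islice(samples, 32))  # take 32 samples

    feeder = DataFeeder(
        data_types=[('image', data_type.dense_vector(784)),
                    ('label', data_type.integer_value(10))],
        reader_dict={'image': 0, 'label': 1})
    arg = feeder(minibatch)  # swig_paddle.Arguments, ready for the C++ side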
@ -0,0 +1,36 @@
#!/bin/bash
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


pushd `dirname $0` > /dev/null
SCRIPTPATH=$PWD
popd > /dev/null

cd $SCRIPTPATH

$1 -m pip install ../../../../paddle/dist/*.whl

test_list="test_data_feeder.py"

export PYTHONPATH=$PWD/../../../../python/

for fn in $test_list
do
    echo "test $fn"
    $1 $fn
    if [ $? -ne 0 ]; then
        exit 1
    fi
done
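The script takes the Python interpreter as its first positional argument ($1), used both for the pip install and for running each test. A hypothetical invocation (the script's filename is not visible in this diff, so run_tests.sh is a placeholder):

    bash run_tests.sh python2.7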
@ -0,0 +1,238 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import py_paddle.swig_paddle as api
import numpy as np

from paddle.v2 import data_type
from paddle.v2.data_feeder import DataFeeder


class DataFeederTest(unittest.TestCase):
    def dense_reader(self, size):
        data = np.random.random(size)
        return data

    def sparse_binary_reader(self, high, size_limit, non_empty=False):
        num = np.random.randint(size_limit)  # num could be 0
        while non_empty and num == 0:
            num = np.random.randint(size_limit)
        return np.random.randint(high, size=num).tolist()

    def test_dense(self):
        def compare(input):
            feeder = DataFeeder([('image', data_type.dense_vector(784))],
                                {'image': 0})
            arg = feeder(input)
            output = arg.getSlotValue(0).copyToNumpyMat()
            input = np.array(input, dtype='float32')
            self.assertAlmostEqual(input.all(), output.all())

        # test numpy array
        batch_size = 32
        dim = 784
        data = []
        for i in xrange(batch_size):
            each_sample = []
            each_sample.append(self.dense_reader(dim))
            data.append(each_sample)
        compare(data)

        # each feature is a list
        data = []
        for i in xrange(batch_size):
            each_sample = []
            each_sample.append(self.dense_reader(dim).tolist())
            data.append(each_sample)
        compare(data)

        # test tuple
        data = []
        for i in xrange(batch_size):
            each_sample = (self.dense_reader(dim).tolist(), )
            data.append(each_sample)
        compare(data)

    def test_sparse_binary(self):
        dim = 10000
        batch_size = 32
        data = []
        for i in xrange(batch_size):
            each_sample = []
            each_sample.append(self.sparse_binary_reader(dim, 50))
            data.append(each_sample)
        feeder = DataFeeder([('input', data_type.sparse_binary_vector(dim))],
                            {'input': 0})
        arg = feeder(data)
        output = arg.getSlotValue(0)
        assert isinstance(output, api.Matrix)
        for i in xrange(batch_size):
            self.assertEqual(output.getSparseRowCols(i), data[i][0])

    def test_sparse(self):
        dim = 10000
        batch_size = 32
        v = []
        w = []
        data = []
        for dat in xrange(batch_size):
            each_sample = []
            a = self.sparse_binary_reader(dim, 40, non_empty=True)
            b = self.dense_reader(len(a)).tolist()
            v.append(a)
            w.append(np.array(b, dtype="float32"))
            each_sample.append(zip(a, b))
            data.append(each_sample)

        feeder = DataFeeder([('input', data_type.sparse_vector(dim))],
                            {'input': 0})
        arg = feeder(data)
        output = arg.getSlotValue(0)
        assert isinstance(output, api.Matrix)
        for i in xrange(batch_size):
            self.assertEqual(output.getSparseRowCols(i), v[i])
            cols_value = output.getSparseRowColsVal(i)
            value = [val[1] for val in cols_value]
            value = np.array(value, dtype="float32")
            self.assertAlmostEqual(value.all(), w[i].all())

    def test_integer(self):
        dim = 100
        batch_size = 32
        index = []
        for i in xrange(batch_size):
            each_sample = []
            each_sample.append(np.random.randint(dim))
            index.append(each_sample)
        feeder = DataFeeder([('input', data_type.integer_value(dim))],
                            {'input': 0})
        arg = feeder(index)
        output = arg.getSlotIds(0).copyToNumpyArray()
        index = np.array(index, dtype='int')
        self.assertEqual(output.all(), index.flatten().all())

    def test_integer_sequence(self):
        dim = 10000
        batch_size = 32
        start = [0]
        data = []
        for i in xrange(batch_size):
            each_sample = []
            each_sample.append(
                self.sparse_binary_reader(
                    dim, 30, non_empty=True))
            data.append(each_sample)
            start.append(len(each_sample[0]) + start[-1])
        feeder = DataFeeder([('input', data_type.integer_value_sequence(dim))],
                            {'input': 0})
        arg = feeder(data)
        output_data = arg.getSlotIds(0).copyToNumpyArray()
        output_start = arg.getSlotSequenceStartPositions(0).copyToNumpyArray()

        index = []
        for dat in data:
            index.extend(x for x in dat[0])  # only one feature, so dat[0]
        index = np.array(index, dtype='int')
        start = np.array(start, dtype='int')
        self.assertEqual(output_data.all(), index.all())
        self.assertEqual(output_start.all(), start.all())
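    # For illustration (a sketch, not from the original test): the sequence
    # start positions checked above are cumulative offsets into the
    # flattened id array, so start[i] marks where sample i begins and
    # start[-1] equals the total number of ids. For example:
    #
    #     lengths = [3, 1, 4]  # per-sample sequence lengths
    #     start = [0]
    #     for n in lengths:
    #         start.append(start[-1] + n)
    #     assert start == [0, 3, 4, 8]  # offsets, then total count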

    def test_multiple_features(self):
        batch_size = 2
        data = []
        for i in xrange(batch_size):
            each_sample = []
            each_sample.append(np.random.randint(10))
            each_sample.append(
                self.sparse_binary_reader(
                    20000, 40, non_empty=True))
            each_sample.append(self.dense_reader(100))
            data.append(each_sample)

        # test multiple features
        data_types = [('fea0', data_type.dense_vector(100)),
                      ('fea1', data_type.sparse_binary_vector(20000)),
                      ('fea2', data_type.integer_value(10))]
        feeder = DataFeeder(data_types, {'fea0': 2, 'fea1': 1, 'fea2': 0})
        arg = feeder(data)
        output_dense = arg.getSlotValue(0).copyToNumpyMat()
        output_sparse = arg.getSlotValue(1)
        output_index = arg.getSlotIds(2).copyToNumpyArray()
        for i in xrange(batch_size):
            self.assertEqual(output_dense[i].all(), data[i][2].all())
            self.assertEqual(output_sparse.getSparseRowCols(i), data[i][1])
            self.assertEqual(output_index[i], data[i][0])

        # the reader returns 3 features, but only 2 of them are used
        data_types = [('fea0', data_type.dense_vector(100)),
                      ('fea2', data_type.integer_value(10))]
        feeder = DataFeeder(data_types, {'fea0': 2, 'fea2': 0})
        arg = feeder(data)
        output_dense = arg.getSlotValue(0).copyToNumpyMat()
        output_index = arg.getSlotIds(1).copyToNumpyArray()
        for i in xrange(batch_size):
            self.assertEqual(output_dense[i].all(), data[i][2].all())
            self.assertEqual(output_index[i], data[i][0])

        # the reader returns 3 features; one of them is fed twice
        data_types = [('fea0', data_type.dense_vector(100)),
                      ('fea1', data_type.sparse_binary_vector(20000)),
                      ('fea2', data_type.integer_value(10)),
                      ('fea3', data_type.dense_vector(100))]
        feeder = DataFeeder(data_types,
                            {'fea0': 2,
                             'fea1': 1,
                             'fea2': 0,
                             'fea3': 2})
        arg = feeder(data)
        fea0 = arg.getSlotValue(0).copyToNumpyMat()
        fea1 = arg.getSlotValue(1)
        fea2 = arg.getSlotIds(2).copyToNumpyArray()
        fea3 = arg.getSlotValue(3).copyToNumpyMat()
        for i in xrange(batch_size):
            self.assertEqual(fea0[i].all(), data[i][2].all())
            self.assertEqual(fea1.getSparseRowCols(i), data[i][1])
            self.assertEqual(fea2[i], data[i][0])
            self.assertEqual(fea3[i].all(), data[i][2].all())

    def test_multiple_features_tuple(self):
        batch_size = 2
        data = []
        for i in xrange(batch_size):
            a = np.random.randint(10)
            b = self.sparse_binary_reader(20000, 40, non_empty=True)
            c = self.dense_reader(100)
            each_sample = (a, b, c)
            data.append(each_sample)

        # test multiple features
        data_types = [('fea0', data_type.dense_vector(100)),
                      ('fea1', data_type.sparse_binary_vector(20000)),
                      ('fea2', data_type.integer_value(10))]
        feeder = DataFeeder(data_types, {'fea0': 2, 'fea1': 1, 'fea2': 0})
        arg = feeder(data)
        out_dense = arg.getSlotValue(0).copyToNumpyMat()
        out_sparse = arg.getSlotValue(1)
        out_index = arg.getSlotIds(2).copyToNumpyArray()
        for i in xrange(batch_size):
            self.assertEqual(out_dense[i].all(), data[i][2].all())
            self.assertEqual(out_sparse.getSparseRowCols(i), data[i][1])
            self.assertEqual(out_index[i], data[i][0])


if __name__ == '__main__':
    api.initPaddle("--use_gpu=0")
    unittest.main()