avx_docs
qiaolongfei 9 years ago
commit 59e4b2aed3

@ -126,7 +126,7 @@ class ImageClassifier():
# For oversampling, average predictions across crops.
# If not, the shape of output[name]: (1, class_number),
# the mean is also applicable.
return output[output_layer].mean(0)
return output[output_layer]['value'].mean(0)
def predict(self, image=None, output_layer=None):
assert isinstance(image, basestring)
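A tiny hedged illustration of the averaging described in the comment above (the numbers are made up): with oversampling the output holds one row per crop, and mean(0) collapses them into a single class-probability vector; with a single (1, class_number) row the same call is a no-op.

    import numpy
    crops = numpy.array([[0.1, 0.9],
                         [0.3, 0.7]])   # shape (num_crops, class_number); values are illustrative
    probs = crops.mean(0)               # array([0.2, 0.8]) -- one averaged score per class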

@ -1,4 +1,3 @@
import numpy
import paddle.v2 as paddle
import mnist_util
@ -27,19 +26,14 @@ def main():
cost = paddle.layer.classification_cost(input=inference, label=label)
parameters = paddle.parameters.create(cost)
for param_name in parameters.keys():
array = parameters.get(param_name)
array[:] = numpy.random.uniform(low=-1.0, high=1.0, size=array.shape)
parameters.set(parameter_name=param_name, value=array)
adam_optimizer = paddle.optimizer.Adam(learning_rate=0.01)
def event_handler(event):
if isinstance(event, paddle.event.EndIteration):
para = parameters.get('___fc_2__.w0')
print "Pass %d, Batch %d, Cost %f, Weight Mean Of Fc 2 is %f" % (
event.pass_id, event.batch_id, event.cost, para.mean())
if event.batch_id % 100 == 0:
print "Pass %d, Batch %d, Cost %f, %s" % (
event.pass_id, event.batch_id, event.cost, event.metrics)
else:
pass
@ -50,11 +44,12 @@ def main():
parameters=parameters,
event_handler=event_handler,
batch_size=32, # batch size should be refactor in Data reader
data_types={ # data_types will be removed, It should be in
data_types=[ # data_types will be removed, It should be in
# network topology
'pixel': images.type,
'label': label.type
})
('pixel', images.type),
('label', label.type)],
reader_dict={'pixel':0, 'label':1}
)
if __name__ == '__main__':
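To make the shape of the new arguments concrete, a minimal sketch (images and label are the data layers from the surrounding example; only the list-of-tuples data_types and the name-to-position reader_dict come from this hunk):

    # data_types is now an ordered list of (name, input_type) pairs instead of a dict
    data_types = [('pixel', images.type),
                  ('label', label.type)]
    # reader_dict maps each input name to its position in a reader sample,
    # i.e. the reader is expected to yield samples shaped like (pixel_vector, label_int)
    reader_dict = {'pixel': 0, 'label': 1}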

@ -156,7 +156,7 @@ class ImageClassifier():
# For oversampling, average predictions across crops.
# If not, the shape of output[name]: (1, class_number),
# the mean is also applicable.
res[name] = output[name].mean(0)
res[name] = output[name]['value'].mean(0)
return res

@ -139,24 +139,12 @@ lstmemory
:members: lstmemory
:noindex:
lstm_step_layer
---------------
.. automodule:: paddle.trainer_config_helpers.layers
:members: lstm_step_layer
:noindex:
grumemory
---------
.. automodule:: paddle.trainer_config_helpers.layers
:members: grumemory
:noindex:
gru_step_layer
---------------
.. automodule:: paddle.trainer_config_helpers.layers
:members: gru_step_layer
:noindex:
Recurrent Layer Group
=====================
@ -172,6 +160,18 @@ recurrent_group
:members: recurrent_group
:noindex:
lstm_step_layer
---------------
.. automodule:: paddle.trainer_config_helpers.layers
:members: lstm_step_layer
:noindex:
gru_step_layer
---------------
.. automodule:: paddle.trainer_config_helpers.layers
:members: gru_step_layer
:noindex:
beam_search
------------
.. automodule:: paddle.trainer_config_helpers.layers
@ -308,6 +308,12 @@ repeat_layer
:members: repeat_layer
:noindex:
rotate_layer
------------
.. automodule:: paddle.trainer_config_helpers.layers
:members: rotate_layer
:noindex:
seq_reshape_layer
-----------------
.. automodule:: paddle.trainer_config_helpers.layers
@ -462,6 +468,12 @@ ctc_layer
:members: ctc_layer
:noindex:
warp_ctc_layer
--------------
.. automodule:: paddle.trainer_config_helpers.layers
:members: warp_ctc_layer
:noindex:
nce_layer
-----------
.. automodule:: paddle.trainer_config_helpers.layers

@ -38,6 +38,13 @@ Arguments* Arguments::createByPaddleArgumentVector(void* ptr) {
return args;
}
Arguments* Arguments::createByPaddleArgument(const void* ptr) {
auto p = (paddle::Argument*)(ptr);
auto args = new Arguments();
args->m->outputs.push_back(*p);
return args;
}
Matrix* Arguments::getSlotValue(size_t idx) const throw(RangeError) {
auto& a = m->getArg(idx);
return Matrix::createByPaddleMatrixPtr(&a.value);

@ -144,12 +144,12 @@ Parameter* GradientMachine::getParameter(size_t i) throw(RangeError) {
void GradientMachine::randParameters() { m->machine->randParameters(); }
Matrix* GradientMachine::getLayerOutput(const std::string& layerName) const
Arguments* GradientMachine::getLayerOutput(const std::string& layerName) const
throw(UnsupportError) {
auto nn = std::dynamic_pointer_cast<paddle::NeuralNetwork>(m->machine);
auto nn = m->machine;
if (nn) {
auto mat = nn->getLayerOutput(layerName);
return Matrix::createByPaddleMatrixPtr(&mat);
auto arg = nn->getLayerOutput(layerName);
return Arguments::createByPaddleArgument(&arg);
} else {
throw UnsupportError();
}

@ -454,6 +454,7 @@ public:
private:
static Arguments* createByPaddleArgumentVector(void* ptr);
static Arguments* createByPaddleArgument(const void* ptr);
void* getInternalArgumentsPtr() const;
private:
@ -769,7 +770,7 @@ public:
void randParameters();
Matrix* getLayerOutput(const std::string& layerName) const
Arguments* getLayerOutput(const std::string& layerName) const
throw(UnsupportError);
/**
@ -956,7 +957,7 @@ public:
Arguments* getForwardOutput();
Matrix* getLayerOutput(const std::string& layerName);
Arguments* getLayerOutput(const std::string& layerName) const;
};
/// the N-Best results generated from one input sequence.

@ -131,12 +131,11 @@ void Trainer::testOneDataBatch(size_t batchSize, const Arguments& args) {
void TrainerPrivate::finishTestPeriod() { tester_->finishTestPeriod(); }
void Trainer::finishTestPeriod() { m->finishTestPeriod(); }
Matrix* Trainer::getLayerOutput(const std::string& layerName) {
auto nn = std::dynamic_pointer_cast<paddle::NeuralNetwork>(
this->m->getGradientMachine());
Arguments* Trainer::getLayerOutput(const std::string& layerName) const {
auto nn = this->m->getGradientMachine();
CHECK(nn) << "trainerInternal_.getGradientMachine() is not NeuralNetwork";
auto m = nn->getLayerOutput(layerName);
return Matrix::createByPaddleMatrixPtr(&m);
auto arg = nn->getLayerOutput(layerName);
return Arguments::createByPaddleArgument(&arg);
}
void Trainer::forwardOneBatch(size_t batchSize) {

@ -134,6 +134,10 @@ public:
backward(callback);
}
virtual Argument getLayerOutput(const std::string& layerName) {
return *((Argument*)nullptr);
}
// see comment in Layer.h for the function with the same name
virtual void resetState() {}

@ -282,6 +282,18 @@ void MultiGradientMachine::forwardBackward(const std::vector<Argument>& inArgs,
backwardImp(callback);
}
Argument MultiGradientMachine::getLayerOutput(const std::string& layerName) {
std::vector<Argument> args;
args.reserve(threads_.size());
for (auto& thread : threads_) {
args.push_back(thread->getGradientMachine()->getLayerOutput(layerName));
}
outLayerArgs_.concat(args, false /* use_gpu */, outArgStream_, passType_);
return outLayerArgs_;
}
void MultiGradientMachine::backwardImp(const UpdateCallback& callback) {
for (size_t i = 0; i < parameters_.size(); i++) {
if (!parameters_[i]->useGpu() || parameters_[i]->isStatic()) continue;

@ -189,6 +189,8 @@ public:
PassType passType,
const UpdateCallback& callback);
virtual Argument getLayerOutput(const std::string& layerName);
virtual void onPassEnd();
virtual void finish();
@ -314,6 +316,8 @@ protected:
std::vector<Argument> outArgs_;
hl_stream_t outArgStream_;
Argument outLayerArgs_;
/// ParameterType which needs to be merged from each GPU
std::vector<ParameterType> mergeTypes_;
int numDevices_; /* number of gpu devices */

@ -293,11 +293,10 @@ void NeuralNetwork::backward(const UpdateCallback& callback) {
}
}
MatrixPtr NeuralNetwork::getLayerOutput(const std::string& layerName) {
auto it = layerMap_.find(layerName);
CHECK(it != layerMap_.end()) << "Cannot find layer: " << layerName;
return it->second->getOutputValue();
Argument NeuralNetwork::getLayerOutput(const std::string& layerName) {
return getLayer(layerName)->getOutput();
}
void NeuralNetwork::onPassEnd() {
for (auto& layer : layers_) {
layer->onPassEnd();

@ -87,7 +87,8 @@ public:
virtual void backward(const UpdateCallback& callback = nullptr);
MatrixPtr getLayerOutput(const std::string& layerName);
virtual Argument getLayerOutput(const std::string& layerName);
const LayerPtr& getLayer(const std::string& layerName) const {
auto it = layerMap_.find(layerName);
CHECK(it != layerMap_.end()) << "Unknown layer " << layerName;

@ -42,7 +42,7 @@ void CosSimLayer::forward(PassType passType) {
/* malloc memory for the output_ if necessary */
int batchSize = getInputValue(0)->getHeight();
int size = getSize();
CHECK_EQ(forward_.size(), 1) << "Only one forward function needed";
CHECK_EQ(forward_.size(), 1UL) << "Only one forward function needed";
{
REGISTER_TIMER_INFO("CosFwResetTimer", getName().c_str());
@ -68,7 +68,7 @@ void CosSimLayer::forward(PassType passType) {
void CosSimLayer::backward(const UpdateCallback& callback) {
/* activation */ {
REGISTER_TIMER_INFO("CosBpAtvTimer", getName().c_str());
CHECK_EQ(backward_.size(), 1) << "Only one backward function needed";
CHECK_EQ(backward_.size(), 1UL) << "Only one backward function needed";
const auto outG = this->getOutputGrad();
const auto outV = this->getOutputValue();

@ -112,7 +112,7 @@ bool CosSimVecMatLayer::init(const LayerMap& layerMap,
void CosSimVecMatLayer::forward(PassType passType) {
Layer::forward(passType);
CHECK_EQ(forward_.size(), 1) << "Only one forward function needed";
CHECK_EQ(forward_.size(), 1UL) << "Only one forward function needed";
MatrixPtr inV0 = getInputValue(0);
MatrixPtr inV1 = getInputValue(1);
@ -145,7 +145,7 @@ void CosSimVecMatLayer::forward(PassType passType) {
}
void CosSimVecMatLayer::backward(const UpdateCallback& callback) {
CHECK_EQ(backward_.size(), 1) << "Only one forward function needed";
CHECK_EQ(backward_.size(), 1UL) << "Only one forward function needed";
MatrixPtr inV0 = getInputValue(0);
MatrixPtr inV1 = getInputValue(1);

@ -17,10 +17,10 @@ limitations under the License. */
TEST(RowBuffer, testAutoGrow) {
paddle::RowBuffer buf(128);
ASSERT_EQ(128, buf.getWidth());
ASSERT_EQ(128UL, buf.getWidth());
ASSERT_TRUE(buf.isAutoGrowth());
buf.resize(2);
ASSERT_EQ(2, buf.getRowCount());
ASSERT_EQ(2UL, buf.getRowCount());
for (size_t i = 0; i < buf.getWidth() * 2; ++i) {
buf.data()[i] = i;
}
@ -35,7 +35,7 @@ TEST(RowBuffer, testAutoGrow) {
data[i] = i;
}
ASSERT_EQ(3, buf.getRowCount());
ASSERT_EQ(3UL, buf.getRowCount());
for (size_t i = 0; i < buf.getRowCount() - 1; ++i) {
for (size_t j = 0; j < buf.getWidth(); ++j) {
ASSERT_NEAR(i * buf.getWidth() + j, buf.get(i)[j], 1e-5);
@ -51,7 +51,7 @@ TEST(RowBuffer, testWithMemBuf) {
std::make_shared<paddle::CpuMemoryHandle>(128 * 2 * sizeof(real));
paddle::RowBuffer buf(mem, 128);
ASSERT_TRUE(!buf.isAutoGrowth());
ASSERT_EQ(2, buf.getRowCount());
ASSERT_EQ(2UL, buf.getRowCount());
for (size_t i = 0; i < buf.getWidth() * 2; ++i) {
buf.data()[i] = i;
}

@ -23,7 +23,8 @@ __all__ = ['DataProviderConverter']
class IScanner(object):
def __init__(self, input_type, pos):
self.input_type = input_type
assert isinstance(self.input_type, dp2.InputType)
if not isinstance(self.input_type, dp2.InputType):
raise ValueError("input type should be dataprovider2.InputType")
self.pos = pos
def scan(self, dat):
@ -50,7 +51,6 @@ class DenseScanner(IScanner):
def finish_scan(self, argument):
assert isinstance(argument, swig_paddle.Arguments)
assert isinstance(self.input_type, dp2.InputType)
if self.__mat__.dtype != numpy.float32:
self.__mat__ = self.__mat__.astype(numpy.float32)
m = swig_paddle.Matrix.createDenseFromNumpy(self.__mat__, True, False)
@ -63,7 +63,6 @@ class SparseBinaryScanner(IScanner):
self.__rows__ = [0]
self.__cols__ = []
self.__height__ = 0
self.__nnz__ = 0
self.__value__ = []
def scan(self, dat):
@ -76,7 +75,6 @@ class SparseBinaryScanner(IScanner):
def finish_scan(self, argument):
assert isinstance(argument, swig_paddle.Arguments)
assert isinstance(self.input_type, dp2.InputType)
m = swig_paddle.Matrix.createSparse(self.__height__,
self.input_type.dim,
len(self.__cols__),

@ -208,7 +208,7 @@ def __monkeypatch_gradient_machine__():
output = dict()
for name in layerNames:
output[name] = __matrix_to_numpy__(self.getLayerOutput(name))
output[name] = __arguments_to_numpy__(0, self.getLayerOutput(name))
return output
swig_paddle.GradientMachine.getLayerOutputs = getLayerOutputs
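A hedged usage sketch of the patched method (the gradient machine instance and the layer name below are illustrative assumptions); after this change each entry in the returned dict is itself a dict of numpy arrays, which is why the classifier demos above index ['value'] before calling mean(0):

    # gm is assumed to be a swig_paddle.GradientMachine that has already run forward()
    outputs = gm.getLayerOutputs(['__fc_layer_0__'])      # layer name is illustrative
    fc_value = outputs['__fc_layer_0__']['value']         # numpy array of activations
    mean_prediction = fc_value.mean(0)                    # average across oversampled crops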

@ -112,6 +112,8 @@ __all__ = [
'priorbox_layer',
'spp_layer',
'pad_layer',
'eos_layer',
'layer_support',
]
@ -708,6 +710,7 @@ class MixedLayerType(LayerOutput):
# update the size which might be computed inside MixedLayer
# according to the operator's output size
self.size = ml.config.size
self.finalized = True
@wrap_name_default("mixed")
@ -1287,6 +1290,12 @@ def last_seq(input,
"""
Get Last Timestamp Activation of a sequence.
The simple usage is:
.. code-block:: python
seq = last_seq(input=layer)
:param agg_level: Aggregated level
:param name: Layer name.
:type name: basestring
@ -1325,6 +1334,12 @@ def first_seq(input,
"""
Get First Timestamp Activation of a sequence.
The simple usage is:
.. code-block:: python
seq = first_seq(input=layer)
:param agg_level: aggregation level
:param name: Layer name.
:type name: basestring
@ -1425,7 +1440,7 @@ def repeat_layer(input, num_repeats, name=None, layer_attr=None):
.. code-block:: python
expand = repeat_layer(layer, 4)
expand = repeat_layer(input=layer, num_repeats=4)
:param input: Input layer
:type input: LayerOutput
@ -1797,6 +1812,12 @@ def cos_sim(a, b, scale=1, size=1, name=None, layer_attr=None):
Note that the above computation is for one sample. Multiple samples are
processed in one batch.
The example usage is:
.. code-block:: python
cos = cos_sim(a=layer1, b=layer2, size=3)
:param name: layer name
:type name: basestring
:param a: input layer a
@ -1958,6 +1979,16 @@ def img_conv_layer(input,
pieces. The first 256/4 = 64 channels will be processed by the first 32 filters. The
remaining channels will be processed by the remaining groups of filters.
The example usage is:
.. code-block:: python
conv = img_conv_layer(input=data, filter_size=1, filter_size_y=1,
num_channels=8,
num_filters=16, stride=1,
bias_attr=False,
act=ReluActivation())
:param name: Layer name.
:type name: basestring
:param input: Layer Input.
@ -2097,6 +2128,34 @@ def img_pool_layer(input,
.. _pooling: http://ufldl.stanford.edu/tutorial/supervised/Pooling/
- ceil_mode=True:
.. math::
w = 1 + int(ceil(input\_width + 2 * padding - pool\_size) / float(stride))
h = 1 + int(ceil(input\_height + 2 * padding\_y - pool\_size\_y) / float(stride\_y))
- ceil_mode=False:
.. math::
w = 1 + int(floor(input\_width + 2 * padding - pool\_size) / float(stride))
h = 1 + int(floor(input\_height + 2 * padding\_y - pool\_size\_y) / float(stride\_y))
The example usage is:
.. code-block:: python
maxpool = img_pool_layer(input=conv,
pool_size=3,
pool_size_y=5,
num_channels=8,
stride=1,
stride_y=2,
padding=1,
padding_y=2,
pool_type=MaxPooling())
:param padding: pooling padding width.
:type padding: int
:param padding_y: pooling padding height. It's equal to padding by default.
@ -2123,19 +2182,6 @@ def img_pool_layer(input,
:param ceil_mode: Whether to use ceil mode to calculate output height and width.
Default is True. If set to False, floor mode is used instead.
- ceil_mode=True:
.. math::
w = 1 + int(ceil(input_width + 2 * padding - pool_size) / float(stride))
h = 1 + int(ceil(input_height + 2 * padding_y - pool_size_y) / float(stride_y))
- ceil_mode=False:
.. math::
w = 1 + int(floor(input_width + 2 * padding - pool_size) / float(stride))
h = 1 + int(floor(input_height + 2 * padding_y - pool_size_y) / float(stride_y))
:type ceil_mode: bool
:return: LayerOutput object.
:rtype: LayerOutput
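A small worked example of the ceil_mode formulas above (pure arithmetic; the concrete sizes are made up, and the division is taken as the quantity that ceil/floor is applied to):

    import math
    # input_width=28, padding=1, pool_size=3, stride=2
    w_ceil  = 1 + int(math.ceil((28 + 2 * 1 - 3) / 2.0))    # = 1 + ceil(13.5) = 15
    w_floor = 1 + int(math.floor((28 + 2 * 1 - 3) / 2.0))   # = 1 + floor(13.5) = 14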
@ -2197,6 +2243,15 @@ def spp_layer(input,
For details, please refer to
`Kaiming He's paper <https://arxiv.org/abs/1406.4729>`_.
The example usage is:
.. code-block:: python
spp = spp_layer(input=data,
pyramid_height=2,
num_channels=16,
pool_type=MaxPooling())
:param name: layer name.
:type name: basestring
:param input: layer's input.
@ -2285,6 +2340,12 @@ def img_cmrnorm_layer(input,
For details, please refer to
`Alex's paper <http://www.cs.toronto.edu/~fritz/absps/imagenet.pdf>`_.
The example usage is:
.. code-block:: python
norm = img_cmrnorm_layer(input=net, size=5)
:param name: layer name.
:type name: None|basestring
:param input: layer's input.
@ -2340,6 +2401,12 @@ def batch_norm_layer(input,
For details of batch normalization, please refer to this
`paper <http://arxiv.org/abs/1502.03167>`_.
The example usage is:
.. code-block:: python
norm = batch_norm_layer(input=net, act=ReluActivation())
:param name: layer name.
:type name: basestring
:param input: batch normalization input. Better be linear activation.
@ -3907,13 +3974,13 @@ def conv_shift_layer(a, b, name=None, layer_attr=None):
.. code-block:: python
conv_shift = conv_shift_layer(input=[layer1, layer2])
conv_shift = conv_shift_layer(a=layer1, b=layer2)
:param name: layer name
:type name: basestring
:param a: Input layer a.
:type a: LayerOutput
:param b: input layer b
:param b: input layer b.
:type b: LayerOutput
:param layer_attr: layer's extra attribute.
:type layer_attr: ExtraLayerAttribute
@ -4005,8 +4072,8 @@ def tensor_layer(a,
@wrap_act_default()
@layer_support()
def selective_fc_layer(input,
select,
size,
select=None,
act=None,
name=None,
pass_generation=False,
@ -4033,6 +4100,7 @@ def selective_fc_layer(input,
:type input: LayerOutput|list|tuple
:param select: The select layer. The output of select layer should be a
sparse binary matrix, and treat as the mask of selective fc.
If None, this layer acts exactly like fc_layer.
:type select: LayerOutput
:param size: The layer dimension.
:type size: int
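A hedged sketch of the two modes described above (the layer names and size are illustrative; only the select=None behaviour is what this hunk documents):

    # 'feature' and 'mask' are assumed LayerOutput objects from earlier layers
    masked_fc = selective_fc_layer(input=feature, size=128, select=mask)
    plain_fc  = selective_fc_layer(input=feature, size=128, select=None)  # behaves like fc_layer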
@ -4261,7 +4329,7 @@ def block_expand_layer(input,
.. code-block:: python
block_expand = block_expand_layer(input,
block_expand = block_expand_layer(input=layer,
num_channels=128,
stride_x=1,
stride_y=1,
@ -4465,7 +4533,7 @@ def warp_ctc_layer(input,
- You can set 'blank' to any value in the range [0, num_classes], which
should be consistent with that used in your labels.
- As a native 'softmax' activation is integrated into the warp-ctc library,
'linear' activation is expected instead in the 'input' layer.
'linear' activation is expected instead in the 'input' layer.
The simple usage:
@ -4598,6 +4666,13 @@ def crf_decoding_layer(input,
this layer will also calculate error. output.value[i] is 1 for incorrect
decoding or 0 for correct decoding.
The simple usage:
.. code-block:: python
crf_decoding = crf_decoding_layer(input=input,
size=label_dim)
:param input: The first input layer.
:type input: LayerOutput
:param size: size of this layer.

@ -18,12 +18,14 @@ import parameters
import trainer
import event
import data_type
import data_feeder
import attr
import pooling
import py_paddle.swig_paddle as api
__all__ = [
'optimizer', 'layer', 'activation', 'parameters', 'init', 'trainer',
'event', 'data_type', 'attr'
'event', 'data_type', 'attr', 'pooling', 'data_feeder'
]

@ -0,0 +1,100 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py_paddle import swig_paddle
from py_paddle import DataProviderConverter
import data_type
__all__ = ['DataFeeder']
class DataFeeder(DataProviderConverter):
"""
DataFeeder converts the data returned by paddle.reader into a data structure
of Arguments which is defined in the API. The paddle.reader usually returns
a list of mini-batch data entries. Each data entry in the list is one sample.
Each sample is a list or a tuple with one feature or multiple features.
DataFeeder converts these mini-batch data entries into Arguments in order
to feed them to the C++ interface.
The example usage:
data_types = [('image', paddle.data_type.dense_vector(784)),
('label', paddle.data_type.integer_value(10))]
reader_dict = {'image':0, 'label':1}
feeder = DataFeeder(data_types=data_types, reader_dict=reader_dict)
minibatch_data = [
( [1.0,2.0,3.0,4.0], 5, [6,7,8] ), # first sample
( [1.0,2.0,3.0,4.0], 5, [6,7,8] ) # second sample
]
# or minibatch_data = [
# [ [1.0,2.0,3.0,4.0], 5, [6,7,8] ], # first sample
# [ [1.0,2.0,3.0,4.0], 5, [6,7,8] ] # second sample
# ]
arg = feeder(minibatch_data)
"""
def __init__(self, data_types, reader_dict):
"""
:param data_types: A list to specify data name and type. Each item is
a tuple of (data_name, data_type). For example:
[('image', paddle.data_type.dense_vector(784)),
('label', paddle.data_type.integer_value(10))]
:type data_types: A list of tuple
:param reader_dict: A dictionary to specify the position of each data
in the input data.
:type reader_dict: dict()
"""
self.input_names = []
input_types = []
self.reader_dict = reader_dict
for each in data_types:
self.input_names.append(each[0])
assert isinstance(each[1], data_type.InputType)
input_types.append(each[1])
DataProviderConverter.__init__(self, input_types)
def convert(self, dat, argument=None):
"""
:param dat: A list of mini-batch data. Each sample is a list or tuple with
one feature or multiple features.
for example:
[
([0.2, 0.2], ), # first sample
([0.8, 0.3], ), # second sample
]
or,
[
[[0.2, 0.2], ], # first sample
[[0.8, 0.3], ], # second sample
]
:type dat: List
:param argument: An Arguments object contains this mini-batch data with
one or multiple features. The Arguments definition is
in the API.
:type argument: swig_paddle.Arguments
"""
def reorder_data(data):
retv = []
for each in data:
reorder = []
for name in self.input_names:
reorder.append(each[self.reader_dict[name]])
retv.append(reorder)
return retv
return DataProviderConverter.convert(self, reorder_data(dat), argument)
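To illustrate what reorder_data does before delegating to DataProviderConverter.convert, a self-contained toy run with made-up numbers (no paddle imports needed):

    # Suppose the reader yields samples as (label, image) while the feeder was built
    # with data_types ordered as [('image', ...), ('label', ...)], i.e.
    # input_names = ['image', 'label'] and reader_dict = {'image': 1, 'label': 0}.
    input_names = ['image', 'label']
    reader_dict = {'image': 1, 'label': 0}
    minibatch = [(7, [0.2, 0.2]),
                 (3, [0.8, 0.3])]
    reordered = [[sample[reader_dict[name]] for name in input_names]
                 for sample in minibatch]
    # reordered == [[[0.2, 0.2], 7], [[0.8, 0.3], 3]]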

@ -14,9 +14,9 @@
from paddle.trainer.PyDataProvider2 import \
InputType, dense_vector, sparse_binary_vector,\
sparse_vector, integer_value
sparse_vector, integer_value, integer_value_sequence
__all__ = [
'InputType', 'dense_vector', 'sparse_binary_vector', 'sparse_vector',
'integer_value'
'integer_value', 'integer_value_sequence'
]

@ -0,0 +1,8 @@
import os
__all__ = ['DATA_HOME']
DATA_HOME = os.path.expanduser('~/.cache/paddle_data_set')
if not os.path.exists(DATA_HOME):
os.makedirs(DATA_HOME)

@ -0,0 +1,39 @@
import sklearn.datasets.mldata
import sklearn.model_selection
import numpy
from config import DATA_HOME
__all__ = ['train_creator', 'test_creator']
def __mnist_reader_creator__(data, target):
def reader():
n_samples = data.shape[0]
for i in xrange(n_samples):
yield (data[i] / 255.0).astype(numpy.float32), int(target[i])
return reader
TEST_SIZE = 10000
data = sklearn.datasets.mldata.fetch_mldata(
"MNIST original", data_home=DATA_HOME)
X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
data.data, data.target, test_size=TEST_SIZE, random_state=0)
def train_creator():
return __mnist_reader_creator__(X_train, y_train)
def test_creator():
return __mnist_reader_creator__(X_test, y_test)
def unittest():
assert len(list(test_creator()())) == TEST_SIZE
if __name__ == '__main__':
unittest()
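A short hedged sketch of how these creators are consumed (train_creator is the function defined in the new module above, whose import path is not shown in this diff; the shape comments are assumptions based on the standard flattened MNIST layout):

    mnist_reader = train_creator()()             # creator -> reader function -> generator
    first_image, first_label = next(mnist_reader)
    # first_image: numpy.float32 vector of length 784, values scaled into [0, 1]
    # first_label: int digit label in [0, 9]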
