Merge pull request #13815 from jacquesqiao/optimize-pyreader

optimize pyreader
Branch: revert-14324-fix_vlog
Author: Qiao Longfei, committed by GitHub
Commit: 5b7a9dd7ac

@@ -189,6 +189,7 @@ paddle.fluid.layers.batch ArgSpec(args=['reader', 'batch_size'], varargs=None, k
 paddle.fluid.layers.double_buffer ArgSpec(args=['reader', 'place', 'name'], varargs=None, keywords=None, defaults=(None, None))
 paddle.fluid.layers.random_data_generator ArgSpec(args=['low', 'high', 'shapes', 'lod_levels', 'for_parallel'], varargs=None, keywords=None, defaults=(True,))
 paddle.fluid.layers.py_reader ArgSpec(args=['capacity', 'shapes', 'dtypes', 'lod_levels', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, None, True))
+paddle.fluid.layers.create_py_reader_by_data ArgSpec(args=['capacity', 'feed_list', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, True))
 paddle.fluid.layers.Preprocessor.__init__ ArgSpec(args=['self', 'reader', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.Preprocessor.block ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
 paddle.fluid.layers.Preprocessor.inputs ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
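The new API.spec entry registers fluid.layers.create_py_reader_by_data, a variant of py_reader that infers shapes, dtypes, and LoD levels from existing data layers instead of taking them as explicit arguments. A minimal usage sketch based only on the ArgSpec above (variable names and sizes are illustrative, not taken from this PR):

import paddle.fluid as fluid

# Feed variables the reader will produce; names and shapes are illustrative.
image = fluid.layers.data(name='image', shape=[784], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')

# Shapes, dtypes and LoD levels come from feed_list, so only the queue
# capacity has to be given; name and use_double_buffer keep their defaults.
reader = fluid.layers.create_py_reader_by_data(
    capacity=64, feed_list=[image, label], name=None, use_double_buffer=True)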

File diff suppressed because it is too large.

@@ -53,15 +53,24 @@ def simple_fc_net(in_size,
                   hidden_sizes,
                   batch_size,
                   queue_capacity,
-                  use_double_buffer=False):
-    reader = fluid.layers.py_reader(
-        capacity=queue_capacity,
-        shapes=[[-1, in_size], [-1, 1]],
-        lod_levels=[0, 0],
-        dtypes=['float32', 'int64'],
-        use_double_buffer=False)
-    feed_queue = reader.queue
-    reader = fluid.layers.batch(reader, batch_size=batch_size)
+                  use_double_buffer=False,
+                  use_feed_list=True):
+    if use_feed_list:
+        data = fluid.layers.data(name="data", dtype='float32', shape=[in_size])
+        label = fluid.layers.data(name='label', dtype='int64', shape=[1])
+        py_reader = fluid.layers.create_py_reader_by_data(
+            capacity=queue_capacity,
+            use_double_buffer=False,
+            feed_list=[data, label])
+    else:
+        py_reader = fluid.layers.py_reader(
+            capacity=queue_capacity,
+            shapes=[[-1, in_size], [-1, 1]],
+            lod_levels=[0, 0],
+            dtypes=['float32', 'int64'],
+            use_double_buffer=False)
+    feed_queue = py_reader.queue
+    reader = fluid.layers.batch(py_reader, batch_size=batch_size)
     if use_double_buffer:
         reader = fluid.layers.double_buffer(reader)
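The middle of simple_fc_net is unchanged, so the diff does not show how the reader is consumed before the Adam/minimize lines in the next hunk. As a rough, hedged sketch of the usual pattern inside such a function body (layer sizes and activations here are placeholders, not the file's exact code):

# Hedged sketch of how simple_fc_net presumably consumes the reader.
in_data, label = fluid.layers.read_file(reader)
hidden = in_data
for hidden_size in hidden_sizes:
    hidden = fluid.layers.fc(input=hidden, size=hidden_size, act='tanh')
predict = fluid.layers.fc(input=hidden, size=class_num, act='softmax')
loss = fluid.layers.mean(
    fluid.layers.cross_entropy(input=predict, label=label))

Both construction paths feed the same consumption code, which is presumably why only the reader-construction branch needed to change.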
@@ -83,7 +92,7 @@ def simple_fc_net(in_size,
     optimizer = fluid.optimizer.Adam()
     optimizer.minimize(loss)
-    return in_data, label, loss, optimizer, feed_queue
+    return in_data, label, loss, optimizer, feed_queue, py_reader

 class TestPyReaderUsingExecutor(unittest.TestCase):
@@ -100,16 +109,22 @@ class TestPyReaderUsingExecutor(unittest.TestCase):
                          if core.is_compiled_with_cuda() else [False]):
             for use_parallel_executor in [False, True]:
                 for use_double_buffer in [False, True]:
-                    print('Test Parameters:'),
-                    print({
-                        'use_cuda': use_cuda,
-                        'use_parallel_executor': use_parallel_executor,
-                        'use_double_buffer': use_double_buffer
-                    })
-                    self.main(use_cuda, use_parallel_executor,
-                              use_double_buffer)
+                    for use_feed_list in [False, True]:
+                        for use_decorate_paddle_reader in [False, True]:
+                            print('Test Parameters:'),
+                            print({
+                                'use_cuda': use_cuda,
+                                'use_parallel_executor': use_parallel_executor,
+                                'use_double_buffer': use_double_buffer,
+                                'use_feed_list': use_feed_list,
+                                'use_decorate_paddle_reader':
+                                use_decorate_paddle_reader
+                            })
+                            self.main(use_cuda, use_parallel_executor,
+                                      use_double_buffer, use_feed_list,
+                                      use_decorate_paddle_reader)

-    def random_reader(self):
+    def tensor_reader(self, use_decorate_paddle_reader):
         def reader():
             self.inputs = []
             cnt = 0
@@ -133,34 +148,43 @@ class TestPyReaderUsingExecutor(unittest.TestCase):
                 elif not self.use_double_buffer:
                     break
-                yield tensors
+                if use_decorate_paddle_reader:
+                    yield [(in_data, label)]
+                else:
+                    yield tensors
                 cnt += 1
-            yield None
+            if not use_decorate_paddle_reader:
+                yield None

         return reader

     def main(self,
              use_cuda=True,
              use_parallel_executor=False,
-             use_double_buffer=False):
+             use_double_buffer=False,
+             use_feed_list=False,
+             use_decorate_paddle_reader=False):
         assert not use_cuda or use_cuda and core.is_compiled_with_cuda()
         self.use_cuda = use_cuda
         self.use_parallel_executor = use_parallel_executor
         self.use_double_buffer = use_double_buffer
+        self.use_feed_list = use_feed_list
+        self.use_decorate_paddle_reader = use_decorate_paddle_reader

         startup_program = fluid.Program()
         main_program = fluid.Program()
         with fluid.program_guard(main_program, startup_program):
-            in_data, label, loss, optimizer, feed_queue = simple_fc_net(
+            in_data, label, loss, optimizer, feed_queue, py_reader = simple_fc_net(
                 in_size=self.in_size,
                 class_num=self.class_num,
                 hidden_sizes=self.hidden_sizes,
                 batch_size=self.batch_size,
                 queue_capacity=self.queue_capacity,
-                use_double_buffer=self.use_double_buffer)
+                use_double_buffer=self.use_double_buffer,
+                use_feed_list=self.use_feed_list)

         place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
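The two yield branches in tensor_reader reflect the two feeding paths: with decorate_paddle_reader the generator yields plain numpy (in_data, label) pairs, while the manual-queue path yields batches of LoDTensors. The diff does not show how those tensors are built; a hypothetical helper along these lines (not part of this PR) is the usual way to wrap a numpy array for the queue path:

import paddle.fluid.core as core

def as_lod_tensor(np_array, place=None):
    # Hypothetical helper: copy a numpy array into a LoDTensor so it can be
    # appended to a LoDTensorArray and pushed into the reader's blocking queue.
    tensor = core.LoDTensor()
    tensor.set(np_array, place if place is not None else core.CPUPlace())
    return tensor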
@@ -178,10 +202,14 @@ class TestPyReaderUsingExecutor(unittest.TestCase):
             main_exe = startup_exe
             self.batch_size_times = 1

-        reader = self.random_reader()
-        thread = threading.Thread(
-            target=feed_data, args=(feed_queue, reader))
-        thread.start()
+        reader = self.tensor_reader(use_decorate_paddle_reader)
+        if use_decorate_paddle_reader:
+            py_reader.decorate_paddle_reader(reader)
+            py_reader.start()
+        else:
+            thread = threading.Thread(
+                target=feed_data, args=(feed_queue, reader))
+            thread.start()

         self.outputs = []
         for _ in range(self.iterations):
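feed_data, the thread target used on the manual-queue branch, is defined earlier in the file and is not part of this diff. Assuming it behaves like a typical queue feeder for the reader's blocking queue (push/close are assumed queue methods, so this is a sketch rather than the file's actual code):

import threading

def feed_data(feed_queue, reader):
    # Drain the generator and push each item into the blocking queue;
    # a None item or a failed push (queue closed) ends the thread.
    data_generator = reader()
    while True:
        data = next(data_generator, None)
        if data is None or not feed_queue.push(data):
            break
    feed_queue.close()

On the decorate_paddle_reader branch no explicit thread is needed: py_reader.start() hands the decorated reader over to py_reader's own feeding loop, which is the point of the new if/else above.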
