|
|
|
@ -178,7 +178,32 @@ def SE_ResNeXt152():
|
|
|
|
|
return loss
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class ParallelExecutor(unittest.TestCase):
|
|
|
|
|
class TestParallelExecutorBase(unittest.TestCase):
    """Shared harness: build a network, train it with ParallelExecutor on
    CUDA, and assert that the loss decreased after training."""

    def check_network_convergence(self, method, memory_opt=True, iter=10):
        """Train the network built by ``method`` and check that it converges.

        Args:
            method: zero-argument callable that constructs the network inside
                the active program guard and returns its loss variable.
            memory_opt: when True, run ``fluid.memory_optimize`` on the main
                program before execution.
            iter: number of extra training steps between the first and the
                last loss measurement.  NOTE(review): this parameter shadows
                the ``iter`` builtin; kept as-is for caller compatibility.
        """
        # Fresh main/startup programs so each convergence check is isolated
        # from any previously built graph.
        main = fluid.Program()
        startup = fluid.Program()
        with fluid.program_guard(main, startup):
            loss = method()
            adam = fluid.optimizer.Adam()
            adam.minimize(loss)
            if memory_opt:
                fluid.memory_optimize(main)

            # NOTE(review): the startup program is never run explicitly here;
            # presumably ParallelExecutor initializes parameters itself --
            # confirm against the ParallelExecutor API.
            exe = fluid.ParallelExecutor(loss_name=loss.name, use_cuda=True)
            first_loss, = exe.run([loss.name])
            first_loss = numpy.array(first_loss)

            # Train for `iter` steps without fetching anything.
            for i in xrange(iter):
                exe.run([])

            last_loss, = exe.run([loss.name])
            last_loss = numpy.array(last_loss)

            print first_loss, last_loss

            # Converged iff the loss went down.
            self.assertGreater(first_loss[0], last_loss[0])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestMNIST(TestParallelExecutorBase):
|
|
|
|
|
@classmethod
|
|
|
|
|
def setUpClass(cls):
|
|
|
|
|
# Convert mnist to recordio file
|
|
|
|
@ -195,6 +220,16 @@ class ParallelExecutor(unittest.TestCase):
|
|
|
|
|
fluid.recordio_writer.convert_reader_to_recordio_file(
|
|
|
|
|
'./mnist.recordio', reader, feeder)
|
|
|
|
|
|
|
|
|
|
def test_simple_fc(self):
    """The plain fully-connected network must converge."""
    network_builder = simple_fc_net
    self.check_network_convergence(method=network_builder)
|
|
|
|
|
|
|
|
|
|
def test_batchnorm_fc(self):
    """The FC network with batch normalization must converge."""
    network_builder = fc_with_batchnorm
    self.check_network_convergence(method=network_builder)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestResnet(TestParallelExecutorBase):
|
|
|
|
|
@classmethod
|
|
|
|
|
def setUpClass(cls):
|
|
|
|
|
with fluid.program_guard(fluid.Program(), fluid.Program()):
|
|
|
|
|
reader = paddle.batch(flowers.train(), batch_size=4)
|
|
|
|
|
feeder = fluid.DataFeeder(
|
|
|
|
@ -208,34 +243,5 @@ class ParallelExecutor(unittest.TestCase):
|
|
|
|
|
fluid.recordio_writer.convert_reader_to_recordio_file(
|
|
|
|
|
"./flowers.recordio", reader, feeder)
|
|
|
|
|
|
|
|
|
|
def test_simple_fc(self):
    """Convergence check for the plain fully-connected network."""
    self.check_network_convergence(method=simple_fc_net)
|
|
|
|
|
|
|
|
|
|
def test_batchnorm_fc(self):
    """Convergence check for the FC network with batch normalization."""
    self.check_network_convergence(method=fc_with_batchnorm)
|
|
|
|
|
|
|
|
|
|
def check_network_convergence(self, method, memory_opt=True, iter=10):
    """Train the network built by ``method`` and check that it converges.

    Args:
        method: zero-argument callable that constructs the network inside
            the active program guard and returns its loss variable.
        memory_opt: when True, run ``fluid.memory_optimize`` on the main
            program before execution.
        iter: number of extra training steps between the first and the last
            loss measurement.  NOTE(review): shadows the ``iter`` builtin;
            kept as-is for caller compatibility.
    """
    # Fresh programs so this check is isolated from any previous graph.
    main = fluid.Program()
    startup = fluid.Program()
    with fluid.program_guard(main, startup):
        loss = method()
        adam = fluid.optimizer.Adam()
        adam.minimize(loss)
        if memory_opt:
            fluid.memory_optimize(main)

        # NOTE(review): the startup program is never run explicitly here;
        # presumably ParallelExecutor initializes parameters itself --
        # confirm against the ParallelExecutor API.
        exe = fluid.ParallelExecutor(loss_name=loss.name, use_cuda=True)
        first_loss, = exe.run([loss.name])
        first_loss = numpy.array(first_loss)

        # Train for `iter` steps without fetching anything.
        for i in xrange(iter):
            exe.run([])

        last_loss, = exe.run([loss.name])
        last_loss = numpy.array(last_loss)

        print first_loss, last_loss

        # Converged iff the loss went down.
        self.assertGreater(first_loss[0], last_loss[0])
|
|
|
|
|
|
|
|
|
|
def test_resnet(self):
    # Convergence check for the SE-ResNeXt-152 model.
    # NOTE(review): two consecutive calls with different iteration counts
    # (20 then 200) look like an old-line/new-line merge remnant -- confirm
    # whether the iter=20 run is an intentional warm-up or should be removed.
    self.check_network_convergence(SE_ResNeXt152, iter=20)
    self.check_network_convergence(SE_ResNeXt152, iter=200)
|
|
|
|
|