@@ -464,6 +464,39 @@ class TestDistLookupTable(TestDistLookupTableBase):
        self.assertEqual([op.type for op in trainer.blocks[0].ops], ops)


class TestAsyncLocalLookupTable(TestDistLookupTableBase):
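    # Async-mode transpile with a sparse embedding that does not use the
    # distributed lookup-table path (is_distributed=False), so the table is
    # optimized on the pserver like any other sparse parameter.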
    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=False)

    def transpiler_test_impl(self):
        config = fluid.DistributeTranspilerConfig()
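        # asynchronous update mode: trainers push grads and pull params
        # without the barriers used in sync training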
        config.sync_mode = False
        pserver1, startup1 = self.get_pserver(self.pserver1_ep, config)
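        # only the transpiled pserver main program is inspected below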

        self.assertEqual(len(pserver1.blocks), 3)
        # 0 listen_and_serv
        # 1 optimize for fc_w or fc_b (adam)
        self.assertEqual([op.type for op in pserver1.blocks[1].ops],
                         ["adam", "scale", "scale"])
        # 2 optimize for the table (adam)
        # NOTE: if the param is not SelectedRows, the grad will be scaled
        # to grad / trainer_num
        self.assertEqual([op.type for op in pserver1.blocks[2].ops],
                         ["adam", "scale", "scale"])

        trainer = self.get_trainer(config)
        self.assertEqual(len(trainer.blocks), 1)
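        # expected trainer ops: forward, backward (each grad is sent as soon
        # as it is ready), then recv of the updated params; async mode emits
        # no send_barrier/fetch_barrier ops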
        ops = [
            'lookup_table', 'sequence_pool', 'lookup_table', 'sequence_pool',
            'concat', 'mul', 'elementwise_add', 'cross_entropy', 'mean',
            'fill_constant', 'mean_grad', 'cross_entropy_grad',
            'elementwise_add_grad', 'send', 'mul_grad', 'send', 'concat_grad',
            'sequence_pool_grad', 'lookup_table_grad', 'sequence_pool_grad',
            'lookup_table_grad', 'sum', 'split_selected_rows', 'send', 'recv',
            'recv', 'recv', 'concat'
        ]
        self.assertEqual([op.type for op in trainer.blocks[0].ops], ops)


class TestAsyncDistLookupTable(TestDistLookupTableBase):
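    # Same async config, but with is_distributed=True the embedding goes
    # through the distributed lookup-table path instead of being treated as
    # an ordinary sparse parameter.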
    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=True)