@@ -20,24 +20,29 @@ class TestSeqProject(OpTest):
         # one level, batch size
         x = np.random.uniform(0.1, 1, [self.input_size[0],
                                        self.input_size[1]]).astype('float32')

-        self.begin_pad = np.max([0, -self.context_start])
-        self.end_pad = np.max([0, self.context_start + self.context_length - 1])
-        self.total_pad = self.begin_pad + self.end_pad
-        if self.total_pad == 0:
-            self.total_pad = 1
-        # PaddingData mast be not empty.
-        # Otherwise(EnforceNotMet: enforce numel() > 0 failed, 0 <= 0)
-        padding_data = np.random.uniform(
-            0.1, 1, [self.total_pad, self.input_size[1]]).astype('float32')
         w = np.random.uniform(
             0.1, 1, [self.context_length, self.input_size[1]]).astype('float32')
+        begin_pad = np.max([0, -self.context_start])
+        end_pad = np.max([0, self.context_start + self.context_length - 1])
+        total_pad = begin_pad + end_pad
+        padding_data = np.random.uniform(
+            0.1, 1, [total_pad, self.input_size[1]]).astype('float32')
+        self.pad_data = padding_data
         self.inputs = {
             'X': (x, self.lod),
-            'PaddingData': (padding_data, [[0, self.total_pad]]),
-            'Filter': (w, [[0, self.context_length]])
+            'Filter': w,
         }
+        self.inputs_val = ['X', 'Filter']
+        self.inputs_val_no_x = ['Filter']
+        self.inputs_val_no_f = ['X']
+
+        if total_pad != 0:
+            self.inputs['PaddingData'] = padding_data
+            self.inputs_val = ['X', 'PaddingData', 'Filter']
+            self.inputs_val_no_x = ['PaddingData', 'Filter']
+            self.inputs_val_no_f = ['PaddingData', 'X']

         self.attrs = {
             'context_start': self.context_start,
             'context_length': self.context_length,
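The rewritten `setUp` above only feeds `PaddingData` when `total_pad != 0` (instead of forcing a dummy one-row pad), keeps the raw array on `self.pad_data` for `compute()`, and prepares the `inputs_val*` name lists that the gradient checks below select from. A minimal standalone check of the pad-size arithmetic, with `context_start`/`context_length` values picked purely for illustration:

```python
# Illustrative values, not from the test: context_start=-2, context_length=3.
context_start, context_length = -2, 3
begin_pad = max(0, -context_start)                    # rows of up-padding
end_pad = max(0, context_start + context_length - 1)  # rows of down-padding
total_pad = begin_pad + end_pad
assert (begin_pad, end_pad, total_pad) == (2, 0, 2)
```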
@@ -51,7 +56,7 @@ class TestSeqProject(OpTest):
     def compute(self):
         x, lod = self.inputs['X']
         filter = self.inputs['Filter']
-        pading_data, _ = self.inputs['PaddingData']
+        pading_data = self.pad_data
         out = np.zeros((self.input_size[0], self.context_length *
                         self.input_size[1])).astype('float32')
         lod = lod[0]
@@ -90,12 +95,12 @@ class TestSeqProject(OpTest):
             out[out_begin:out_end, j * self.input_size[1]:(j + 1) *
                 self.input_size[1]] += in_sub

-        filter_dim = filter[0].shape
+        filter_dim = filter.shape
         output_dim = self.outputs['Out'].shape
-        filter[0].shape = filter_dim[0] * filter_dim[1]
+        filter.shape = filter_dim[0] * filter_dim[1]
         self.outputs['Out'].shape = (output_dim[0], )
-        np.dot(out, filter[0], out=self.outputs['Out'])
-        filter[0].shape = filter_dim
+        np.dot(out, filter, out=self.outputs['Out'])
+        filter.shape = filter_dim
         self.outputs['Out'].shape = output_dim

     def test_check_output(self):
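Since `Filter` is now fed as a plain ndarray rather than an `(array, lod)` tuple, `compute()` indexes `filter` directly. The reshape/`np.dot`/restore sequence above is just a flattened matrix product; a self-contained sketch with made-up sizes:

```python
import numpy as np

# One row of context_length * hidden im2col-style features per time step.
T, context_length, hidden = 4, 3, 2
col = np.random.uniform(0.1, 1, (T, context_length * hidden)).astype('float32')
w = np.random.uniform(0.1, 1, (context_length, hidden)).astype('float32')

# Flattening the filter turns the projection into a single dot product.
out = np.dot(col, w.reshape(context_length * hidden))
assert out.shape == (T,)
```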
@@ -104,16 +109,14 @@ class TestSeqProject(OpTest):
     def test_check_grad(self):
         if self.padding_trainable:
             self.check_grad(
-                set(['X', 'PaddingData', 'Filter']),
-                'Out',
-                max_relative_error=0.05)
+                set(self.inputs_val), 'Out', max_relative_error=0.05)

     def test_check_grad_input(self):
         self.check_grad(
             ['X'],
             'Out',
             max_relative_error=0.05,
-            no_grad_set=set(['PaddingData', 'Filter']))
+            no_grad_set=set(self.inputs_val_no_x))

     def test_check_grad_padding_data(self):
         if self.padding_trainable:
@@ -128,19 +131,20 @@ class TestSeqProject(OpTest):
             ['Filter'],
             'Out',
             max_relative_error=0.05,
-            no_grad_set=set(['X', 'PaddingData']))
+            no_grad_set=set(self.inputs_val_no_f))

     def test_check_grad_input_filter(self):
-        self.check_grad(
-            ['X', 'Filter'],
-            'Out',
-            max_relative_error=0.05,
-            no_grad_set=set(['PaddingData']))
+        if self.padding_trainable:
+            self.check_grad(
+                ['X', 'Filter'],
+                'Out',
+                max_relative_error=0.05,
+                no_grad_set=set(['PaddingData']))

     def test_check_grad_padding_input(self):
         if self.padding_trainable:
             self.check_grad(
-                ['X', 'PaddingData'],
+                self.inputs_val_no_f,
                 'Out',
                 max_relative_error=0.05,
                 no_grad_set=set(['Filter']))
@@ -148,7 +152,7 @@ class TestSeqProject(OpTest):
     def test_check_grad_padding_filter(self):
         if self.padding_trainable:
             self.check_grad(
-                ['PaddingData', 'Filter'],
+                self.inputs_val_no_x,
                 'Out',
                 max_relative_error=0.05,
                 no_grad_set=set(['X']))
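The grad-check edits above all follow one pattern: the inputs to check and the `no_grad_set` come from the lists prepared in `setUp()` (`inputs_val`, `inputs_val_no_x`, `inputs_val_no_f`), so each test stays valid when `PaddingData` is absent. Conceptually, `check_grad` compares analytic gradients against numeric ones within `max_relative_error`; a framework-free sketch of the numeric side (illustrative only, not the OpTest implementation):

```python
import numpy as np

def numeric_grad(f, x, eps=1e-3):
    """Central-difference gradient of scalar f at x (illustration only)."""
    grad = np.zeros_like(x)
    flat_x, flat_g = x.reshape(-1), grad.reshape(-1)  # views into x and grad
    for i in range(flat_x.size):
        orig = flat_x[i]
        flat_x[i] = orig + eps
        hi = f(x)
        flat_x[i] = orig - eps
        lo = f(x)
        flat_x[i] = orig
        flat_g[i] = (hi - lo) / (2 * eps)
    return grad

# For f(x) = sum(x * w), the gradient w.r.t. x is w.
w = np.random.rand(3, 2)
g = numeric_grad(lambda x: float((x * w).sum()), np.random.rand(3, 2))
assert np.allclose(g, w, atol=1e-4)
```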
@@ -191,69 +195,5 @@ class TestSeqProjectCase2(TestSeqProject):
                     [self.input_size[0]]]


-'''
-class TestSeqProjectCases(TestSeqProject):
-    def setUp(self):
-        self.init_test_case()
-        self.op_type = 'sequence_project'
-
-        num = 0
-        for context_start in [-5, -3, -1, 0, 3]:
-            for context_length in [1, 2, 5, 7]:
-                for batch_size in [1, 2, 5, 7]:
-                    for padding_trainable in [False, True]:
-
-                        if context_length == 1 and context_start == 0 and padding_trainable:
-                            continue
-
-                        self.context_start = context_start
-                        self.context_length = context_length
-                        self.padding_trainable = padding_trainable
-                        self.input_size = [batch_size, 23]
-                        x = np.random.uniform(0.1, 1,
-                                              self.input_size).astype('float32')
-                        self.lod = [[0, self.input_size[0]]]
-                        if self.input_size[0] > 2:
-                            idx = range(self.input_size[0])
-                            del idx[0]
-                            self.lod = [
-                                [0] + np.sort(random.sample(idx, 2)).tolist() +
-                                [self.input_size[0]]
-                            ]
-
-                        self.begin_pad = np.max([0, -self.context_start])
-                        self.end_pad = np.max([0, self.context_start + self.context_length - 1])
-                        self.total_pad = self.begin_pad + self.end_pad
-                        if self.total_pad == 0:
-                            self.total_pad = 1
-                        # PaddingData mast be not empty. Otherwise(EnforceNotMet: enforce numel() > 0 failed, 0 <= 0)
-                        padding_data = np.random.uniform(
-                            0.1, 1, [self.total_pad, self.input_size[1]]).astype('float32')
-
-                        self.inputs = {
-                            'X': (x, self.lod),
-                            'PaddingData': (padding_data, [[0, self.total_pad]])
-                        }
-                        self.attrs = {
-                            'context_start': self.context_start,
-                            'context_length': self.context_length,
-                            'padding_trainable': self.padding_trainable,
-                            'context_stride': self.context_stride
-                        }
-                        out = np.zeros((self.input_size[0], self.input_size[1] *
-                                        self.context_length)).astype('float32')
-                        self.outputs = {'Out': out}
-
-                        print num
-                        print self.attrs
-                        print batch_size
-                        print padding_trainable
-                        print "$$$$$$$$$$$$$"
-
-                        self.compute()
-                        self.test_check_output()
-
-                        num += 1
-'''

 if __name__ == '__main__':
     unittest.main()