@@ -22,6 +22,7 @@ import six
 import time
 import itertools
 import collections
+from collections import defaultdict
 
 import paddle.fluid as fluid
 import paddle.fluid.core as core
@@ -257,8 +258,65 @@ class OpTest(unittest.TestCase):
         outs, _ = self._calc_output(place)
         return outs
 
-    def _calc_output(self, place, parallel=False, no_check_set=None):
+    def _create_var_from_numpy(self, value):
+        if isinstance(value, tuple):
+            data = value[0]
+            lod = value[1]
+            v = fluid.imperative.base.to_variable(value=data)
+            v._ivar.value().get_tensor().set_recursive_sequence_lengths(lod)
+            return v
+        else:
+            return fluid.imperative.base.to_variable(value)
+
+    def _calc_imperative_output(self, place, parallel=False, no_check_set=None):
+        with fluid.imperative.base.guard(place=place):
+            block = fluid.default_main_program().global_block()
+
+            # prepare input variable
+            inputs = defaultdict(list)
+            for name, np_value in six.iteritems(self.inputs):
+                if not isinstance(np_value, list):
+                    np_value = [np_value]
+
+                for i in range(len(np_value)):
+                    inputs[name].append(
+                        self._create_var_from_numpy(np_value[i]))
+
+            # prepare output variable
+            outputs = defaultdict(list)
+            for name, np_value in six.iteritems(self.outputs):
+                if not isinstance(np_value, list):
+                    np_value = [np_value]
+
+                for i in range(len(np_value)):
+                    value = np_value[i]
+                    if isinstance(value, tuple):
+                        v = block.create_var(
+                            name="%s_out%d" % (name, i),
+                            dtype=value[0].dtype,
+                            type=core.VarDesc.VarType.LOD_TENSOR,
+                            persistable=False,
+                            stop_gradient=False)
+                        v._ivar.value().get_tensor(
+                        ).set_recursive_sequence_lengths(value[1])
+                    else:
+                        v = block.create_var(
+                            name="%s_out%d" % (name, i),
+                            dtype=value.dtype,
+                            type=core.VarDesc.VarType.LOD_TENSOR,
+                            persistable=False,
+                            stop_gradient=False)
+                    outputs[name].append(v)
+
+            block.append_op(
+                type=self.op_type,
+                inputs=inputs,
+                outputs=outputs,
+                attrs=self.attrs)
+
+        return outputs
+
+    def _calc_output(self, place, parallel=False, no_check_set=None):
         program = Program()
         block = program.global_block()
         self._append_ops(block)
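For reference, a minimal illustration (not part of the patch; the variable names are hypothetical) of the two input encodings _create_var_from_numpy accepts: a bare ndarray becomes a dense imperative variable, while an (ndarray, lod) tuple additionally carries the recursive sequence lengths that are set on the underlying tensor.

    import numpy as np

    # Dense input: converted via fluid.imperative.base.to_variable as-is.
    dense_input = np.random.random((6, 4)).astype("float32")

    # LoD input: the second element holds the recursive sequence lengths
    # that _create_var_from_numpy forwards to the tensor.
    lod_input = (np.random.random((6, 4)).astype("float32"), [[2, 4]])

    inputs = {'X': lod_input, 'Y': dense_input}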
@@ -305,8 +363,13 @@ class OpTest(unittest.TestCase):
                                 place,
                                 atol,
                                 no_check_set=None,
-                                equal_nan=False):
+                                equal_nan=False,
+                                check_imperative=False):
+        if check_imperative:
+            imperative_outs = self._calc_imperative_output(
+                place, no_check_set=no_check_set)
         outs, fetch_list = self._calc_output(place, no_check_set=no_check_set)
 
         for out_name, out_dup in Operator.get_op_outputs(self.op_type):
             if out_name not in self.outputs:
                 continue
@@ -330,6 +393,10 @@ class OpTest(unittest.TestCase):
                                          type(sub_out))
                 for item in sub_out:
                     sub_out_name, expect = item[0], item[1]
+                    if check_imperative:
+                        imperative_actual = imperative_outs[sub_out_name][0]
+                        imperative_actual_t = np.array(
+                            imperative_actual._ivar.value().get_tensor())
                     idx = find_actual(sub_out_name, fetch_list)
                     actual = outs[idx]
                     actual_t = np.array(actual)
@@ -340,12 +407,31 @@ class OpTest(unittest.TestCase):
                             actual_t, expect_t, atol=atol, equal_nan=equal_nan),
                         "Output (" + sub_out_name + ") has diff at " +
                         str(place))
+                    if check_imperative:
+                        self.assertTrue(
+                            np.allclose(
+                                imperative_actual_t,
+                                expect_t,
+                                atol=atol,
+                                equal_nan=equal_nan),
+                            "Output (" + sub_out_name + ") has diff at " +
+                            str(place) + " in imperative mode")
                     if isinstance(expect, tuple):
                         self.assertListEqual(
                             actual.recursive_sequence_lengths(), expect[1],
                             "Output (" + sub_out_name +
                             ") has different lod at " + str(place))
+                        if check_imperative:
+                            self.assertListEqual(
+                                imperative_actual._ivar.value().get_tensor()
+                                .recursive_sequence_lengths(), expect[1],
+                                "Output (" + sub_out_name + ") has different lod at " +
+                                str(place) + " in imperative mode")
             else:
+                if check_imperative:
+                    imperative_actual = imperative_outs[out_name][0]
+                    imperative_actual_t = np.array(
+                        imperative_actual._ivar.value().get_tensor())
                 idx = find_actual(out_name, fetch_list)
                 actual = outs[idx]
                 actual_t = np.array(actual)
@@ -357,10 +443,27 @@ class OpTest(unittest.TestCase):
                     "Output (" + out_name + ") has diff at " + str(place) +
                     "\nExpect " + str(expect_t) + "\n" + "But Got" +
                     str(actual_t) + " in class " + self.__class__.__name__)
+                if check_imperative:
+                    self.assertTrue(
+                        np.allclose(
+                            imperative_actual_t,
+                            expect_t,
+                            atol=atol,
+                            equal_nan=equal_nan),
+                        "Output (" + out_name + ") has diff at " + str(place) +
+                        "\nExpect " + str(expect_t) + "\n" + "But Got" +
+                        str(imperative_actual_t) + " in class " +
+                        self.__class__.__name__)
                 if isinstance(expect, tuple):
                     self.assertListEqual(actual.recursive_sequence_lengths(),
                                          expect[1], "Output (" + out_name +
                                          ") has different lod at " + str(place))
+                    if check_imperative:
+                        self.assertListEqual(
+                            imperative_actual._ivar.value().get_tensor()
+                            .recursive_sequence_lengths(), expect[1],
+                            "Output (" + out_name + ") has different lod at " +
+                            str(place) + " in imperative mode")
 
     def _get_places(self):
         if self.dtype == np.float16:
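The imperative-mode checks above pull each result back into numpy before calling np.allclose. A sketch of that conversion (the helper name is hypothetical; the accessors are the ones the diff itself uses):

    import numpy as np

    def imperative_to_numpy(v):
        # Materialize an imperative variable's tensor as an ndarray, the
        # same way check_output_with_place does before np.allclose.
        return np.array(v._ivar.value().get_tensor())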
@@ -383,10 +486,15 @@ class OpTest(unittest.TestCase):
             places.append(core.CUDAPlace(0))
         return places
 
-    def check_output(self, atol=1e-5, no_check_set=None, equal_nan=False):
+    def check_output(self,
+                     atol=1e-5,
+                     no_check_set=None,
+                     equal_nan=False,
+                     check_imperative=False):
         places = self._get_places()
         for place in places:
-            self.check_output_with_place(place, atol, no_check_set, equal_nan)
+            self.check_output_with_place(place, atol, no_check_set, equal_nan,
+                                         check_imperative)
 
     def check_output_customized(self, checker):
         places = self._get_places()
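For reference, a minimal sketch of how a unit test opts in to the new flag, assuming the standard OpTest conventions; the scale op and its attribute are chosen purely for illustration:

    import numpy as np

    from op_test import OpTest


    class TestScaleOpImperative(OpTest):
        def setUp(self):
            self.op_type = "scale"
            self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
            self.attrs = {'scale': 2.0}
            self.outputs = {'Out': self.inputs['X'] * self.attrs['scale']}

        def test_check_output(self):
            # Runs the usual static-graph check and, with the new flag, also
            # executes the op through fluid.imperative and compares both
            # results against the numpy reference.
            self.check_output(check_imperative=True)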