diff --git a/python/paddle/fluid/tests/unittests/ir/inference/inference_pass_test.py b/python/paddle/fluid/tests/unittests/ir/inference/inference_pass_test.py
index 0b8ea1f939..d3a53bbbff 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/inference_pass_test.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/inference_pass_test.py
@@ -78,7 +78,10 @@ class InferencePassTest(unittest.TestCase):
             shape = tensor_shapes[name]
             shape[0] = 1
             tensor = predictor.get_input_tensor(name)
-            tensor.copy_from_cpu(list(self.feeds.values())[i])
+            feed_data = list(self.feeds.values())[i]
+            tensor.copy_from_cpu(np.array(feed_data))
+            if type(feed_data) == fluid.LoDTensor:
+                tensor.set_lod(feed_data.lod())
 
         predictor.zero_copy_run()
 
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add2_act_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add2_act_fuse_pass.py
index 16979488a6..d6dbd397b9 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add2_act_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add2_act_fuse_pass.py
@@ -44,7 +44,8 @@ class ConvElementwiseAdd2ActFusePassTest(InferencePassTest):
 
     def test_check_output(self):
         if core.is_compiled_with_cuda():
-            self.check_output_with_option([True])
+            use_gpu = True
+            self.check_output_with_option(use_gpu)
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add_act_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add_act_fuse_pass.py
index f4014f7cd4..2e9035420d 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add_act_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add_act_fuse_pass.py
@@ -46,7 +46,8 @@ class ConvElementwiseAddActFusePassTest(InferencePassTest):
 
     def test_check_output(self):
         if core.is_compiled_with_cuda():
-            self.check_output_with_option([True])
+            use_gpu = True
+            self.check_output_with_option(use_gpu)
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add_fuse_pass.py
index cea007d56e..7c4e0d6e76 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add_fuse_pass.py
@@ -42,7 +42,8 @@ class ConvElementwiseAddFusePassTest(InferencePassTest):
 
     def test_check_output(self):
         if core.is_compiled_with_cuda():
-            self.check_output_with_option([True])
+            use_gpu = True
+            self.check_output_with_option(use_gpu)
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_transpose_flatten_concat_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_transpose_flatten_concat_fuse_pass.py
index 6444264f80..dfcd1758db 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_transpose_flatten_concat_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_transpose_flatten_concat_fuse_pass.py
@@ -42,7 +42,8 @@ class TransposeFlattenConcatFusePassTest(InferencePassTest):
     def test_check_output(self):
         # There is no cpu pass for transpose_flatten_concat_fuse
         if core.is_compiled_with_cuda():
-            self.check_output_with_option([True])
+            use_gpu = True
+            self.check_output_with_option(use_gpu)
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py
index 41f02b0427..4661333ffe 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py
@@ -48,7 +48,8 @@ class TransposeFlattenConcatFusePassTRTTest(InferencePassTest):
     def test_check_output(self):
         # There is no cpu pass for transpose_flatten_concat_fuse
        if core.is_compiled_with_cuda():
-            self.check_output_with_option([True])
+            use_gpu = True
+            self.check_output_with_option(use_gpu)
 
 
 if __name__ == "__main__":