Implement a common python unittest to test the ir passes. (#22209)
* Implement a common python unittest to test the IR passes. test=develop
* Save the results in np.array and support startup on CPU. test=develop
* Fix the unittest. test=develop
* Add check_program to check whether the optimized program is different from the original one. test=develop
* Remove the interface all_ops. test=develop
* Add an exception test in pass_test. test=develop
parent 99f5907e02
commit b7cac50b64
@@ -0,0 +1,6 @@
+file(GLOB TEST_IR_PASSES RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py")
+string(REPLACE ".py" "" TEST_IR_PASSES "${TEST_IR_PASSES}")
+
+foreach(target ${TEST_IR_PASSES})
+  py_test_modules(${target} MODULES ${target})
+endforeach()
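In plain terms, this rule auto-discovers the IR-pass tests: it globs every test_*.py in the directory, strips the .py suffix to get a target name (so test_fc_fuse_pass.py becomes the target test_fc_fuse_pass), and registers each one with py_test_modules, so newly added test files are picked up without any further CMake changes.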
(File diff suppressed because it is too large.)
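The suppressed file is pass_test.py, which adds the common PassTest base class that the test below inherits from. Since its diff is not shown, here is a minimal sketch of the interface the subclass relies on, pieced together from the commit message and from how FCFusePassTest uses it. The attribute names and the check_* entry points come from that usage; the core.PassBuilder-based pass application and all method bodies are assumptions, not the actual implementation.

# Sketch only: a plausible shape for PassTest, not the actual pass_test.py.
import unittest

import paddle.fluid as fluid
import paddle.fluid.core as core


class PassTest(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super(PassTest, self).__init__(*args, **kwargs)
        self.main_program = fluid.Program()
        self.startup_program = fluid.Program()
        self.feeds = None           # feed dict of numpy inputs
        self.fetch_list = None      # variables whose outputs are compared
        self.pass_names = None      # IR pass name, or a list of names
        self.pass_attrs = {}        # e.g. {"fc_fuse_pass": {"use_gpu": True}}
        self.fused_op_type = None   # op type a fusion pass should produce
        self.num_fused_ops = -1     # expected count of that op type

    def _apply_ir_passes(self):
        # Assumed mechanism: wrap the main program in a core.Graph, run the
        # named passes over it, then convert the graph back to a Program.
        graph = core.Graph(self.main_program.desc)
        names = self.pass_names
        if not isinstance(names, list):
            names = [names]
        pass_builder = core.PassBuilder()
        for name in names:
            ir_pass = pass_builder.append_pass(name)
            for key, value in self.pass_attrs.get(name, {}).items():
                ir_pass.set(key, value)
        for ir_pass in pass_builder.all_passes():
            ir_pass.apply(graph)
        ...  # translate the optimized graph back into a fluid.Program

    def check_output_with_place(self, place, startup_on_cpu=False, atol=1e-5):
        # Run the original and the optimized program with self.feeds, save
        # the fetched results as np.array (per the commit message), and
        # assert they match within atol. startup_on_cpu lets the startup
        # program run on CPU even when the main program runs on GPU.
        ...

    def check_program(self, program=None):
        # Per the commit message: check that the optimized program actually
        # differs from the original one.
        ...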
@@ -0,0 +1,52 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+from pass_test import PassTest
+import paddle.fluid as fluid
+import paddle.fluid.core as core
+
+
+class FCFusePassTest(PassTest):
+    def setUp(self):
+        with fluid.program_guard(self.main_program, self.startup_program):
+            data = fluid.data(
+                name="data", shape=[32, 128], dtype="float32", lod_level=0)
+            tmp_0 = fluid.layers.fc(input=data,
+                                    size=128,
+                                    num_flatten_dims=1,
+                                    act="relu")
+            tmp_1 = fluid.layers.fc(input=tmp_0, size=32, num_flatten_dims=1)
+            tmp_2 = fluid.layers.softmax(input=tmp_1)
+
+        self.feeds = {"data": np.random.random((32, 128)).astype("float32")}
+        self.fetch_list = [tmp_0, tmp_1, tmp_2]
+        self.pass_names = "fc_fuse_pass"
+        self.fused_op_type = "fc"
+        self.num_fused_ops = 2
+
+    def test_check_output(self):
+        use_gpu_set = [False]
+        if core.is_compiled_with_cuda():
+            use_gpu_set.append(True)
+        for use_gpu in use_gpu_set:
+            self.pass_attrs = {"fc_fuse_pass": {"use_gpu": use_gpu}}
+            place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
+            self.check_output_with_place(place, startup_on_cpu=True)
+
+
+if __name__ == "__main__":
+    unittest.main()
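Why num_fused_ops is 2: each fluid.layers.fc call lowers to a mul plus an elementwise_add (plus a relu for the first one), and fc_fuse_pass collapses each such pattern into a single fc operator, so two fc layers yield two fused ops. A check of that kind can be written with the standard Program introspection API; the helper below is a hypothetical illustration of the idea, not code from pass_test.py.

# Hypothetical helper (not in pass_test.py): count ops of a given type in a
# fluid.Program, the kind of check that num_fused_ops implies.
def count_op_type(program, op_type):
    return sum(1 for op in program.global_block().ops if op.type == op_type)

# After fc_fuse_pass runs on the program above, one would expect:
#   count_op_type(optimized_program, "fc") == 2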