Add conv_bias pass version Python test (#28278)

* add conv_bias pass version test

* update according to reviews
Author: lidanqing (committed via GitHub)
Commit: 12b9587be5 (parent: 0d25d55a86)
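
What the change does, in short: the C++ side now registers an op-version capability for each conv bias fuse pass, and the Python tests assert that capability through PassVersionChecker. A minimal sketch of the check the new test code relies on (assuming a Paddle build with the MKL-DNN passes compiled in; the pass names are taken from the registrations in the diff below):

    from paddle.fluid.core import PassVersionChecker

    # IsCompatible() returns True only when every op version required by the
    # pass's REGISTER_PASS_CAPABILITY combination matches the op versions
    # registered in the current build.
    for pass_name in ("conv_bias_mkldnn_fuse_pass",
                      "conv_transpose_bias_mkldnn_fuse_pass"):
        print(pass_name, PassVersionChecker.IsCompatible(pass_name))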

@@ -147,12 +147,19 @@ void ConvBiasFusePass::ApplyImpl(ir::Graph* graph) const {
 }  // namespace paddle
 REGISTER_PASS(conv_bias_mkldnn_fuse_pass,
               paddle::framework::ir::ConvBiasFusePass);
-REGISTER_PASS(conv_transpose_bias_mkldnn_fuse_pass,
-              paddle::framework::ir::Conv2DTransposeBiasFusePass);
-REGISTER_PASS(conv3d_bias_mkldnn_fuse_pass,
-              paddle::framework::ir::Conv3DBiasFusePass);
 REGISTER_PASS_CAPABILITY(conv_bias_mkldnn_fuse_pass)
     .AddCombination(
         paddle::framework::compatible::OpVersionComparatorCombination()
             .EQ("conv2d", 0)
             .EQ("elementwise_add", 0));
+REGISTER_PASS(conv_transpose_bias_mkldnn_fuse_pass,
+              paddle::framework::ir::Conv2DTransposeBiasFusePass);
+REGISTER_PASS_CAPABILITY(conv_transpose_bias_mkldnn_fuse_pass)
+    .AddCombination(
+        paddle::framework::compatible::OpVersionComparatorCombination()
+            .EQ("conv2d_transpose", 0)
+            .EQ("elementwise_add", 0));
+REGISTER_PASS(conv3d_bias_mkldnn_fuse_pass,
+              paddle::framework::ir::Conv3DBiasFusePass);
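
The Python hunks that follow rename the test classes and make the padding/group/dilation variants inherit from the SAME-padding base class, so test_check_output (now extended with the version assertion) is defined only once. A rough, self-contained sketch of the pattern these tests follow; the class name and input shape below are illustrative, not taken from the diff:

    import unittest

    import numpy as np
    import paddle.fluid as fluid
    from inference_pass_test import InferencePassTest
    from paddle.fluid.core import PassVersionChecker


    class ConvBiasFusePatternSketch(InferencePassTest):
        def setUp(self):
            # Build a small conv2d-with-bias program; the MKL-DNN fuse pass is
            # expected to fold the elementwise_add bias into the conv kernel.
            with fluid.program_guard(self.main_program, self.startup_program):
                data = fluid.data(
                    name="data", shape=[-1, 3, 100, 100], dtype="float32")
                conv_out = fluid.layers.conv2d(
                    input=data,
                    num_filters=3,
                    filter_size=3,
                    padding="SAME",
                    bias_attr=True,
                    use_cudnn=False)

            self.feeds = {
                "data": np.random.random((1, 3, 100, 100)).astype("float32")
            }
            self.fetch_list = [conv_out]
            self.enable_mkldnn = True

        def test_check_output(self):
            use_gpu = False
            self.check_output_with_option(use_gpu)
            # New in this PR: assert that the pass declares op-version
            # compatibility (see REGISTER_PASS_CAPABILITY above).
            self.assertTrue(
                PassVersionChecker.IsCompatible("conv_bias_mkldnn_fuse_pass"))


    if __name__ == "__main__":
        unittest.main()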

@@ -20,11 +20,11 @@ from inference_pass_test import InferencePassTest
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.core import AnalysisConfig
-"""Test for fusion of conv and bias."""
+from paddle.fluid.core import PassVersionChecker
 
 
 #padding SAME
-class ConvBiasMkldnnFusePassTest(InferencePassTest):
+class ConvBiasMkldnnFusePassSamePadTest(InferencePassTest):
     def setUp(self):
         with fluid.program_guard(self.main_program, self.startup_program):
             data = fluid.data(
@@ -48,10 +48,12 @@ class ConvBiasMkldnnFusePassTest(InferencePassTest):
     def test_check_output(self):
         use_gpu = False
         self.check_output_with_option(use_gpu)
+        self.assertTrue(
+            PassVersionChecker.IsCompatible("conv_bias_mkldnn_fuse_pass"))
 
 
 #padding VALID
-class ConvBiasMkldnnFusePassTest1(InferencePassTest):
+class ConvBiasMkldnnFusePassValidPadTest(ConvBiasMkldnnFusePassSamePadTest):
     def setUp(self):
         with fluid.program_guard(self.main_program, self.startup_program):
             data = fluid.data(
@@ -72,13 +74,9 @@ class ConvBiasMkldnnFusePassTest1(InferencePassTest):
         self.fetch_list = [conv_out]
         self.enable_mkldnn = True
 
-    def test_check_output(self):
-        use_gpu = False
-        self.check_output_with_option(use_gpu)
-
 
-#padding number
-class ConvBiasMkldnnFusePassTest2(InferencePassTest):
+#padding EXPLICT NUMBER
+class ConvBiasMkldnnFusePassExplictPadTest(ConvBiasMkldnnFusePassSamePadTest):
     def setUp(self):
         with fluid.program_guard(self.main_program, self.startup_program):
             data = fluid.data(
@@ -99,13 +97,8 @@ class ConvBiasMkldnnFusePassTest2(InferencePassTest):
         self.fetch_list = [conv_out]
         self.enable_mkldnn = True
 
-    def test_check_output(self):
-        use_gpu = False
-        self.check_output_with_option(use_gpu)
-
 
-#dilation not supported yet, just print warning log and does not fuse
-class ConvBiasMkldnnFusePassTest3(InferencePassTest):
+class ConvBiasMkldnnFusePassGroupTest(ConvBiasMkldnnFusePassSamePadTest):
     def setUp(self):
         with fluid.program_guard(self.main_program, self.startup_program):
             data = fluid.data(
@@ -118,7 +111,6 @@ class ConvBiasMkldnnFusePassTest3(InferencePassTest):
                 num_filters=3,
                 filter_size=3,
                 padding="VALID",
-                dilation=2,
                 groups=3,
                 bias_attr=param_attr,
                 use_cudnn=False,
@@ -131,13 +123,9 @@ class ConvBiasMkldnnFusePassTest3(InferencePassTest):
         self.fetch_list = [conv_out]
         self.enable_mkldnn = True
 
-    def test_check_output(self):
-        use_gpu = False
-        self.check_output_with_option(use_gpu)
-
 
-#all conv params except for dilation
-class ConvBiasMkldnnFusePassTest4(InferencePassTest):
+class ConvBiasMkldnnFusePassDialtionsGroupsTest(
+        ConvBiasMkldnnFusePassSamePadTest):
     def setUp(self):
         with fluid.program_guard(self.main_program, self.startup_program):
             data = fluid.data(
@@ -150,6 +138,7 @@ class ConvBiasMkldnnFusePassTest4(InferencePassTest):
                 num_filters=3,
                 filter_size=3,
                 padding="VALID",
+                dilation=2,
                 groups=3,
                 bias_attr=param_attr,
                 use_cudnn=False,
@@ -162,9 +151,33 @@ class ConvBiasMkldnnFusePassTest4(InferencePassTest):
         self.fetch_list = [conv_out]
         self.enable_mkldnn = True
 
+
+class ConvTransposeMkldnnFusePassDialtionsGroupsTest(InferencePassTest):
+    def setUp(self):
+        with fluid.program_guard(self.main_program, self.startup_program):
+            data = fluid.data(name="data", shape=[-1, 3, 5, 5], dtype="float32")
+            param_attr = fluid.ParamAttr(
+                initializer=fluid.initializer.Xavier(uniform=False),
+                learning_rate=0.001)
+            conv_out = fluid.layers.conv2d_transpose(
+                input=data,
+                num_filters=3,
+                filter_size=3,
+                padding="SAME",
+                dilation=1,
+                bias_attr=param_attr,
+                use_cudnn=False)
+
+        self.feeds = {"data": np.random.random((1, 3, 5, 5)).astype("float32")}
+        self.fetch_list = [conv_out]
+        self.enable_mkldnn = True
+
     def test_check_output(self):
         use_gpu = False
         self.check_output_with_option(use_gpu)
+        self.assertTrue(
+            PassVersionChecker.IsCompatible(
+                "conv_transpose_bias_mkldnn_fuse_pass"))
 
 
 if __name__ == "__main__":
