@@ -20,11 +20,11 @@ from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import AnalysisConfig
"""Test for fusion of conv and bias."""
from paddle.fluid.core import PassVersionChecker


#padding SAME
class ConvBiasMkldnnFusePassTest(InferencePassTest):
class ConvBiasMkldnnFusePassSamePadTest(InferencePassTest):
    def setUp(self):
        with fluid.program_guard(self.main_program, self.startup_program):
            data = fluid.data(
@@ -48,10 +48,12 @@ class ConvBiasMkldnnFusePassTest(InferencePassTest):
    def test_check_output(self):
        use_gpu = False
        self.check_output_with_option(use_gpu)
        self.assertTrue(
            PassVersionChecker.IsCompatible("conv_bias_mkldnn_fuse_pass"))


#padding VALID
class ConvBiasMkldnnFusePassTest1(InferencePassTest):
class ConvBiasMkldnnFusePassValidPadTest(ConvBiasMkldnnFusePassSamePadTest):
    def setUp(self):
        with fluid.program_guard(self.main_program, self.startup_program):
            data = fluid.data(
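The two hunks above rename the base test to ConvBiasMkldnnFusePassSamePadTest and add the pass-version assertion, but both cut off at `data = fluid.data(`. For orientation, a minimal sketch of how such an InferencePassTest case is typically assembled follows; the class name, input shape and filter count are assumptions made for illustration, and only the API calls visible elsewhere in this diff are taken from it.

# Illustrative sketch, not part of the diff: a minimal InferencePassTest case for
# the conv+bias fuse. The class name, input shape and filter count are assumed.
import numpy as np
from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
from paddle.fluid.core import PassVersionChecker


class ConvBiasFuseSketchTest(InferencePassTest):
    def setUp(self):
        with fluid.program_guard(self.main_program, self.startup_program):
            # Assumed shape; the real value is hidden by the hunk boundary.
            data = fluid.data(
                name="data", shape=[-1, 3, 100, 100], dtype="float32")
            param_attr = fluid.ParamAttr(
                initializer=fluid.initializer.Xavier(uniform=False),
                learning_rate=0.001)
            conv_out = fluid.layers.conv2d(
                input=data,
                num_filters=3,
                filter_size=3,
                padding="SAME",
                bias_attr=param_attr)

        self.feeds = {
            "data": np.random.random((1, 3, 100, 100)).astype("float32")
        }
        self.fetch_list = [conv_out]
        self.enable_mkldnn = True

    def test_check_output(self):
        use_gpu = False
        self.check_output_with_option(use_gpu)
        # The fuse pass must report compatibility with the installed Paddle version.
        self.assertTrue(
            PassVersionChecker.IsCompatible("conv_bias_mkldnn_fuse_pass"))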
@@ -72,13 +74,9 @@ class ConvBiasMkldnnFusePassTest1(InferencePassTest):
        self.fetch_list = [conv_out]
        self.enable_mkldnn = True

    def test_check_output(self):
        use_gpu = False
        self.check_output_with_option(use_gpu)


#padding number
class ConvBiasMkldnnFusePassTest2(InferencePassTest):
#padding EXPLICT NUMBER
class ConvBiasMkldnnFusePassExplictPadTest(ConvBiasMkldnnFusePassSamePadTest):
    def setUp(self):
        with fluid.program_guard(self.main_program, self.startup_program):
            data = fluid.data(
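Judging by the shrinking hunk sizes (13 old lines becoming 9 here, with similar reductions below), the per-class test_check_output bodies appear to be dropped so that each padding variant inherits the check, including the new version assertion, from the SamePad base class. A sketch of that pattern, using a hypothetical class name and a stubbed setUp:

# Sketch only, hypothetical name: a variant overrides just setUp and inherits
# test_check_output (with its PassVersionChecker assertion) from the base class.
class ConvBiasFuseVariantSketchTest(ConvBiasMkldnnFusePassSamePadTest):
    def setUp(self):
        # Build the same conv+bias network, changing only the padding arguments.
        ...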
@@ -99,13 +97,8 @@ class ConvBiasMkldnnFusePassTest2(InferencePassTest):
        self.fetch_list = [conv_out]
        self.enable_mkldnn = True

    def test_check_output(self):
        use_gpu = False
        self.check_output_with_option(use_gpu)


#dilation not supported yet, just print warning log and does not fuse
class ConvBiasMkldnnFusePassTest3(InferencePassTest):
class ConvBiasMkldnnFusePassGroupTest(ConvBiasMkldnnFusePassSamePadTest):
    def setUp(self):
        with fluid.program_guard(self.main_program, self.startup_program):
            data = fluid.data(
@@ -118,7 +111,6 @@ class ConvBiasMkldnnFusePassTest3(InferencePassTest):
                num_filters=3,
                filter_size=3,
                padding="VALID",
                dilation=2,
                groups=3,
                bias_attr=param_attr,
                use_cudnn=False,
@@ -131,13 +123,9 @@ class ConvBiasMkldnnFusePassTest3(InferencePassTest):
        self.fetch_list = [conv_out]
        self.enable_mkldnn = True

    def test_check_output(self):
        use_gpu = False
        self.check_output_with_option(use_gpu)


#all conv params except for dilation
class ConvBiasMkldnnFusePassTest4(InferencePassTest):
class ConvBiasMkldnnFusePassDialtionsGroupsTest(
        ConvBiasMkldnnFusePassSamePadTest):
    def setUp(self):
        with fluid.program_guard(self.main_program, self.startup_program):
            data = fluid.data(
@@ -150,6 +138,7 @@ class ConvBiasMkldnnFusePassTest4(InferencePassTest):
                num_filters=3,
                filter_size=3,
                padding="VALID",
                dilation=2,
                groups=3,
                bias_attr=param_attr,
                use_cudnn=False,
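The two argument lists above (the hunks at old lines 118 and 150) are detached from their call sites by the hunk boundaries; they presumably belong to fluid.layers.conv2d calls inside the grouped and dilated variants' setUp methods. As a rough reconstruction, the shared shape of that call could be factored as below; the helper, the input shape and the default values are assumptions, not code from the file.

# Illustrative helper, not part of the diff: builds the conv+bias network that
# the grouped/dilated variants appear to share. Names and shapes are assumed.
import paddle.fluid as fluid


def build_grouped_conv_bias(main_program, startup_program, dilation=1, groups=3):
    with fluid.program_guard(main_program, startup_program):
        data = fluid.data(
            name="data", shape=[-1, 3, 100, 100], dtype="float32")
        param_attr = fluid.ParamAttr(
            initializer=fluid.initializer.Xavier(uniform=False),
            learning_rate=0.001)
        conv_out = fluid.layers.conv2d(
            input=data,
            num_filters=3,
            filter_size=3,
            padding="VALID",
            dilation=dilation,
            groups=groups,
            bias_attr=param_attr,
            use_cudnn=False)
    return data, conv_out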
@@ -162,9 +151,33 @@ class ConvBiasMkldnnFusePassTest4(InferencePassTest):
        self.fetch_list = [conv_out]
        self.enable_mkldnn = True


class ConvTransposeMkldnnFusePassDialtionsGroupsTest(InferencePassTest):
    def setUp(self):
        with fluid.program_guard(self.main_program, self.startup_program):
            data = fluid.data(name="data", shape=[-1, 3, 5, 5], dtype="float32")
            param_attr = fluid.ParamAttr(
                initializer=fluid.initializer.Xavier(uniform=False),
                learning_rate=0.001)
            conv_out = fluid.layers.conv2d_transpose(
                input=data,
                num_filters=3,
                filter_size=3,
                padding="SAME",
                dilation=1,
                bias_attr=param_attr,
                use_cudnn=False)

        self.feeds = {"data": np.random.random((1, 3, 5, 5)).astype("float32")}
        self.fetch_list = [conv_out]
        self.enable_mkldnn = True

    def test_check_output(self):
        use_gpu = False
        self.check_output_with_option(use_gpu)
        self.assertTrue(
            PassVersionChecker.IsCompatible(
                "conv_transpose_bias_mkldnn_fuse_pass"))


if __name__ == "__main__":
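For reviewers, the compatibility check used throughout these tests can also be exercised on its own; a minimal sketch, assuming a Paddle build with MKL-DNN and these passes registered:

# Standalone probe, not part of the diff: report whether each fuse pass is
# compatible with the installed Paddle version.
from paddle.fluid.core import PassVersionChecker

for pass_name in ("conv_bias_mkldnn_fuse_pass",
                  "conv_transpose_bias_mkldnn_fuse_pass"):
    print(pass_name, PassVersionChecker.IsCompatible(pass_name))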