Update 2.0 convolution api (#26491)

* update Conv2d Conv3d conv2d conv3d api
test_feature_precision_test_c
LielinJiang committed via GitHub
parent 7c42f056e2
commit 3a9417f4f7

@@ -26,7 +26,7 @@ import paddle.fluid as fluid
 import paddle.fluid.dygraph as dygraph
 from paddle.fluid import core
 from paddle.fluid.optimizer import SGDOptimizer
-from paddle.nn import Conv2D, Pool2D, Linear, SyncBatchNorm
+from paddle.nn import Conv2d, Pool2D, Linear, SyncBatchNorm
 from paddle.fluid.dygraph.base import to_variable
 from test_dist_base import runtime_main, TestParallelDyGraphRunnerBase
@@ -42,26 +42,24 @@ class TestLayer(fluid.dygraph.Layer):
                  act=None):
         super(TestLayer, self).__init__()
-        self._conv = Conv2D(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=filter_size,
+        self._conv = Conv2d(
+            in_channels=num_channels,
+            out_channels=num_filters,
+            kernel_size=filter_size,
             stride=stride,
             padding=(filter_size - 1) // 2,
             groups=groups,
-            act=None,
             bias_attr=False)
         self._sync_batch_norm = SyncBatchNorm(num_filters)
-        self._conv2 = Conv2D(
-            num_channels=num_filters,
-            num_filters=num_filters,
-            filter_size=filter_size,
+        self._conv2 = Conv2d(
+            in_channels=num_filters,
+            out_channels=num_filters,
+            kernel_size=filter_size,
             stride=stride,
             padding=(filter_size - 1) // 2,
             groups=groups,
-            act=None,
             bias_attr=False)
         self._sync_batch_norm2 = SyncBatchNorm(

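The layer-level migration above is mechanical: `Conv2D(num_channels=..., num_filters=..., filter_size=..., act=..., use_cudnn=...)` becomes `Conv2d(in_channels=..., out_channels=..., kernel_size=...)`, and any activation moves out of the layer. A minimal sketch of the new usage (shapes and values are illustrative, not from this diff):

```python
import numpy as np
import paddle.fluid.dygraph as dg
import paddle.nn as nn

with dg.guard():
    # 2.0-style keywords: in_channels/out_channels/kernel_size; the old
    # act= and use_cudnn= arguments are gone from the layer signature.
    conv = nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3,
                     stride=1, padding=1, bias_attr=False)
    relu = nn.ReLU()  # the activation is now a separate layer

    x = dg.to_variable(np.random.randn(2, 3, 8, 8).astype("float32"))  # NCHW
    y = relu(conv(x))  # shape [2, 8, 8, 8] with padding=1
```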
@@ -20,6 +20,10 @@ import paddle.fluid.initializer as I
 import unittest
+
+def _reverse_repeat_list(t, n):
+    return list(x for x in reversed(t) for _ in range(n))
+
 class Conv2DTestCase(unittest.TestCase):
     def __init__(self,
                  methodName='runTest',
@@ -29,12 +33,11 @@ class Conv2DTestCase(unittest.TestCase):
                  num_filters=8,
                  filter_size=3,
                  padding=0,
+                 padding_mode='zeros',
                  stride=1,
                  dilation=1,
                  groups=1,
-                 act=None,
                  no_bias=False,
-                 use_cudnn=True,
                  data_format="NCHW",
                  dtype="float32"):
         super(Conv2DTestCase, self).__init__(methodName)
@@ -45,12 +48,16 @@ class Conv2DTestCase(unittest.TestCase):
         self.filter_size = filter_size
         self.padding = padding
+        if padding_mode in {'reflect', 'replicate', 'circular'}:
+            _paired_padding = fluid.layers.utils.convert_to_list(padding, 2,
+                                                                 'padding')
+            self._reversed_padding_repeated_twice = _reverse_repeat_list(
+                _paired_padding, 2)
+        self.padding_mode = padding_mode
         self.stride = stride
         self.dilation = dilation
         self.groups = groups
-        self.act = act
         self.no_bias = no_bias
-        self.use_cudnn = use_cudnn
         self.data_format = data_format
         self.dtype = dtype
@@ -91,19 +98,27 @@ class Conv2DTestCase(unittest.TestCase):
                 bias_attr = False
             else:
                 bias_attr = I.NumpyArrayInitializer(self.bias)
+            if self.padding_mode != 'zeros':
+                x_var = F.pad(x_var,
+                              self._reversed_padding_repeated_twice,
+                              mode=self.padding_mode,
+                              data_format=self.data_format)
+                padding = 0
+            else:
+                padding = self.padding
             y_var = fluid.layers.conv2d(
                 x_var,
                 self.num_filters,
                 self.filter_size,
-                padding=self.padding,
+                padding=padding,
                 stride=self.stride,
                 dilation=self.dilation,
                 groups=self.groups,
                 param_attr=weight_attr,
                 bias_attr=bias_attr,
-                use_cudnn=self.use_cudnn,
-                act=self.act,
                 data_format=self.data_format)
         feed_dict = {"input": self.input}
         exe = fluid.Executor(place)
         exe.run(start)
@@ -122,16 +137,24 @@ class Conv2DTestCase(unittest.TestCase):
                 "weight", self.weight_shape, dtype=self.dtype)
             b_var = fluid.data(
                 "bias", (self.num_filters, ), dtype=self.dtype)
+            if self.padding_mode != 'zeros':
+                x_var = F.pad(x_var,
+                              self._reversed_padding_repeated_twice,
+                              mode=self.padding_mode,
+                              data_format=self.data_format)
+                padding = 0
+            else:
+                padding = self.padding
             y_var = F.conv2d(
                 x_var,
                 w_var,
                 b_var if not self.no_bias else None,
-                padding=self.padding,
+                padding=padding,
                 stride=self.stride,
                 dilation=self.dilation,
                 groups=self.groups,
-                act=self.act,
-                use_cudnn=self.use_cudnn,
                 data_format=self.data_format)
         feed_dict = {"input": self.input, "weight": self.weight}
         if self.bias is not None:
@@ -143,18 +166,16 @@ class Conv2DTestCase(unittest.TestCase):
     def paddle_nn_layer(self):
         x_var = dg.to_variable(self.input)
-        conv = nn.Conv2D(
+        conv = nn.Conv2d(
             self.num_channels,
             self.num_filters,
             self.filter_size,
             padding=self.padding,
+            padding_mode=self.padding_mode,
             stride=self.stride,
             dilation=self.dilation,
             groups=self.groups,
-            act=self.act,
-            use_cudnn=self.use_cudnn,
-            data_format=self.data_format,
-            dtype=self.dtype)
+            data_format=self.data_format)
         conv.weight.set_value(self.weight)
         if not self.no_bias:
             conv.bias.set_value(self.bias)
@@ -198,7 +219,7 @@ def add_cases(suite):
             methodName='runTest', stride=2, dilation=(2, 1)))
     suite.addTest(
         Conv2DTestCase(
-            methodName='runTest', padding="same", no_bias=True, act="sigmoid"))
+            methodName='runTest', padding="same", no_bias=True))
     suite.addTest(
         Conv2DTestCase(
             methodName='runTest', filter_size=(3, 3), padding='valid'))
@@ -222,15 +243,28 @@ def add_cases(suite):
             num_filters=6,
             num_channels=3,
             groups=3,
-            use_cudnn=False,
-            act="sigmoid",
             padding="valid"))
+    suite.addTest(
+        Conv2DTestCase(
+            methodName='runTest',
+            filter_size=(3, 3),
+            padding=1,
+            padding_mode='reflect'))
+    suite.addTest(
+        Conv2DTestCase(
+            methodName='runTest',
+            filter_size=(3, 3),
+            padding=1,
+            padding_mode='replicate'))
+    suite.addTest(
+        Conv2DTestCase(
+            methodName='runTest',
+            filter_size=(3, 3),
+            padding=1,
+            padding_mode='circular'))
 def add_error_cases(suite):
-    suite.addTest(
-        Conv2DErrorTestCase(
-            methodName='runTest', use_cudnn="not_valid"))
     suite.addTest(
         Conv2DErrorTestCase(
             methodName='runTest', num_channels=5, groups=2))

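The reference paths above pin down the semantics of the new `padding_mode` argument: a non-`'zeros'` mode is equivalent to pre-padding the input with `F.pad` in that mode and then convolving with `padding=0`. The `_reverse_repeat_list` helper converts a per-dimension `[pad_h, pad_w]` into the per-edge, last-dimension-first list that `F.pad` expects. A small sketch of that equivalence (assumed shapes, NCHW):

```python
import numpy as np
import paddle.fluid.dygraph as dg
import paddle.nn.functional as F

def _reverse_repeat_list(t, n):
    # [pad_h, pad_w] -> [pad_w, pad_w, pad_h, pad_h]:
    # last spatial dimension first, one entry per edge.
    return list(x for x in reversed(t) for _ in range(n))

with dg.guard():
    x = dg.to_variable(np.random.randn(1, 3, 8, 8).astype("float32"))
    pad = _reverse_repeat_list([1, 2], 2)     # [2, 2, 1, 1]
    x_pre = F.pad(x, pad, mode='reflect')     # pre-pad in the chosen mode
    # F.conv2d(x_pre, w, padding=0) should now match
    # nn.Conv2d(..., padding=[1, 2], padding_mode='reflect') applied to x.
```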
@@ -32,9 +32,7 @@ class Conv3DTestCase(unittest.TestCase):
                  stride=1,
                  dilation=1,
                  groups=1,
-                 act=None,
                  no_bias=False,
-                 use_cudnn=True,
                  data_format="NCDHW",
                  dtype="float32"):
         super(Conv3DTestCase, self).__init__(methodName)
@@ -48,9 +46,7 @@ class Conv3DTestCase(unittest.TestCase):
         self.stride = stride
         self.dilation = dilation
         self.groups = groups
-        self.act = act
         self.no_bias = no_bias
-        self.use_cudnn = use_cudnn
         self.data_format = data_format
         self.dtype = dtype
@@ -101,8 +97,6 @@ class Conv3DTestCase(unittest.TestCase):
                 groups=self.groups,
                 param_attr=weight_attr,
                 bias_attr=bias_attr,
-                use_cudnn=self.use_cudnn,
-                act=self.act,
                 data_format=self.data_format)
         feed_dict = {"input": self.input}
         exe = fluid.Executor(place)
@@ -130,8 +124,6 @@ class Conv3DTestCase(unittest.TestCase):
                 stride=self.stride,
                 dilation=self.dilation,
                 groups=self.groups,
-                act=self.act,
-                use_cudnn=self.use_cudnn,
                 data_format=self.data_format)
         feed_dict = {"input": self.input, "weight": self.weight}
         if self.bias is not None:
@@ -143,7 +135,7 @@ class Conv3DTestCase(unittest.TestCase):
     def paddle_nn_layer(self):
         x_var = dg.to_variable(self.input)
-        conv = nn.Conv3D(
+        conv = nn.Conv3d(
             self.num_channels,
             self.num_filters,
             self.filter_size,
@@ -151,10 +143,7 @@ class Conv3DTestCase(unittest.TestCase):
             stride=self.stride,
             dilation=self.dilation,
             groups=self.groups,
-            act=self.act,
-            use_cudnn=self.use_cudnn,
-            data_format=self.data_format,
-            dtype=self.dtype)
+            data_format=self.data_format)
         conv.weight.set_value(self.weight)
         if not self.no_bias:
             conv.bias.set_value(self.bias)
@@ -225,15 +214,10 @@ def add_cases(suite):
             num_filters=6,
             num_channels=3,
             groups=3,
-            use_cudnn=False,
-            act="sigmoid",
             padding="valid"))
 def add_error_cases(suite):
-    suite.addTest(
-        Conv3DErrorTestCase(
-            methodName='runTest', use_cudnn="not_valid"))
     suite.addTest(
         Conv3DErrorTestCase(
             methodName='runTest', num_channels=5, groups=2))

@@ -117,7 +117,7 @@ class TestDygraphWeightNorm(unittest.TestCase):
     def test_check_output(self):
         fluid.enable_imperative()
-        linear = paddle.nn.Conv2D(2, 3, 3)
+        linear = paddle.nn.Conv2d(2, 3, 3)
         before_weight = linear.weight.numpy()
         if self.dim == None:
             self.dim = -1
@@ -169,7 +169,7 @@ class TestDygraphRemoveWeightNorm(unittest.TestCase):
     def test_check_output(self):
         fluid.enable_imperative()
-        linear = paddle.nn.Conv2D(2, 3, 3)
+        linear = paddle.nn.Conv2d(2, 3, 3)
         before_weight = linear.weight
         wn = weight_norm(linear, dim=self.dim)
         rwn = remove_weight_norm(linear)

@@ -37,7 +37,6 @@ class TestFunctionalConv2D(TestCase):
         self.groups = 1
         self.no_bias = False
         self.act = "sigmoid"
-        self.use_cudnn = True
         self.data_format = "NHWC"
     def prepare(self):
@@ -88,7 +87,6 @@ class TestFunctionalConv2D(TestCase):
                 param_attr=I.NumpyArrayInitializer(self.weight),
                 bias_attr=False
                 if self.no_bias else I.NumpyArrayInitializer(self.bias),
-                use_cudnn=self.use_cudnn,
                 act=self.act,
                 data_format=self.data_format)
         exe = fluid.Executor(self.place)
@@ -121,9 +119,11 @@ class TestFunctionalConv2D(TestCase):
                 stride=self.stride,
                 dilation=self.dilation,
                 groups=self.groups,
-                act=self.act,
-                data_format=self.data_format,
-                use_cudnn=self.use_cudnn)
+                data_format=self.data_format)
+            if self.act == 'sigmoid':
+                y = F.sigmoid(y)
         exe = fluid.Executor(self.place)
         exe.run(start)
         feed_dict = {"input": self.input, "weight": self.weight}
@@ -144,10 +144,12 @@ class TestFunctionalConv2D(TestCase):
                 padding=self.padding,
                 stride=self.stride,
                 dilation=self.dilation,
-                act=self.act,
                 groups=self.groups,
-                data_format=self.data_format,
-                use_cudnn=self.use_cudnn)
+                data_format=self.data_format)
+            if self.act == 'sigmoid':
+                y = F.sigmoid(y)
             out = y.numpy()
         return out
@@ -185,7 +187,6 @@ class TestFunctionalConv2DError(TestCase):
         self.groups = 1
         self.no_bias = False
         self.act = "sigmoid"
-        self.use_cudnn = True
         self.data_format = "NHWC"
     def test_exception(self):
@@ -228,9 +229,7 @@ class TestFunctionalConv2DError(TestCase):
                 stride=self.stride,
                 dilation=self.dilation,
                 groups=self.groups,
-                act=self.act,
-                data_format=self.data_format,
-                use_cudnn=self.use_cudnn)
+                data_format=self.data_format)
 class TestFunctionalConv2DCase2(TestFunctionalConv2D):
@@ -383,21 +382,6 @@ class TestFunctionalConv2DErrorCase4(TestFunctionalConv2DError):
         self.data_format = "NCHW"
-class TestFunctionalConv2DErrorCase6(TestFunctionalConv2DError):
-    def setUp(self):
-        self.in_channels = 3
-        self.out_channels = 5
-        self.filter_shape = 3
-        self.padding = "same"
-        self.stride = 1
-        self.dilation = 1
-        self.groups = 1
-        self.no_bias = False
-        self.act = "sigmoid"
-        self.use_cudnn = "not_valid"
-        self.data_format = "NCHW"
 class TestFunctionalConv2DErrorCase7(TestFunctionalConv2DError):
     def setUp(self):
         self.in_channels = 3

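The functional tests follow the same decoupling: `F.conv2d` and `F.conv3d` lose `act` and `use_cudnn`, return the raw convolution output, and the activation is applied explicitly, exactly as the replacement lines do with `F.sigmoid`. A condensed dygraph sketch (shapes illustrative; weight layout OIHW as in the tests):

```python
import numpy as np
import paddle.fluid.dygraph as dg
import paddle.nn.functional as F

with dg.guard():
    x = dg.to_variable(np.random.randn(2, 8, 8, 3).astype("float32"))  # NHWC
    w = dg.to_variable(np.random.randn(5, 3, 3, 3).astype("float32"))  # OIHW

    # Old: y = F.conv2d(x, w, act="sigmoid", use_cudnn=True, ...)
    y = F.conv2d(x, w, padding="same", data_format="NHWC")
    y = F.sigmoid(y)  # the activation is now an explicit, separate call
```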
@@ -37,7 +37,6 @@ class TestFunctionalConv3D(TestCase):
         self.groups = 1
         self.no_bias = False
         self.act = "sigmoid"
-        self.use_cudnn = True
         self.data_format = "NDHWC"
     def prepare(self):
@@ -88,7 +87,6 @@ class TestFunctionalConv3D(TestCase):
                 param_attr=I.NumpyArrayInitializer(self.weight),
                 bias_attr=False
                 if self.no_bias else I.NumpyArrayInitializer(self.bias),
-                use_cudnn=self.use_cudnn,
                 act=self.act,
                 data_format=self.data_format)
         exe = fluid.Executor(self.place)
@@ -121,9 +119,11 @@ class TestFunctionalConv3D(TestCase):
                 stride=self.stride,
                 dilation=self.dilation,
                 groups=self.groups,
-                act=self.act,
-                data_format=self.data_format,
-                use_cudnn=self.use_cudnn)
+                data_format=self.data_format)
+            if self.act == 'sigmoid':
+                y = F.sigmoid(y)
         exe = fluid.Executor(self.place)
         exe.run(start)
         feed_dict = {"input": self.input, "weight": self.weight}
@@ -144,10 +144,12 @@ class TestFunctionalConv3D(TestCase):
                 padding=self.padding,
                 stride=self.stride,
                 dilation=self.dilation,
-                act=self.act,
                 groups=self.groups,
-                data_format=self.data_format,
-                use_cudnn=self.use_cudnn)
+                data_format=self.data_format)
+            if self.act == 'sigmoid':
+                y = F.sigmoid(y)
             out = y.numpy()
         return out
@@ -185,7 +187,6 @@ class TestFunctionalConv3DError(TestCase):
         self.groups = 1
         self.no_bias = False
         self.act = "sigmoid"
-        self.use_cudnn = True
         self.data_format = "NDHWC"
     def test_exception(self):
@@ -228,9 +229,10 @@ class TestFunctionalConv3DError(TestCase):
                 stride=self.stride,
                 dilation=self.dilation,
                 groups=self.groups,
-                act=self.act,
-                data_format=self.data_format,
-                use_cudnn=self.use_cudnn)
+                data_format=self.data_format)
+            if self.act == 'sigmoid':
+                y = F.sigmoid(y)
 class TestFunctionalConv3DCase2(TestFunctionalConv3D):
@@ -244,7 +246,6 @@ class TestFunctionalConv3DCase2(TestFunctionalConv3D):
         self.groups = 1
         self.no_bias = False
         self.act = "sigmoid"
-        self.use_cudnn = True
         self.data_format = "NDHWC"
@@ -259,7 +260,6 @@ class TestFunctionalConv3DCase3(TestFunctionalConv3D):
         self.groups = 1
         self.no_bias = False
         self.act = "sigmoid"
-        self.use_cudnn = True
         self.data_format = "NDHWC"
@@ -274,7 +274,6 @@ class TestFunctionalConv3DCase4(TestFunctionalConv3D):
         self.groups = 1
         self.no_bias = False
         self.act = "sigmoid"
-        self.use_cudnn = True
         self.data_format = "NDHWC"
@@ -289,7 +288,6 @@ class TestFunctionalConv3DCase5(TestFunctionalConv3D):
         self.groups = 1
         self.no_bias = False
         self.act = "sigmoid"
-        self.use_cudnn = True
         self.data_format = "NDHWC"
@@ -304,7 +302,6 @@ class TestFunctionalConv3DCase6(TestFunctionalConv3D):
         self.groups = 1
         self.no_bias = False
         self.act = "sigmoid"
-        self.use_cudnn = True
         self.data_format = "NCDHW"
@@ -319,7 +316,6 @@ class TestFunctionalConv3DCase7(TestFunctionalConv3D):
         self.groups = 2
         self.no_bias = False
         self.act = "sigmoid"
-        self.use_cudnn = True
         self.data_format = "NCDHW"
@@ -349,7 +345,6 @@ class TestFunctionalConv3DErrorCase2(TestFunctionalConv3DError):
         self.groups = 1
         self.no_bias = False
         self.act = "sigmoid"
-        self.use_cudnn = False
         self.data_format = "NCDHW"
@@ -364,7 +359,6 @@ class TestFunctionalConv3DErrorCase3(TestFunctionalConv3DError):
         self.groups = 2
         self.no_bias = False
         self.act = "sigmoid"
-        self.use_cudnn = False
         self.data_format = "not_valid"
@@ -379,22 +373,6 @@ class TestFunctionalConv3DErrorCase4(TestFunctionalConv3DError):
         self.groups = 2
         self.no_bias = False
         self.act = "sigmoid"
-        self.use_cudnn = False
         self.data_format = "NCDHW"
-class TestFunctionalConv3DErrorCase6(TestFunctionalConv3DError):
-    def setUp(self):
-        self.in_channels = 3
-        self.out_channels = 5
-        self.filter_shape = 3
-        self.padding = "same"
-        self.stride = 1
-        self.dilation = 1
-        self.groups = 1
-        self.no_bias = False
-        self.act = "sigmoid"
-        self.use_cudnn = "not_valid"
-        self.data_format = "NCDHW"
@@ -409,7 +387,6 @@ class TestFunctionalConv3DErrorCase7(TestFunctionalConv3DError):
         self.groups = 1
         self.no_bias = False
         self.act = "sigmoid"
-        self.use_cudnn = True
         self.data_format = "not_valid"
@@ -424,7 +401,6 @@ class TestFunctionalConv3DErrorCase8(TestFunctionalConv3DError):
         self.groups = 1
         self.no_bias = False
         self.act = "sigmoid"
-        self.use_cudnn = True
         self.data_format = "NCDHW"
@@ -439,7 +415,6 @@ class TestFunctionalConv3DErrorCase9(TestFunctionalConv3DError):
         self.groups = 1
         self.no_bias = False
         self.act = "sigmoid"
-        self.use_cudnn = False
         self.data_format = "NCDHW"
@@ -454,7 +429,6 @@ class TestFunctionalConv3DErrorCase10(TestFunctionalConv3DError):
         self.groups = 2
         self.no_bias = False
         self.act = "sigmoid"
-        self.use_cudnn = False
         self.data_format = "NDHWC"

@@ -28,11 +28,11 @@ class LeNetDygraph(fluid.dygraph.Layer):
         super(LeNetDygraph, self).__init__()
         self.num_classes = num_classes
         self.features = nn.Sequential(
-            nn.Conv2D(
+            nn.Conv2d(
                 1, 6, 3, stride=1, padding=1),
             nn.ReLU(),
             nn.Pool2D(2, 'max', 2),
-            nn.Conv2D(
+            nn.Conv2d(
                 6, 16, 5, stride=1, padding=0),
             nn.ReLU(),
             nn.Pool2D(2, 'max', 2))
@@ -61,7 +61,7 @@ def init_weights(layer):
         new_bias = paddle.fill_constant(
             layer.bias.shape, layer.bias.dtype, value=-0.1)
         layer.bias.set_value(new_bias)
-    elif type(layer) == nn.Conv2D:
+    elif type(layer) == nn.Conv2d:
         new_weight = paddle.fill_constant(
             layer.weight.shape, layer.weight.dtype, value=0.7)
         layer.weight.set_value(new_weight)
@@ -81,7 +81,7 @@ class TestLayerApply(unittest.TestCase):
             if type(layer) == nn.Linear:
                 np.testing.assert_allclose(layer.weight.numpy(), 0.9)
                 np.testing.assert_allclose(layer.bias.numpy(), -0.1)
-            elif type(layer) == nn.Conv2D:
+            elif type(layer) == nn.Conv2d:
                 np.testing.assert_allclose(layer.weight.numpy(), 0.7)
                 np.testing.assert_allclose(layer.bias.numpy(), -0.2)

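One subtle consequence shown above: a `type(layer) == nn.Conv2D` identity check silently stops matching after the rename, so `apply`-based initializers must compare against `nn.Conv2d`. A sketch of the pattern the test exercises (assuming `Layer.apply` as used by `TestLayerApply`; the constant values are illustrative):

```python
import paddle
import paddle.fluid as fluid
import paddle.nn as nn

fluid.enable_imperative()  # dygraph mode, as the surrounding tests do

def init_weights(layer):
    # The identity check must name the renamed class: nn.Conv2d.
    if type(layer) == nn.Conv2d:
        new_weight = paddle.fill_constant(
            layer.weight.shape, layer.weight.dtype, value=0.7)
        layer.weight.set_value(new_weight)

net = nn.Sequential(nn.Conv2d(1, 6, 3, padding=1), nn.ReLU())
net.apply(init_weights)  # applies the function to every sublayer
```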
@@ -27,11 +27,11 @@ class LeNetDygraph(fluid.dygraph.Layer):
     def __init__(self):
         super(LeNetDygraph, self).__init__()
         self.features = nn.Sequential(
-            nn.Conv2D(
+            nn.Conv2d(
                 1, 6, 3, stride=1, padding=1),
             nn.ReLU(),
             nn.Pool2D(2, 'max', 2),
-            nn.Conv2D(
+            nn.Conv2d(
                 6, 16, 5, stride=1, padding=0),
             nn.ReLU(),
             nn.Pool2D(2, 'max', 2))

@@ -26,7 +26,7 @@ paddle.manual_seed(SEED)
 class Generator(fluid.dygraph.Layer):
     def __init__(self):
         super(Generator, self).__init__()
-        self.conv1 = paddle.nn.Conv2D(3, 3, 3, 1)
+        self.conv1 = paddle.nn.Conv2d(3, 3, 3, padding=1)
     def forward(self, x):
         x = self.conv1(x)
@@ -37,7 +37,7 @@ class Generator(fluid.dygraph.Layer):
 class Discriminator(fluid.dygraph.Layer):
     def __init__(self):
         super(Discriminator, self).__init__()
-        self.convd = paddle.nn.Conv2D(6, 3, 1)
+        self.convd = paddle.nn.Conv2d(6, 3, 1)
     def forward(self, x):
         x = self.convd(x)

@@ -23,7 +23,7 @@ import shutil
 import tempfile
 from paddle import fluid
-from paddle.nn import Conv2D, Pool2D, Linear, ReLU, Sequential
+from paddle.nn import Conv2d, Pool2D, Linear, ReLU, Sequential
 from paddle.fluid.dygraph.base import to_variable
 import paddle.incubate.hapi as hapi
@@ -40,11 +40,11 @@ class LeNetDygraph(fluid.dygraph.Layer):
         super(LeNetDygraph, self).__init__()
         self.num_classes = num_classes
         self.features = Sequential(
-            Conv2D(
+            Conv2d(
                 1, 6, 3, stride=1, padding=1),
             ReLU(),
             Pool2D(2, 'max', 2),
-            Conv2D(
+            Conv2d(
                 6, 16, 5, stride=1, padding=0),
             ReLU(),
             Pool2D(2, 'max', 2))

@@ -22,7 +22,7 @@ import shutil
 import tempfile
 from paddle import fluid
-from paddle.nn import Conv2D, Pool2D, Linear, ReLU, Sequential
+from paddle.nn import Conv2d, Pool2D, Linear, ReLU, Sequential
 from paddle.incubate.hapi.utils import uncombined_weight_to_state_dict
@@ -32,11 +32,11 @@ class LeNetDygraph(fluid.dygraph.Layer):
         super(LeNetDygraph, self).__init__()
         self.num_classes = num_classes
         self.features = Sequential(
-            Conv2D(
+            Conv2d(
                 1, 6, 3, stride=1, padding=1),
             ReLU(),
             Pool2D(2, 'max', 2),
-            Conv2D(
+            Conv2d(
                 6, 16, 5, stride=1, padding=0),
             ReLU(),
             Pool2D(2, 'max', 2))

@@ -13,7 +13,7 @@
 #limitations under the License.
 import paddle.fluid as fluid
-from paddle.nn import Conv2D, Pool2D, Linear, ReLU, Sequential
+from paddle.nn import Conv2d, Pool2D, Linear, ReLU, Sequential
 __all__ = ['LeNet']
@@ -39,11 +39,11 @@ class LeNet(fluid.dygraph.Layer):
         super(LeNet, self).__init__()
         self.num_classes = num_classes
         self.features = Sequential(
-            Conv2D(
+            Conv2d(
                 1, 6, 3, stride=1, padding=1),
             ReLU(),
             Pool2D(2, 'max', 2),
-            Conv2D(
+            Conv2d(
                 6, 16, 5, stride=1, padding=0),
             ReLU(),
             Pool2D(2, 'max', 2))

@@ -13,7 +13,7 @@
 # limitations under the License.
 import paddle.fluid as fluid
-from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
+from paddle.nn import Conv2d, Pool2D, BatchNorm, Linear, ReLU
 from paddle.fluid.dygraph.container import Sequential
 from ...download import get_weights_path_from_url
@@ -105,12 +105,11 @@ def make_layers(cfg, batch_norm=False):
             layers += [Pool2D(pool_size=2, pool_stride=2)]
         else:
             if batch_norm:
-                conv2d = Conv2D(in_channels, v, filter_size=3, padding=1)
-                layers += [conv2d, BatchNorm(v, act='relu')]
+                conv2d = Conv2d(in_channels, v, kernel_size=3, padding=1)
+                layers += [conv2d, BatchNorm(v), ReLU()]
             else:
-                conv2d = Conv2D(
-                    in_channels, v, filter_size=3, padding=1, act='relu')
-                layers += [conv2d]
+                conv2d = Conv2d(in_channels, v, kernel_size=3, padding=1)
+                layers += [conv2d, ReLU()]
             in_channels = v
     return Sequential(*layers)

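In the vision models the decoupling changes the module structure, not just the keywords: `act='relu'` fused into `Conv2D` or `BatchNorm` becomes an explicit `ReLU()` entry in the `Sequential`, so each VGG conv block grows by one layer. A condensed sketch of the new branch (hypothetical `conv_block` helper, mirroring `make_layers` above):

```python
import paddle.nn as nn

def conv_block(in_channels, v, batch_norm=False):
    # kernel_size replaces filter_size; no act= argument on the layer.
    conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
    if batch_norm:
        # BatchNorm also loses act='relu'; ReLU becomes its own layer.
        return [conv2d, nn.BatchNorm(v), nn.ReLU()]
    return [conv2d, nn.ReLU()]
```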
@@ -93,9 +93,9 @@ from .layer.common import Dropout2D #DEFINE_ALIAS
 from .layer.common import Dropout3D #DEFINE_ALIAS
 from .layer.pooling import AdaptiveAvgPool2d #DEFINE_ALIAS
 from .layer.pooling import AdaptiveAvgPool3d #DEFINE_ALIAS
-from .layer.conv import Conv2D #DEFINE_ALIAS
+from .layer.conv import Conv2d #DEFINE_ALIAS
+from .layer.conv import Conv3d #DEFINE_ALIAS
 from .layer.conv import ConvTranspose2d #DEFINE_ALIAS
-from .layer.conv import Conv3D #DEFINE_ALIAS
 from .layer.conv import ConvTranspose3d #DEFINE_ALIAS
 # from .layer.conv import TreeConv #DEFINE_ALIAS
 # from .layer.conv import Conv1D #DEFINE_ALIAS

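After the alias updates in `paddle/nn/__init__.py` and `paddle/nn/layer/__init__.py`, downstream code imports the lowercase 2.0 names directly; as the test changes in this commit show, the capitalized classes are no longer exported:

```python
# 2.0-style exports after this commit:
from paddle.nn import Conv2d, Conv3d, ConvTranspose2d, ConvTranspose3d
```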
(File diff suppressed because it is too large.)

@@ -57,9 +57,9 @@ from .common import Dropout2D #DEFINE_ALIAS
 from .common import Dropout3D #DEFINE_ALIAS
 from .pooling import AdaptiveAvgPool2d #DEFINE_ALIAS
 from .pooling import AdaptiveAvgPool3d #DEFINE_ALIAS
-from .conv import Conv2D #DEFINE_ALIAS
+from .conv import Conv2d #DEFINE_ALIAS
+from .conv import Conv3d #DEFINE_ALIAS
 from .conv import ConvTranspose2d #DEFINE_ALIAS
-from .conv import Conv3D #DEFINE_ALIAS
 from .conv import ConvTranspose3d #DEFINE_ALIAS
 # from .conv import TreeConv #DEFINE_ALIAS
 # from .conv import Conv1D #DEFINE_ALIAS

(File diff suppressed because it is too large.)