add norm 2.0 api, test=develop (#26465)
* add norm 2.0 api, test=develop
parent a8b5741fb4
commit 0679678212
@ -0,0 +1,129 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard


class TestBatchNorm(unittest.TestCase):
    def test_name(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
            places.append(fluid.CUDAPlace(0))
        for p in places:
            with fluid.dygraph.guard(p):
                batch_norm1d = paddle.nn.BatchNorm1d(1, name="test")

    def test_error(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
            places.append(fluid.CUDAPlace(0))
        for p in places:
            # Each helper feeds an input whose rank does not match the
            # layer's expected rank, which should raise ValueError.
            def error1d():
                x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
                batch_norm1d = paddle.nn.BatchNorm1d(1)
                batch_norm1d(fluid.dygraph.to_variable(x_data_4))

            def error2d():
                x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32')
                batch_norm2d = paddle.nn.BatchNorm2d(1)
                batch_norm2d(fluid.dygraph.to_variable(x_data_3))

            def error3d():
                x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
                batch_norm3d = paddle.nn.BatchNorm3d(1)
                batch_norm3d(fluid.dygraph.to_variable(x_data_4))

            with fluid.dygraph.guard(p):
                self.assertRaises(ValueError, error1d)
                self.assertRaises(ValueError, error2d)
                self.assertRaises(ValueError, error3d)

    def test_dygraph(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
            places.append(fluid.CUDAPlace(0))
        for p in places:
            shape = [4, 10, 4, 4]

            def compute_v1(x, is_test, trainable_statistics):
                # Old 1.x dygraph layer.
                with fluid.dygraph.guard(p):
                    bn = fluid.dygraph.BatchNorm(
                        shape[1],
                        is_test=is_test,
                        trainable_statistics=trainable_statistics)
                    y = bn(fluid.dygraph.to_variable(x))
                return y.numpy()

            def compute_v2(x):
                # New 2.0 layer; the output should match the 1.x layer.
                with fluid.dygraph.guard(p):
                    bn = paddle.nn.BatchNorm2d(shape[1])
                    y = bn(fluid.dygraph.to_variable(x))
                return y.numpy()

            x = np.random.randn(*shape).astype("float32")
            y1 = compute_v1(x, False, False)
            y2 = compute_v2(x)
            self.assertTrue(np.allclose(y1, y2))

    def test_static(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
            places.append(fluid.CUDAPlace(0))
        for p in places:
            exe = fluid.Executor(p)
            shape = [4, 10, 16, 16]

            def compute_v1(x_np, is_test, trainable_statistics):
                with program_guard(Program(), Program()):
                    bn = fluid.dygraph.BatchNorm(
                        shape[1],
                        is_test=is_test,
                        trainable_statistics=trainable_statistics)
                    x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
                    y = bn(x)
                    exe.run(fluid.default_startup_program())
                    r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
                return r

            def compute_v2(x_np):
                with program_guard(Program(), Program()):
                    bn = paddle.nn.BatchNorm2d(shape[1])
                    x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
                    y = bn(x)
                    exe.run(fluid.default_startup_program())
                    r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
                return r

            x = np.random.randn(*shape).astype("float32")
            y1 = compute_v1(x, False, False)
            y2 = compute_v2(x)
            self.assertTrue(np.allclose(y1, y2))


if __name__ == '__main__':
    unittest.main()
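For quick reference, a minimal sketch of the dygraph usage these batch-norm tests exercise. It relies only on calls that appear in the diff above; the place and input shape are illustrative:

import numpy as np
import paddle
import paddle.fluid as fluid

# NCHW input: batch 4, 10 channels, 4x4 spatial dims.
with fluid.dygraph.guard(fluid.CPUPlace()):
    x = np.random.randn(4, 10, 4, 4).astype("float32")
    bn = paddle.nn.BatchNorm2d(10)  # num_features matches the channel dim
    y = bn(fluid.dygraph.to_variable(x))
    print(y.numpy().shape)  # (4, 10, 4, 4)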
@ -0,0 +1,86 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard


class TestDygraphGroupNormv2(unittest.TestCase):
    def test_dygraph(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu("group_norm"):
            places.append(fluid.CUDAPlace(0))
        for p in places:
            shape = [2, 6, 2, 2]

            def compute_v1(x):
                with fluid.dygraph.guard(p):
                    # 1.x layer; channel count must match the input.
                    gn = fluid.dygraph.GroupNorm(channels=6, groups=2)
                    y = gn(fluid.dygraph.to_variable(x))
                return y.numpy()

            def compute_v2(x):
                with fluid.dygraph.guard(p):
                    # 2.0 layer with the renamed keyword arguments.
                    gn = paddle.nn.GroupNorm(num_channels=6, num_groups=2)
                    y = gn(fluid.dygraph.to_variable(x))
                return y.numpy()

            x = np.random.randn(*shape).astype("float32")
            y1 = compute_v1(x)
            y2 = compute_v2(x)
            self.assertTrue(np.allclose(y1, y2))

    def test_static(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu("group_norm"):
            places.append(fluid.CUDAPlace(0))
        for p in places:
            exe = fluid.Executor(p)
            shape = [2, 6, 2, 2]

            def compute_v1(x_np):
                with program_guard(Program(), Program()):
                    gn = fluid.dygraph.GroupNorm(channels=6, groups=2)
                    x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
                    y = gn(x)
                    exe.run(fluid.default_startup_program())
                    r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
                return r

            def compute_v2(x_np):
                with program_guard(Program(), Program()):
                    gn = paddle.nn.GroupNorm(num_channels=6, num_groups=2)
                    x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
                    y = gn(x)
                    exe.run(fluid.default_startup_program())
                    r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
                return r

            x = np.random.randn(*shape).astype("float32")
            y1 = compute_v1(x)
            y2 = compute_v2(x)
            self.assertTrue(np.allclose(y1, y2))


if __name__ == '__main__':
    unittest.main()
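The group-norm tests hinge on the keyword renaming between the 1.x and 2.0 layers (channels/groups vs. num_channels/num_groups). A minimal side-by-side sketch with illustrative shapes; with default-initialized scale and bias the two layers should agree, which is exactly what the test asserts:

import numpy as np
import paddle
import paddle.fluid as fluid

with fluid.dygraph.guard(fluid.CPUPlace()):
    x = fluid.dygraph.to_variable(
        np.random.randn(2, 6, 2, 2).astype("float32"))
    gn_v1 = fluid.dygraph.GroupNorm(channels=6, groups=2)      # 1.x naming
    gn_v2 = paddle.nn.GroupNorm(num_channels=6, num_groups=2)  # 2.0 naming
    assert np.allclose(gn_v1(x).numpy(), gn_v2(x).numpy())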
@ -0,0 +1,115 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard


class TestInstanceNorm(unittest.TestCase):
    def test_error(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu(
                "instance_norm"):
            places.append(fluid.CUDAPlace(0))
        for p in places:
            # Each helper feeds an input whose rank does not match the
            # layer's expected rank, which should raise ValueError.
            def error1d():
                x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
                instance_norm1d = paddle.nn.InstanceNorm1d(1)
                instance_norm1d(fluid.dygraph.to_variable(x_data_4))

            def error2d():
                x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32')
                instance_norm2d = paddle.nn.InstanceNorm2d(1)
                instance_norm2d(fluid.dygraph.to_variable(x_data_3))

            def error3d():
                x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
                instance_norm3d = paddle.nn.InstanceNorm3d(1)
                instance_norm3d(fluid.dygraph.to_variable(x_data_4))

            with fluid.dygraph.guard(p):
                self.assertRaises(ValueError, error1d)
                self.assertRaises(ValueError, error2d)
                self.assertRaises(ValueError, error3d)

    def test_dygraph(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu(
                "instance_norm"):
            places.append(fluid.CUDAPlace(0))
        for p in places:
            shape = [4, 10, 4, 4]

            def compute_v1(x):
                with fluid.dygraph.guard(p):
                    ins = fluid.dygraph.InstanceNorm(shape[1])
                    y = ins(fluid.dygraph.to_variable(x))
                return y.numpy()

            def compute_v2(x):
                with fluid.dygraph.guard(p):
                    ins = paddle.nn.InstanceNorm2d(shape[1])
                    y = ins(fluid.dygraph.to_variable(x))
                return y.numpy()

            x = np.random.randn(*shape).astype("float32")
            y1 = compute_v1(x)
            y2 = compute_v2(x)
            self.assertTrue(np.allclose(y1, y2))

    def test_static(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu(
                "instance_norm"):
            places.append(fluid.CUDAPlace(0))
        for p in places:
            exe = fluid.Executor(p)
            shape = [4, 10, 16, 16]

            def compute_v1(x_np):
                with program_guard(Program(), Program()):
                    ins = fluid.dygraph.InstanceNorm(shape[1])
                    x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
                    y = ins(x)
                    exe.run(fluid.default_startup_program())
                    r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
                return r

            def compute_v2(x_np):
                with program_guard(Program(), Program()):
                    ins = paddle.nn.InstanceNorm2d(shape[1])
                    x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
                    y = ins(x)
                    exe.run(fluid.default_startup_program())
                    r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
                return r

            x = np.random.randn(*shape).astype("float32")
            y1 = compute_v1(x)
            y2 = compute_v2(x)
            self.assertTrue(np.allclose(y1, y2))


if __name__ == '__main__':
    unittest.main()
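The analogous dygraph sketch for the new instance-norm layer (place and shape illustrative); unlike batch norm, each (sample, channel) plane is normalized independently:

import numpy as np
import paddle
import paddle.fluid as fluid

with fluid.dygraph.guard(fluid.CPUPlace()):
    x = np.random.randn(4, 10, 4, 4).astype("float32")
    inst = paddle.nn.InstanceNorm2d(10)  # one affine pair per channel
    y = inst(fluid.dygraph.to_variable(x))
    print(y.numpy().shape)  # (4, 10, 4, 4)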
@ -0,0 +1,86 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard


class TestDygraphLayerNormv2(unittest.TestCase):
    def test_dygraph(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu("layer_norm"):
            places.append(fluid.CUDAPlace(0))
        for p in places:
            shape = [4, 10, 4, 4]

            def compute_v1(x):
                with fluid.dygraph.guard(p):
                    ln = fluid.dygraph.LayerNorm(shape[1:])
                    y = ln(fluid.dygraph.to_variable(x))
                return y.numpy()

            def compute_v2(x):
                with fluid.dygraph.guard(p):
                    ln = paddle.nn.LayerNorm(shape[1:])
                    y = ln(fluid.dygraph.to_variable(x))
                return y.numpy()

            x = np.random.randn(*shape).astype("float32")
            y1 = compute_v1(x)
            y2 = compute_v2(x)
            self.assertTrue(np.allclose(y1, y2))

    def test_static(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu("layer_norm"):
            places.append(fluid.CUDAPlace(0))
        for p in places:
            exe = fluid.Executor(p)
            shape = [4, 10, 16, 16]

            def compute_v1(x_np):
                with program_guard(Program(), Program()):
                    ln = fluid.dygraph.LayerNorm(shape[1:])
                    x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
                    y = ln(x)
                    exe.run(fluid.default_startup_program())
                    r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
                return r

            def compute_v2(x_np):
                with program_guard(Program(), Program()):
                    ln = paddle.nn.LayerNorm(shape[1:])
                    x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
                    y = ln(x)
                    exe.run(fluid.default_startup_program())
                    r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
                return r

            x = np.random.randn(*shape).astype("float32")
            y1 = compute_v1(x)
            y2 = compute_v2(x)
            self.assertTrue(np.allclose(y1, y2))


if __name__ == '__main__':
    unittest.main()
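And for layer norm, whose constructor takes the trailing shape to normalize over rather than a channel count, mirroring shape[1:] in the tests above (values illustrative):

import numpy as np
import paddle
import paddle.fluid as fluid

with fluid.dygraph.guard(fluid.CPUPlace()):
    x = np.random.randn(4, 10, 4, 4).astype("float32")
    ln = paddle.nn.LayerNorm([10, 4, 4])  # normalize over the last 3 dims
    y = ln(fluid.dygraph.to_variable(x))
    print(y.numpy().shape)  # (4, 10, 4, 4)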