Add bf16 matmul, fc, elementwise add and mul (#28729)
* Add bf16 matmul, fc, elementwise add and mul
* Correct unit test
parent efc3b182f0
commit 8c0ea4bffe
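All four new tests feed BF16 data to the operators as uint16 arrays produced by convert_float_to_uint16. As a minimal sketch of what such a helper presumably does (an assumption, not the helper's actual source): bfloat16 keeps only the upper 16 bits of an IEEE float32, so the conversion can be written in numpy as

import numpy as np

def float_to_bf16_bits(arr):  # hypothetical name, for illustration only
    # Reinterpret the float32 bits as uint32 and keep the top 16 bits;
    # this truncates the mantissa, which is what BF16 storage amounts to.
    return np.right_shift(np.asarray(arr, np.float32).view(np.uint32),
                          16).astype(np.uint16)

print(float_to_bf16_bits(np.array([1.0, 0.5, 3.14159])))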
@ -0,0 +1,60 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
from paddle import enable_static


@unittest.skipIf(not core.supports_bfloat16(),
                 "place does not support BF16 evaluation")
class TestElementwiseAddBf16MklDNNOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_add"
        self.use_mkldnn = True
        self.mkldnn_data_type = "bfloat16"
        self.axis = -1

        self.generate_data()
        # BF16 tensors are passed to the operator as uint16 arrays.
        self.inputs = {
            'X': convert_float_to_uint16(self.x),
            'Y': convert_float_to_uint16(self.y)
        }
        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
        self.outputs = {'Out': convert_float_to_uint16(self.out)}

    def generate_data(self):
        self.x = np.random.random(100).astype(np.float32)
        self.y = np.random.random(100).astype(np.float32)
        self.out = np.add(self.x, self.y)

    def test_check_output(self):
        self.check_output_with_place(core.CPUPlace())

    # Gradient checking is not performed for the BF16 kernels,
    # so the grad tests are intentional no-ops.
    def test_check_grad_normal(self):
        pass

    def test_check_grad_ingore_x(self):
        pass

    def test_check_grad_ingore_y(self):
        pass


if __name__ == '__main__':
    enable_static()
    unittest.main()
@ -0,0 +1,60 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
from paddle import enable_static


@unittest.skipIf(not core.supports_bfloat16(),
                 "place does not support BF16 evaluation")
class TestElementwiseMulBf16MklDNNOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_mul"
        self.use_mkldnn = True
        self.mkldnn_data_type = "bfloat16"
        self.axis = -1

        self.generate_data()
        # BF16 tensors are passed to the operator as uint16 arrays.
        self.inputs = {
            'X': convert_float_to_uint16(self.x),
            'Y': convert_float_to_uint16(self.y)
        }
        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
        self.outputs = {'Out': convert_float_to_uint16(self.out)}

    def generate_data(self):
        self.x = np.random.random(100).astype(np.float32)
        self.y = np.random.random(100).astype(np.float32)
        self.out = np.multiply(self.x, self.y)

    def test_check_output(self):
        self.check_output_with_place(core.CPUPlace())

    # Gradient checking is not performed for the BF16 kernels.
    def test_check_grad_normal(self):
        pass

    def test_check_grad_ingore_x(self):
        pass

    def test_check_grad_ingore_y(self):
        pass


if __name__ == '__main__':
    enable_static()
    unittest.main()
@ -0,0 +1,85 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
from paddle import enable_static


def fully_connected_naive(input, weights, bias_data):
    # Plain numpy reference implementation of the fc operator.
    result = np.dot(input, weights) + bias_data
    return result


class MatrixGenerate:
    def __init__(self, mb, ic, oc, h, w):
        # The (ic, h, w) volume is flattened into a single input dimension.
        self.input = np.random.random((mb, ic * h * w)).astype(np.float32)
        self.weights = np.random.random((ic * h * w, oc)).astype(np.float32)


@unittest.skipIf(not core.supports_bfloat16(),
                 "place does not support BF16 evaluation")
class TestFcBf16MklDNNOp(OpTest):
    def generate_data(self):
        self.matrix = MatrixGenerate(1, 10, 15, 3, 3)
        self.bias = np.random.random(15).astype("float32")

    def setUp(self):
        self.op_type = "fc"
        self.use_mkldnn = True
        self.mkldnn_data_type = "bfloat16"
        self.force_fp32_output = False
        self.generate_data()

        self.output = fully_connected_naive(self.matrix.input,
                                            self.matrix.weights, self.bias)
        if not self.force_fp32_output:
            self.output = convert_float_to_uint16(self.output)

        self.inputs = {
            'Input': convert_float_to_uint16(self.matrix.input),
            'W': self.matrix.weights,
            'Bias': self.bias
        }

        self.attrs = {
            'use_mkldnn': self.use_mkldnn,
            'force_fp32_output': self.force_fp32_output
        }

        self.outputs = {'Out': self.output}

    def test_check_output(self):
        self.check_output_with_place(core.CPUPlace())

    # Gradient checking is not performed for the BF16 kernels.
    def test_check_grad_normal(self):
        pass

    def test_check_grad_no_weight(self):
        pass


class TestFCMKLDNNOp1(TestFcBf16MklDNNOp):
    def generate_data(self):
        self.matrix = MatrixGenerate(2, 15, 48, 2, 2)
        self.bias = np.random.random(48).astype(np.float32)


if __name__ == "__main__":
    enable_static()
    unittest.main()
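One detail worth noting in the fc test above: MatrixGenerate flattens the (ic, h, w) volume into a single input dimension, so the default case MatrixGenerate(1, 10, 15, 3, 3) produces a (1, 90) input and a (90, 15) weight matrix. A quick shape check, as a standalone snippet reusing the class above:

m = MatrixGenerate(1, 10, 15, 3, 3)
print(m.input.shape)    # (1, 90)  -> mb x (ic * h * w)
print(m.weights.shape)  # (90, 15) -> (ic * h * w) x oc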
@ -0,0 +1,121 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import os
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
from paddle import enable_static


@unittest.skipIf(not core.supports_bfloat16(),
                 "place does not support BF16 evaluation")
class TestMatmulBf16MklDNNOp(OpTest):
    def generate_data(self):
        self.x = np.random.random((25, 2, 2)).astype(np.float32)
        self.y = np.random.random((25, 2, 2)).astype(np.float32)
        self.alpha = 1.0
        self.out = self.alpha * np.matmul(self.x, self.y)

    def set_attributes(self):
        self.alpha = self.alpha if hasattr(self, 'alpha') else 1.0
        self.attrs = {
            'alpha': self.alpha,
            "use_mkldnn": self.use_mkldnn,
            "mkldnn_data_type": self.mkldnn_data_type,
            "force_fp32_output": self.force_fp32_output
        }

    def setUp(self):
        self.op_type = "matmul"
        self.use_mkldnn = True
        self.dtype = np.uint16
        self.mkldnn_data_type = "bfloat16"
        self.force_fp32_output = False
        self.generate_data()
        self.set_attributes()

        # With force_fp32_output the reference output stays fp32;
        # otherwise it is converted to the BF16 (uint16) representation.
        if not self.force_fp32_output:
            self.out = convert_float_to_uint16(self.out)
        self.outputs = {'Out': self.out}

        self.x = convert_float_to_uint16(self.x)
        self.y = convert_float_to_uint16(self.y)
        self.inputs = {'X': self.x, 'Y': self.y}

    def test_check_output(self):
        self.check_output_with_place(core.CPUPlace())

    def test_check_grad(self):
        pass


class TestDnnlMatMulOpAlpha(TestMatmulBf16MklDNNOp):
    def generate_data(self):
        self.x = np.random.random((17, 2, 3)).astype(np.float32)
        self.y = np.random.random((17, 3, 2)).astype(np.float32)
        self.alpha = 2.0
        self.out = self.alpha * np.matmul(self.x, self.y)


class TestDnnlMatMulOp2D(TestMatmulBf16MklDNNOp):
    def generate_data(self):
        self.x = np.random.random((12, 9)).astype(np.float32)
        self.y = np.random.random((9, 12)).astype(np.float32)
        self.out = np.matmul(self.x, self.y)


class TestDnnlMatMulOpTransposeX(TestMatmulBf16MklDNNOp):
    def generate_data(self):
        self.x = np.random.random((12, 9)).astype(np.float32)
        self.y = np.random.random((12, 9)).astype(np.float32)
        self.out = np.matmul(np.transpose(self.x), self.y)

    def set_attributes(self):
        self.attrs = {
            "use_mkldnn": self.use_mkldnn,
            "mkldnn_data_type": self.mkldnn_data_type,
            'transpose_X': True
        }


class TestDnnlMatMulOpTransposeY(TestMatmulBf16MklDNNOp):
    def generate_data(self):
        self.x = np.random.random((12, 9)).astype(np.float32)
        self.y = np.random.random((12, 9)).astype(np.float32)
        self.out = np.matmul(self.x, np.transpose(self.y))

    def set_attributes(self):
        self.attrs = {
            "use_mkldnn": self.use_mkldnn,
            "mkldnn_data_type": self.mkldnn_data_type,
            'transpose_Y': True
        }


class TestMatmulBf16MklDNNForceFp32Output(TestMatmulBf16MklDNNOp):
    def generate_data(self):
        self.x = np.random.random((12, 9)).astype(np.float32)
        self.y = np.random.random((9, 12)).astype(np.float32)
        self.force_fp32_output = True
        self.alpha = 0.5
        self.out = self.alpha * np.matmul(self.x, self.y)


if __name__ == "__main__":
    enable_static()
    unittest.main()