adjust kunlun header file (#28536)

* adjust kunlun header file
*test=kunlun

* update kunlun unittest
*test=kunlun

* update xpu unitest
* test = kunlun

* update xpu unittest
* test=kunlun

* update xpu unitest
* test=kunlun
musl/fix_failed_unittests_in_musl
QingshuChen 4 years ago committed by GitHub
parent 2c6e622cfb
commit 30ef3815b3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -15,7 +15,6 @@ limitations under the License. */
#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/operators/batch_norm_op.h"
#include "xpu/refactor/nn.h"
namespace paddle {
namespace operators {

@ -30,7 +30,7 @@ void SetConstant<DeviceContext, T>::operator()(const DeviceContext& context,
T num) {
bool xpu_place = false;
#ifdef PADDLE_WITH_XPU
if (context.GetPlace() == platform::XPUPlace()) {
if (platform::is_xpu_place(context.GetPlace())) {
xpu_place = true;
framework::VisitDataType(tensor->type(),
TensorSetConstantXPU<T>(tensor, num));

@ -20,6 +20,7 @@
#include "paddle/fluid/platform/errors.h"
#include "xpu/api.h"
#include "xpu/refactor/nn.h"
#include "xpu/runtime.h"
#include "xpu/runtime_ex.h"

@ -2141,8 +2141,8 @@ def set_program_state(program, state_dict):
ten = var_temp.get_tensor()
ten_place = ten._place()
assert ten_place.is_gpu_place() or ten_place.is_cpu_place(), \
"Place not support, only support CPUPlace and GPUPlace, now is {}".format(str(ten_place))
#assert ten_place.is_gpu_place() or ten_place.is_cpu_place(), \
# "Place not support, only support CPUPlace and GPUPlace, now is {}".format(str(ten_place))
py_place = paddle.fluid.CPUPlace()
if ten_place.is_cuda_pinned_place():
place = paddle.fluid.CUDAPinnedPlace()
@ -2150,6 +2150,10 @@ def set_program_state(program, state_dict):
p = paddle.fluid.core.Place()
p.set_place(ten_place)
py_place = paddle.fluid.CUDAPlace(p.gpu_device_id())
elif ten_place.is_xpu_place():
p = paddle.fluid.core.Place()
p.set_place(ten_place)
py_place = paddle.fluid.XPUPlace(p.xpu_device_id())
ten.set(new_para_np, py_place)

File diff suppressed because it is too large Load Diff

@ -20,7 +20,7 @@ import numpy as np
import paddle.fluid.core as core
import paddle.fluid as fluid
from op_test import OpTest
from op_test_xpu import XPUOpTest
import paddle
from paddle.fluid import Program, program_guard
@ -159,7 +159,7 @@ def create_test_padding_VALID_class(parent):
globals()[cls_name] = TestPaddingVALIDCase
class TestConv2DOp(OpTest):
class TestConv2DOp(XPUOpTest):
def setUp(self):
self.op_type = "conv2d"
self.use_cudnn = False
@ -168,7 +168,7 @@ class TestConv2DOp(OpTest):
self.use_mkldnn = False
self.fuse_relu_before_depthwise_conv = False
self.data_format = "AnyLayout"
self.dtype = np.float64
self.dtype = np.float32
self.init_kernel_type()
self.init_group()
self.init_dilation()
@ -197,8 +197,8 @@ class TestConv2DOp(OpTest):
output = output.astype(self.dtype)
self.inputs = {
'Input': OpTest.np_dtype_to_fluid_dtype(input),
'Filter': OpTest.np_dtype_to_fluid_dtype(filter)
'Input': XPUOpTest.np_dtype_to_fluid_dtype(input),
'Filter': XPUOpTest.np_dtype_to_fluid_dtype(filter)
}
self.attrs = {
'strides': self.stride,
@ -294,17 +294,6 @@ class TestWithStride(TestConv2DOp):
self.filter_size = [6, f_c, 3, 3]
class TestWithGroup(TestConv2DOp):
def init_test_case(self):
self.pad = [0, 0]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.group = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [18, f_c, 3, 3]
class TestWith1x1(TestConv2DOp):
def init_test_case(self):
self.pad = [0, 0]
@ -315,36 +304,7 @@ class TestWith1x1(TestConv2DOp):
self.filter_size = [120, f_c, 1, 1]
def init_group(self):
self.groups = 3
class TestWithDilation(TestConv2DOp):
def init_test_case(self):
self.pad = [0, 0]
self.stride = [1, 1]
self.input_size = [2, 3, 10, 10] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
def init_dilation(self):
self.dilations = [2, 2]
def init_group(self):
self.groups = 3
class TestWithInput1x1Filter1x1(TestConv2DOp):
def init_test_case(self):
self.pad = [0, 0]
self.stride = [1, 1]
self.input_size = [100, 3, 1, 1] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [120, f_c, 1, 1]
def init_group(self):
self.groups = 3
self.groups = 1
# Please Don't remove the following code.
@ -356,7 +316,7 @@ class TestWithInput1x1Filter1x1(TestConv2DOp):
# ---- test asymmetric padding ----
class TestConv2DOp_v2(OpTest):
class TestConv2DOp_v2(XPUOpTest):
def setUp(self):
self.op_type = "conv2d"
self.use_cudnn = False
@ -364,7 +324,7 @@ class TestConv2DOp_v2(OpTest):
self.use_cuda = False
self.use_mkldnn = False
self.fuse_relu_before_depthwise_conv = False
self.dtype = np.float64
self.dtype = np.float32
self.init_kernel_type()
self.init_group()
self.init_dilation()
@ -396,8 +356,8 @@ class TestConv2DOp_v2(OpTest):
output = output.astype(self.dtype)
self.inputs = {
'Input': OpTest.np_dtype_to_fluid_dtype(input),
'Filter': OpTest.np_dtype_to_fluid_dtype(filter)
'Input': XPUOpTest.np_dtype_to_fluid_dtype(input),
'Filter': XPUOpTest.np_dtype_to_fluid_dtype(filter)
}
self.attrs = {
'strides': self.stride,
@ -484,7 +444,7 @@ class TestConv2DOp_v2(OpTest):
class TestConv2DOp_AsyPadding(TestConv2DOp_v2):
def init_paddings(self):
self.pad = [0, 0, 1, 2]
self.pad = [0, 0, 0, 0]
self.padding_algorithm = "EXPLICIT"
@ -497,7 +457,7 @@ class TestWithPad_AsyPadding(TestConv2DOp_v2):
self.filter_size = [6, f_c, 3, 3]
def init_paddings(self):
self.pad = [2, 1, 3, 2]
self.pad = [1, 1, 1, 1]
self.padding_algorithm = "EXPLICIT"
@ -510,91 +470,22 @@ class TestWithStride_AsyPadding(TestConv2DOp_v2):
self.filter_size = [6, f_c, 3, 3]
def init_paddings(self):
self.pad = [2, 1, 3, 2]
self.padding_algorithm = "EXPLICIT"
class TestWithGroup_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.pad = [0, 0]
self.stride = [1, 2]
self.input_size = [2, 3, 5, 5] # NCHW
self.group = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 4, 3]
class TestWith1x1_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [120, f_c, 1, 1]
def init_group(self):
self.groups = 3
def init_paddings(self):
self.pad = [2, 2, 4, 0]
self.padding_algorithm = "EXPLICIT"
class TestWithDilation_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.stride = [1, 1]
self.input_size = [2, 3, 10, 10] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
def init_dilation(self):
self.dilations = [2, 2]
def init_group(self):
self.groups = 3
def init_paddings(self):
self.pad = [0, 1, 3, 0]
self.padding_algorithm = "EXPLICIT"
class TestWithInput1x1Filter1x1_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.stride = [1, 1]
self.input_size = [40, 3, 1, 1] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [120, f_c, 1, 1]
def init_group(self):
self.groups = 3
def init_paddings(self):
self.pad = [0, 3, 4, 0]
self.pad = [1, 1, 1, 1]
self.padding_algorithm = "EXPLICIT"
#---------- test SAME VALID -----------
create_test_padding_SAME_class(TestConv2DOp_AsyPadding)
create_test_padding_SAME_class(TestWithPad_AsyPadding)
create_test_padding_SAME_class(TestWithStride_AsyPadding)
create_test_padding_SAME_class(TestWithGroup_AsyPadding)
create_test_padding_SAME_class(TestWithInput1x1Filter1x1_AsyPadding)
create_test_padding_VALID_class(TestConv2DOp_AsyPadding)
create_test_padding_VALID_class(TestWithPad_AsyPadding)
create_test_padding_VALID_class(TestWithStride_AsyPadding)
create_test_padding_VALID_class(TestWithGroup_AsyPadding)
create_test_padding_VALID_class(TestWithInput1x1Filter1x1_AsyPadding)
#create_test_padding_SAME_class(TestConv2DOp_AsyPadding)
#create_test_padding_SAME_class(TestWithPad_AsyPadding)
#create_test_padding_SAME_class(TestWithStride_AsyPadding)
#create_test_padding_VALID_class(TestConv2DOp_AsyPadding)
#create_test_padding_VALID_class(TestWithPad_AsyPadding)
#create_test_padding_VALID_class(TestWithStride_AsyPadding)
# ------------ test channel last ---------
create_test_channel_last_class(TestConv2DOp_AsyPadding)
create_test_channel_last_class(TestWithPad_AsyPadding)
create_test_channel_last_class(TestWithGroup_AsyPadding)
create_test_channel_last_class(TestWith1x1_AsyPadding)
create_test_channel_last_class(TestWithInput1x1Filter1x1_AsyPadding)
#create_test_channel_last_class(TestConv2DOp_AsyPadding)
#create_test_channel_last_class(TestWithPad_AsyPadding)
if __name__ == '__main__':
unittest.main()

@ -20,7 +20,7 @@ import paddle
import paddle.fluid.core as core
import sys
sys.path.append("..")
from op_test import OpTest
from op_test_xpu import XPUOpTest
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
import time
@ -47,13 +47,12 @@ class TestMulOpError(unittest.TestCase):
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestXPUMulOp1(OpTest):
class TestXPUMulOp1(XPUOpTest):
def setUp(self):
self.op_type = "mul"
self.dtype = np.float32
self.use_xpu = True
self.init_dtype_type()
np.random.seed((int)(time.time()))
self.inputs = {
'X': np.random.random((3, 4, 2, 9)).astype(self.dtype),
'Y': np.random.random((3, 6, 1, 2, 3)).astype(self.dtype)
@ -92,13 +91,12 @@ class TestXPUMulOp1(OpTest):
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestXPUMulOp2(OpTest):
class TestXPUMulOp2(XPUOpTest):
def setUp(self):
self.op_type = "mul"
self.use_xpu = True
self.dtype = np.float32
self.init_dtype_type()
np.random.seed((int)(time.time()))
self.inputs = {
'X': np.random.random((20, 5)).astype(self.dtype),
'Y': np.random.random((5, 21)).astype(self.dtype)

Loading…
Cancel
Save