update activation op on kunlun (#29577)

* fix expand && concat/transpose to new api

* update xpu_header

* update activation op on kunlun

* update activation op on kunlun

* update activation op on kunlun

* update activation op on kunlun

* update activation op on kunlun

* add nearest_interp on kunlun

* update error message
TTerror committed af8ded773a (parent cc387159f3) via GitHub

@@ -4,7 +4,7 @@ endif()
INCLUDE(ExternalProject)
SET(XPU_PROJECT "extern_xpu")
-SET(XPU_URL "https://baidu-kunlun-public.su.bcebos.com/paddle_depence/xpu_2020_12_11.tar.gz" CACHE STRING "" FORCE)
+SET(XPU_URL "https://baidu-kunlun-public.su.bcebos.com/paddle_depence/xpu_2020_12_15.tar.gz" CACHE STRING "" FORCE)
SET(XPU_SOURCE_DIR "${THIRD_PARTY_PATH}/xpu")
SET(XPU_DOWNLOAD_DIR "${XPU_SOURCE_DIR}/src/${XPU_PROJECT}")
SET(XPU_INSTALL_DIR "${THIRD_PARTY_PATH}/install/xpu")

File diff suppressed because it is too large.

@@ -229,9 +229,7 @@ class InterpolateGradXPUKernel : public framework::OpKernel<T> {
int trans_mode = (align_corners) ? (0) : ((align_mode == 0) ? (1) : (2));
if (nearest) {
PADDLE_ENFORCE_EQ((data_layout == DataLayout::kNCHW), true,
platform::errors::InvalidArgument(
"XPU nearest is only support NCHW"));
trans_mode = (align_corners) ? (0) : (2);
}
r = xpu::interpolate2d_grad<T>(dev_ctx.x_context(), output_grad->data<T>(),
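The trans_mode value above folds the align_corners and align_mode attributes into the single flag consumed by xpu::interpolate2d_grad; the nearest branch additionally requires NCHW input and only uses modes 0 and 2. A minimal Python paraphrase of that selection, purely illustrative and not part of the commit:

# Illustrative paraphrase of the C++ trans_mode selection above; not part of the diff.
def xpu_trans_mode(align_corners, align_mode, nearest):
    if nearest:
        # nearest path: align_corners -> 0, otherwise -> 2
        return 0 if align_corners else 2
    # bilinear path: align_corners -> 0, align_mode == 0 -> 1, else -> 2
    if align_corners:
        return 0
    return 1 if align_mode == 0 else 2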
@@ -252,7 +250,10 @@ class InterpolateGradXPUKernel : public framework::OpKernel<T> {
namespace ops = paddle::operators;
REGISTER_OP_XPU_KERNEL(bilinear_interp, ops::InterpolateXPUKernel<float>);
REGISTER_OP_XPU_KERNEL(nearest_interp, ops::InterpolateXPUKernel<float>);
REGISTER_OP_XPU_KERNEL(bilinear_interp_grad,
ops::InterpolateGradXPUKernel<float>);
REGISTER_OP_XPU_KERNEL(nearest_interp_grad,
ops::InterpolateGradXPUKernel<float>);
#endif
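With these registrations, the nearest_interp forward and gradient kernels become selectable on XPU. A hedged end-to-end sketch of exercising them from Python follows; the device string and the use of paddle.nn.functional.interpolate are assumptions about a Paddle build with XPU support, not part of this diff:

# Sketch only: assumes a Paddle build with XPU support.
import numpy as np
import paddle
import paddle.nn.functional as F

if paddle.is_compiled_with_xpu():
    paddle.set_device('xpu:0')
    # NCHW layout, as the kernel above requires
    x = paddle.to_tensor(np.random.rand(1, 3, 8, 8).astype('float32'))
    y = F.interpolate(x, scale_factor=2.0, mode='nearest')
    print(y.shape)  # expected: [1, 3, 16, 16]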

@@ -73,8 +73,7 @@ class TestXPUSigmoid(TestXPUActivation):
def test_check_grad(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
-        self.check_grad_with_place(
-            place, ['X'], 'Out', max_relative_error=0.01)
+        self.check_grad_with_place(place, ['X'], 'Out')
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
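This hunk drops the loosened max_relative_error=0.01 in favor of the default gradient-check tolerance, and the hunks below add the same test_check_grad method to the tanh, sqrt, relu, gelu, and square tests. A minimal, self-contained sketch of that shared pattern (the class name and input range here are illustrative, not the exact file contents):

# Hypothetical example of the shared gradient-check pattern; not copied from the file.
import numpy as np
import paddle
from op_test import OpTest  # Paddle's OpTest base class from the test suite


class TestXPUTanhGradSketch(OpTest):  # hypothetical class name
    def setUp(self):
        self.op_type = "tanh"
        self.dtype = np.float32
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': np.tanh(x)}

    def test_check_grad(self):
        if paddle.is_compiled_with_xpu():
            place = paddle.XPUPlace(0)
            # default tolerance; the explicit max_relative_error was removed
            self.check_grad_with_place(place, ['X'], 'Out')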
@@ -90,6 +89,11 @@ class TestXPUTanh(TestXPUActivation):
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
self.outputs = {'Out': out}
def test_check_grad(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['X'], 'Out')
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
@@ -105,6 +109,11 @@ class TestXPUSqrt(TestXPUActivation):
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
self.outputs = {'Out': out}
def test_check_grad(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['X'], 'Out')
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
@@ -142,6 +151,11 @@ class TestXPURelu(TestXPUActivation):
self.inputs = {'X': x}
self.outputs = {'Out': out}
def test_check_grad(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['X'], 'Out')
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
@@ -157,6 +171,11 @@ class TestXPUGelu(TestXPUActivation):
self.outputs = {'Out': out}
self.attrs = {"approximate": approximate, 'use_xpu': True}
def test_check_grad(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['X'], 'Out')
def gelu(x, approximate):
if approximate:
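The gelu reference is truncated by the diff view at the `if approximate:` branch; for context, this is the widely used formulation such a reference typically computes (standard GELU math, not copied from this file):

# Standard GELU math, shown for context; the file's exact body is not visible here.
import numpy as np
from scipy.special import erf

def gelu_reference(x, approximate):
    if approximate:
        # tanh approximation of GELU
        return (0.5 * x * (1.0 + np.tanh(
            np.sqrt(2.0 / np.pi) * (x + 0.044715 * np.power(x, 3))))).astype(x.dtype)
    # exact GELU via the error function
    return (0.5 * x * (1.0 + erf(x / np.sqrt(2.0)))).astype(x.dtype)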
@@ -223,6 +242,11 @@ class TestXPUSquare(TestXPUActivation):
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
self.outputs = {'Out': out}
def test_check_grad(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['X'], 'Out')
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
@@ -239,5 +263,36 @@ class TestXPUPow(TestXPUActivation):
self.outputs = {'Out': out}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestXPULeakyRelu(TestXPUActivation):
def setUp(self):
self.op_type = "leaky_relu"
self.init_dtype()
x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
alpha = np.random.uniform(0, 1)
out = leaky_relu(x, alpha)
self.inputs = {'X': x}
self.outputs = {'Out': out}
self.attrs = {'use_xpu': True, 'alpha': alpha}
def test_check_grad(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['X'], 'Out')
def leaky_relu(x, alpha):
if (alpha < 1):
y_ref = np.maximum(x, alpha * x)
else:
y_ref = np.minimum(x, alpha * x)
return y_ref.astype(x.dtype)
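The reference above covers both regimes: for alpha < 1, leaky_relu(x) equals max(x, alpha*x), while for alpha >= 1 the elementwise definition (x if x >= 0 else alpha*x) collapses to min(x, alpha*x), hence the branch. A quick illustrative check (the input values are arbitrary):

# Illustrative check of both branches of the leaky_relu reference above.
import numpy as np

x = np.array([-2.0, 0.0, 3.0], dtype=np.float32)
print(leaky_relu(x, 0.1))  # alpha < 1  -> [-0.2,  0.,  3.]
print(leaky_relu(x, 2.0))  # alpha >= 1 -> [-4.,   0.,  3.]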
if __name__ == "__main__":
paddle.enable_static()
unittest.main()
