From 5f43f08a5d12103122f0a8d419575fee6970dec1 Mon Sep 17 00:00:00 2001
From: x00540480
Date: Tue, 19 Jan 2021 10:41:36 +0800
Subject: [PATCH] add elu for cpu

---
 .../cpu/mkldnn/eltwise_cpu_kernel.cc          |  2 +
 .../cpu/mkldnn/eltwise_cpu_kernel.h           |  5 ++
 tests/st/ops/cpu/test_elu_op.py               | 59 +++++++++++++++++++
 3 files changed, 66 insertions(+)
 create mode 100644 tests/st/ops/cpu/test_elu_op.py

diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/eltwise_cpu_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/eltwise_cpu_kernel.cc
index 22663612c0..6d94c2ed64 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/eltwise_cpu_kernel.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/eltwise_cpu_kernel.cc
@@ -43,6 +43,8 @@ dnnl::eltwise_forward::desc EltWiseCPUKernel::GetForwardEltwiseDesc(const CNodeP
     return dnnl::eltwise_forward::desc(DnnlForward, dnnl::algorithm::eltwise_square, src_desc);
   } else if (kernel_name == "Tanh") {
     return dnnl::eltwise_forward::desc(DnnlForward, dnnl::algorithm::eltwise_tanh, src_desc);
+  } else if (kernel_name == "Elu") {
+    return dnnl::eltwise_forward::desc(DnnlForward, dnnl::algorithm::eltwise_elu, src_desc, 1.0);
   } else {
     MS_LOG(EXCEPTION) << "Eltwise operators don't support " << kernel_name;
   }
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/eltwise_cpu_kernel.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/eltwise_cpu_kernel.h
index d9d0f49d31..b3009e7902 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/eltwise_cpu_kernel.h
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/eltwise_cpu_kernel.h
@@ -36,6 +36,11 @@ class EltWiseCPUKernel : public MKLCPUKernel {
   dnnl::prop_kind DnnlForward = dnnl::prop_kind::forward_training;
 };
 
+MS_REG_CPU_KERNEL(Elu, KernelAttr().AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat16),
+                  EltWiseCPUKernel);
+MS_REG_CPU_KERNEL(Elu, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
+                  EltWiseCPUKernel);
+MS_REG_CPU_KERNEL(Elu, KernelAttr().AddInputAttr(kNumberTypeFloat).AddOutputAttr(kNumberTypeFloat), EltWiseCPUKernel);
 MS_REG_CPU_KERNEL(ReLU, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
                   EltWiseCPUKernel);
 MS_REG_CPU_KERNEL(ReLU6, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
diff --git a/tests/st/ops/cpu/test_elu_op.py b/tests/st/ops/cpu/test_elu_op.py
new file mode 100644
index 0000000000..82c8c49a1a
--- /dev/null
+++ b/tests/st/ops/cpu/test_elu_op.py
@@ -0,0 +1,59 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import numpy as np
+import pytest
+
+import mindspore.context as context
+import mindspore.nn as nn
+from mindspore import Tensor
+from mindspore.ops import operations as P
+
+class NetElu(nn.Cell):
+    def __init__(self):
+        super(NetElu, self).__init__()
+        self.elu = P.Elu()
+
+    def construct(self, x):
+        return self.elu(x)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_cpu
+@pytest.mark.env_onecard
+def test_elu_fp16():
+    x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]).astype(np.float16))
+    expect = np.array([[-0.632, 4.0, -0.999], [2.0, -0.993, 9.0]]).astype(np.float16)
+    error = np.ones(shape=[2, 3]) * 1.0e-3
+
+    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
+    elu = NetElu()
+    output = elu(x)
+    diff = output.asnumpy() - expect
+    assert np.all(np.abs(diff) < error)
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_cpu
+@pytest.mark.env_onecard
+def test_elu_fp32():
+    x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]).astype(np.float32))
+    expect = np.array([[-0.632, 4.0, -0.999], [2.0, -0.993, 9.0]]).astype(np.float32)
+    error = np.ones(shape=[2, 3]) * 1.0e-3
+
+    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
+    elu = NetElu()
+    output = elu(x)
+    diff = output.asnumpy() - expect
+    assert np.all(np.abs(diff) < error)