!2810 Add operator adaptation in ME for Softsign

Merge pull request !2810 from zhangzheng/softsign
commit 2cd9649b9e

@@ -336,6 +336,21 @@ def get_bprop_softplus(self):
    return bprop

@bprop_getters.register(P.Softsign)
def get_bprop_softsign(self):
    """Grad definition for `Softsign` operation."""
    mul = P.Mul()
    absolute = P.Abs()
    div = P.Div()
    square = P.Square()

    def bprop(x, out, dout):
        # d/dx softsign(x) = d/dx (x / (1 + |x|)) = 1 / (1 + |x|)^2
        dx = mul(dout, div(1, square(1 + absolute(x))))
        return (dx,)

    return bprop

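For reference, the gradient used here follows from softsign(x) = x / (1 + |x|), whose derivative is 1 / (1 + |x|)^2. A minimal NumPy sketch (illustrative only, not part of this change) that checks the analytic formula against a central finite difference:

import numpy as np

def softsign(x):
    """Forward: softsign(x) = x / (1 + |x|)."""
    return x / (1 + np.abs(x))

def softsign_grad(x):
    """Analytic derivative used by the bprop above: 1 / (1 + |x|)^2."""
    return 1 / np.square(1 + np.abs(x))

x = np.linspace(-3.0, 3.0, 7)
eps = 1e-6
numeric = (softsign(x + eps) - softsign(x - eps)) / (2 * eps)
assert np.allclose(numeric, softsign_grad(x), atol=1e-5)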
@bprop_getters.register(P.Tanh)
def get_bprop_tanh(self):
    """Grad definition for `Tanh` operation."""

@@ -122,6 +122,7 @@ from .round import _round_tbe
from .tanh import _tanh_tbe
from .tanh_grad import _tanh_grad_tbe
from .softmax import _softmax_tbe
from .softsign import _softsign_tbe
from .softplus import _softplus_tbe
from .softplus_grad import _softplus_grad_tbe
from .softmax_grad_ext import _softmax_grad_ext_tbe

@@ -0,0 +1,37 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Softsign op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
softsign_op_info = TBERegOp("Softsign") \
.fusion_type("OPAQUE") \
.async_flag(False) \
.binfile_name("softsign.so") \
.compute_cost(10) \
.kernel_name("softsign") \
.partial_flag(True) \
.op_pattern("formatAgnostic") \
.input(0, "x", False, "required", "all") \
.output(0, "y", False, "required", "all") \
.dtype_format(DataType.F16_Default, DataType.F16_Default) \
.dtype_format(DataType.F32_Default, DataType.F32_Default) \
.get_op_info()
@op_info_register(softsign_op_info)
def _softsign_tbe():
"""Softsign TBE register"""
return
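This registration only declares the kernel metadata (format-agnostic, float16/float32); the computation itself comes from the TBE kernel binary. As a rough illustration of what the kernel computes, here is a NumPy reference (a sketch, not MindSpore's implementation):

import numpy as np

def softsign_reference(x):
    """NumPy reference for the two registered dtypes (float16/float32)."""
    x = np.asarray(x)
    return (x / (1 + np.abs(x))).astype(x.dtype)

print(softsign_reference(np.array([0, -1, 2, 30, -30], np.float32)))
# [ 0.        -0.5        0.6666667  0.9677419 -0.9677419]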

@@ -68,7 +68,7 @@ from .nn_ops import (LSTM, SGD, Adam, SparseApplyAdam, SparseApplyLazyAdam, Appl
                     MaxPoolWithArgmax, OneHot, Pad, MirrorPad, PReLU, ReLU, ReLU6, ReLUV2, HSwish, HSigmoid,
                     ResizeBilinear, Sigmoid,
                     SigmoidCrossEntropyWithLogits,
                     SmoothL1Loss, Softmax, Softsign, Softplus, LRN,
                     SoftmaxCrossEntropyWithLogits, ROIAlign,
                     SparseSoftmaxCrossEntropyWithLogits, Tanh,
                     TopK, BinaryCrossEntropy, SparseApplyAdagrad, LARSUpdate, ApplyFtrl, SparseApplyFtrl,
@@ -115,6 +115,7 @@ __all__ = [
    'SparseApplyLazyAdam',
    'Softplus',
    'Softmax',
    'Softsign',
    'LogSoftmax',
    'SoftmaxCrossEntropyWithLogits',
    'ROIAlign',

@@ -224,6 +224,41 @@ class Softplus(PrimitiveWithInfer):
        return input_x


class Softsign(PrimitiveWithInfer):
    r"""
    Softsign activation function.

    The function is shown as follows:

    .. math::

        \text{output} = \frac{\text{input_x}}{1 + \left| \text{input_x} \right|},

    Inputs:
        - **input_x** (Tensor) - The input tensor whose data type should be float.

    Outputs:
        Tensor, with the same type and shape as `input_x`.

    Examples:
        >>> input_x = Tensor(np.array([0, -1, 2, 30, -30]), mindspore.float32)
        >>> softsign = P.Softsign()
        >>> softsign(input_x)
        [0. -0.5 0.6666667 0.9677419 -0.9677419]
    """

    @prim_attr_register
    def __init__(self):
        """init Softsign"""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, input_x):
        return input_x

    def infer_dtype(self, input_x):
        validator.check_tensor_type_same({'input_x': input_x}, mstype.float_type, self.name)
        return input_x
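A minimal end-to-end usage sketch for the new primitive, wrapping it in an nn.Cell (the wrapper name SoftsignNet is illustrative; running it requires a backend that provides the Softsign kernel registered above, e.g. Ascend):

import numpy as np
import mindspore
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P

class SoftsignNet(nn.Cell):
    """Hypothetical wrapper cell that just applies Softsign."""
    def __init__(self):
        super(SoftsignNet, self).__init__()
        self.softsign = P.Softsign()

    def construct(self, x):
        return self.softsign(x)

net = SoftsignNet()
output = net(Tensor(np.array([0, -1, 2, 30, -30]), mindspore.float32))
print(output)  # expected: [ 0. -0.5  0.6666667  0.9677419 -0.9677419]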

class ReLU(PrimitiveWithInfer):
    r"""
    Computes ReLU(Rectified Linear Unit) of input tensor element-wise.

@@ -1376,6 +1376,10 @@ test_case_nn_ops = [
        'block': P.Softmax(),
        'desc_inputs': [[5, 5]],
        'desc_bprop': [[5, 5]]}),
    ('Softsign', {
        'block': P.Softsign(),
        'desc_inputs': [[5, 5]],
        'desc_bprop': [[5, 5]]}),
    ('DepthwiseConv2dNative_1', {
        'block': P.DepthwiseConv2dNative(3, (3, 3), pad_mode="pad", pad=1, stride=2),
        'desc_inputs': [[10, 32, 32, 32], [1, 32, 3, 3]],
