!1865 add inv,invgrad&invert for vm

Merge pull request !1865 from JichenZhao/bnops_for_vm
pull/1865/MERGE
Authored by mindspore-ci-bot, committed by Gitee
commit 0a897b0ce7
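For context, a minimal usage sketch of the two new forward ops, built from the docstring examples added in this change; it assumes a MindSpore install where the new TBE kernels are available (Ascend backend):

import numpy as np
import mindspore
from mindspore import Tensor
from mindspore.ops import operations as P

# Inv: element-wise reciprocal 1 / x
inv = P.Inv()
print(inv(Tensor(np.array([0.25, 0.4, 0.31, 0.52]), mindspore.float32)))
# -> [4., 2.5, 3.2258065, 1.923077] (per the Inv docstring example)

# Invert: element-wise bitwise NOT on int16/uint16 tensors
invert = P.Invert()
print(invert(Tensor(np.array([25, 4, 13, 9]), mindspore.int16)))
# -> [-26, -5, -14, -10] (per the Invert docstring example)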

@@ -98,6 +98,7 @@ static std::map<string, string> tbe_func_adapter_map = {
  {"n_ms_with_mask", "nms_with_mask"},
  {"square_sum_all", "square_sum_all"},
  {"cum_sum", "cumsum_d"},
  {"inv_grad", "inv_grad"},
  {"apply_rms_prop", "apply_rms_prop_d"},
  {"cum_prod", "cumprod_d"},
  {"reduce_all", "reduce_all_d"},

@@ -1025,3 +1025,14 @@ def get_bprop_atanh(self):
        dx = div(1, tmp) * dout
        return (dx,)
    return bprop


@bprop_getters.register(P.Inv)
def get_bprop_inv(self):
    """Grad definition for 'Inv' operation"""
    inv_grad = G.InvGrad()

    def bprop(x, out, dout):
        dx = inv_grad(x, dout)
        return (dx,)

    return bprop
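For orientation, the analytic gradient this backward rule is meant to produce, sketched in plain NumPy; this is not the TBE kernel itself, whose exact argument convention is defined by inv_grad.so:

import numpy as np

# d(1/x)/dx = -1/x**2 = -(out**2), scaled by the incoming gradient dout.
x = np.array([0.25, 0.4, 0.31, 0.52], dtype=np.float32)
out = 1.0 / x                 # forward result of Inv
dout = np.ones_like(x)        # upstream gradient
dx = -(out ** 2) * dout       # expected dx, equivalently -dout / x**2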

@@ -233,6 +233,9 @@ from .atan_grad import _atan_grad_tbe
from .atanh import _atanh_tbe
from .cosh import _cosh_tbe
from .sinh import _sinh_tbe
from .inv import _inv_tbe
from .inv_grad import _inv_grad_tbe
from .invert import _invert_tbe
from .basic_lstm_cell import _basic_lstm_cell_tbe
from .basic_lstm_cell_c_state_grad import _basic_lstm_cell_c_state_grad_tbe
from .basic_lstm_cell_weight_grad import _basic_lstm_cell_weight_grad_tbe

@@ -0,0 +1,39 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Inv op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

inv_op_info = TBERegOp("Inv") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("inv.so") \
    .compute_cost(10) \
    .kernel_name("inv") \
    .partial_flag(True) \
    .input(0, "x", False, "required", "all") \
    .output(0, "y", False, "required", "all") \
    .dtype_format(DataType.I32_Default, DataType.I32_Default) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
    .dtype_format(DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.I8_Default, DataType.I8_Default) \
    .dtype_format(DataType.U8_Default, DataType.U8_Default) \
    .get_op_info()


@op_info_register(inv_op_info)
def _inv_tbe():
    """Inv TBE register"""
    return

@@ -0,0 +1,39 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""InvGrad op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

inv_grad_op_info = TBERegOp("InvGrad") \
    .fusion_type("ELEMWISE") \
    .async_flag(False) \
    .binfile_name("inv_grad.so") \
    .compute_cost(10) \
    .kernel_name("inv_grad") \
    .partial_flag(True) \
    .input(0, "x", False, "required", "all") \
    .input(1, "grad", False, "required", "all") \
    .output(0, "y", False, "required", "all") \
    .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
    .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \
    .dtype_format(DataType.I8_Default, DataType.I8_Default, DataType.I8_Default) \
    .get_op_info()


@op_info_register(inv_grad_op_info)
def _inv_grad_tbe():
    """InvGrad TBE register"""
    return

@@ -0,0 +1,36 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Invert op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

invert_op_info = TBERegOp("Invert") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("invert.so") \
    .compute_cost(10) \
    .kernel_name("invert") \
    .partial_flag(True) \
    .input(0, "x", False, "required", "all") \
    .output(0, "y", False, "required", "all") \
    .dtype_format(DataType.I16_Default, DataType.I16_Default) \
    .dtype_format(DataType.U16_Default, DataType.U16_Default) \
    .get_op_info()


@op_info_register(invert_op_info)
def _invert_tbe():
    """Invert TBE register"""
    return

@@ -41,7 +41,7 @@ from .control_ops import ControlDepend, GeSwitch, Merge
from .inner_ops import ScalarCast
from .math_ops import (Abs, ACos, Asin, Asinh, AddN, AssignAdd, AssignSub, Atan2, BatchMatMul, BitwiseAnd, BitwiseOr,
                       BitwiseXor,
                       BitwiseXor, Inv, Invert,
                       ReduceMax, ReduceMin, ReduceMean, ReduceSum, ReduceAll, ReduceProd, CumProd,
                       Cos, Div, DivNoNan, Equal, EqualCount, Exp, Expm1, Erf, Erfc, Floor, FloorDiv, FloorMod, Ceil,
                       Acosh, Greater, GreaterEqual, Less, LessEqual, Log, Log1p, LogicalAnd,
@@ -141,6 +141,8 @@ __all__ = [
    'RealDiv',
    'Div',
    'DivNoNan',
    'Inv',
    'Invert',
    'TruncatedNormal',
    'Fill',
    'OnesLike',

@@ -1308,3 +1308,20 @@ class BasicLSTMCellInputGrad(PrimitiveWithInfer):
        validator.check_type_name("dgate", dgate_dtype, [mstype.float16, mstype.float32], self.name)
        validator.check_type_name("w", w_dtype, [mstype.float16, mstype.float32], self.name)
        return (dgate_dtype, dgate_dtype)


class InvGrad(PrimitiveWithInfer):
    """Computes gradients for the Inv operation."""

    @prim_attr_register
    def __init__(self):
        pass

    def infer_shape(self, x, grad):
        validator.check("x_shape", x, "grad_shape", grad, Rel.EQ, self.name)
        return x

    def infer_dtype(self, x, grad):
        validator.check_type_name("x", x, [mstype.float16, mstype.float32, mstype.int32, mstype.int8], self.name)
        validator.check_type_name("grad", grad, [mstype.float16, mstype.float32, mstype.int32, mstype.int8], self.name)
        return x

@@ -2597,3 +2597,63 @@ class BesselI1e(PrimitiveWithInfer):
    def infer_dtype(self, x):
        validator.check_tensor_type_same({'x': x}, mstype.number_type, self.name)
        return x


class Inv(PrimitiveWithInfer):
    """
    Computes the reciprocal (Inv) of the input tensor element-wise.

    Inputs:
        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.

    Outputs:
        Tensor, has the same shape as `input_x`.

    Examples:
        >>> inv = P.Inv()
        >>> input_x = Tensor(np.array([0.25, 0.4, 0.31, 0.52]), mindspore.float32)
        >>> output = inv(input_x)
        [4., 2.5, 3.2258065, 1.923077]
    """

    @prim_attr_register
    def __init__(self):
        pass

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        validator.check_tensor_type_same({'x_dtype': x_dtype}, [mstype.float16, mstype.float32,
                                                                mstype.int32, mstype.int8,
                                                                mstype.uint8], self.name)
        return x_dtype


class Invert(PrimitiveWithInfer):
    """
    Flips all bits of the input tensor element-wise.

    Inputs:
        - **input_x** (Tensor[int16], Tensor[uint16]) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.

    Outputs:
        Tensor, has the same shape as `input_x`.

    Examples:
        >>> invert = P.Invert()
        >>> input_x = Tensor(np.array([25, 4, 13, 9]), mindspore.int16)
        >>> output = invert(input_x)
        [-26, -5, -14, -10]
    """

    @prim_attr_register
    def __init__(self):
        pass

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        validator.check_tensor_type_same({'x_dtype': x_dtype}, [mstype.int16, mstype.uint16], self.name)
        return x_dtype
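As a quick sanity check on the docstring examples above, the same values can be reproduced with plain NumPy (reciprocal for Inv, bitwise NOT for Invert):

import numpy as np

print(1.0 / np.array([0.25, 0.4, 0.31, 0.52], dtype=np.float32))
# -> approximately [4., 2.5, 3.2258065, 1.923077]

print(np.invert(np.array([25, 4, 13, 9], dtype=np.int16)))
# -> [-26, -5, -14, -10]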

@@ -750,6 +750,15 @@ test_case_math_ops = [
        'block': P.Sinh(),
        'desc_inputs': [[3, 4, 5]],
        'desc_bprop': [[3, 4, 5]]}),
    ('Inv', {
        'block': P.Inv(),
        'desc_inputs': [[21, 9, 12, 5]],
        'desc_bprop': [[21, 9, 12, 5]]}),
    ('Invert', {
        'block': P.Invert(),
        'desc_inputs': [Tensor(np.array([[24, 4, 13, 9], [1, 5, 10, 8]]).astype(np.int16))],
        'desc_bprop': [],
        'skip': ['backward']}),
]
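For reference, a rough standalone equivalent of the new 'Invert' test entry, which only exercises the forward pass since backward is skipped; the wrapper Cell below is illustrative and not part of the UT harness:

import numpy as np
from mindspore import Tensor, nn
from mindspore.ops import operations as P

class InvertNet(nn.Cell):
    """Illustrative wrapper around P.Invert (not part of the test harness)."""
    def __init__(self):
        super(InvertNet, self).__init__()
        self.invert = P.Invert()

    def construct(self, x):
        return self.invert(x)

net = InvertNet()
out = net(Tensor(np.array([[24, 4, 13, 9], [1, 5, 10, 8]]).astype(np.int16)))
print(out)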
test_case_nn_ops = [
