From 228a959cc7b2a170e464df4a3256b5529c99a213 Mon Sep 17 00:00:00 2001
From: fangzehua
Date: Wed, 22 Jul 2020 15:33:12 +0800
Subject: [PATCH] add reduce any op for vm

---
 .../kernel_compiler/tbe/tbe_adapter.cc        |  1 +
 .../pass/const_input_to_attr_registry.cc      |  1 +
 mindspore/core/base/core_ops.h                |  1 +
 mindspore/ops/_grad/grad_math_ops.py          | 10 +++++
 mindspore/ops/_op_impl/tbe/__init__.py        |  1 +
 mindspore/ops/_op_impl/tbe/reduce_any.py      | 38 +++++++++++++++++++
 mindspore/ops/operations/__init__.py          |  3 +-
 mindspore/ops/operations/math_ops.py          | 36 ++++++++++++++++++
 tests/ut/python/ops/test_ops.py               |  5 +++
 9 files changed, 95 insertions(+), 1 deletion(-)
 create mode 100644 mindspore/ops/_op_impl/tbe/reduce_any.py

diff --git a/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_adapter.cc b/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_adapter.cc
index 7b24bb17a2..d9f9bf86a7 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_adapter.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_adapter.cc
@@ -126,6 +126,7 @@ static std::map<std::string, std::string> tbe_func_adapter_map = {
   {"apply_rms_prop", "apply_rms_prop_d"},
   {"cum_prod", "cumprod_d"},
   {"reduce_all", "reduce_all_d"},
+  {"reduce_any", "reduce_any_d"},
   {"sparse_apply_adagrad", "sparse_apply_adagrad_d"},
   {"unsorted_segment_min", "unsorted_segment_min_d"},
   {"reduce_prod", "reduce_prod_d"},
diff --git a/mindspore/ccsrc/backend/optimizer/pass/const_input_to_attr_registry.cc b/mindspore/ccsrc/backend/optimizer/pass/const_input_to_attr_registry.cc
index 814ad9567c..d7590775d6 100644
--- a/mindspore/ccsrc/backend/optimizer/pass/const_input_to_attr_registry.cc
+++ b/mindspore/ccsrc/backend/optimizer/pass/const_input_to_attr_registry.cc
@@ -46,6 +46,7 @@ ConstInputToAttrInfoRegistry::ConstInputToAttrInfoRegistry() {
   Register(prim::kPrimCumSum->name(), {1});
   Register(prim::kPrimCumProd->name(), {1});
   Register(prim::kPrimReduceAll->name(), {1});
+  Register(prim::kPrimReduceAny->name(), {1});
   Register(prim::kPrimUnsortedSegmentMin->name(), {2});
   Register(kSparseGatherV2, {2});
   Register(kUnsortedSegmentProdOpName, {2});
diff --git a/mindspore/core/base/core_ops.h b/mindspore/core/base/core_ops.h
index e2ef23ca68..a04b983a2d 100755
--- a/mindspore/core/base/core_ops.h
+++ b/mindspore/core/base/core_ops.h
@@ -34,6 +34,7 @@ inline const PrimitivePtr kPrimMinimumGrad = std::make_shared<Primitive>("Minimu
 inline const PrimitivePtr kPrimReduceMean = std::make_shared<Primitive>("ReduceMean");
 inline const PrimitivePtr kPrimReduceSum = std::make_shared<Primitive>("ReduceSum");
 inline const PrimitivePtr kPrimReduceAll = std::make_shared<Primitive>("ReduceAll");
+inline const PrimitivePtr kPrimReduceAny = std::make_shared<Primitive>("ReduceAny");
 inline const PrimitivePtr kPrimReduceMax = std::make_shared<Primitive>("ReduceMax");
 inline const PrimitivePtr kPrimReduceMin = std::make_shared<Primitive>("ReduceMin");
 inline const PrimitivePtr kPrimNeg = std::make_shared<Primitive>("Neg");
diff --git a/mindspore/ops/_grad/grad_math_ops.py b/mindspore/ops/_grad/grad_math_ops.py
index 7dd09685cc..3e5949df05 100755
--- a/mindspore/ops/_grad/grad_math_ops.py
+++ b/mindspore/ops/_grad/grad_math_ops.py
@@ -641,6 +641,16 @@ def get_bprop_reduceall(self):
     return bprop


+@bprop_getters.register(P.ReduceAny)
+def get_bprop_reduceany(self):
+    """Grad definition for `ReduceAny` operation."""
+
+    def bprop(x, axis, out, dout):
+        return zeros_like(x), zeros_like(axis)
+
+    return bprop
+
+
 @bprop_getters.register(P.ReduceMax)
 def get_bprop_reducemax(self):
     """Grad definition for `Max` operation."""
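The zero gradient registered above is deliberate: ReduceAny produces a bool tensor, so no real-valued gradient can flow through it, and the bprop simply returns zeros shaped like the input and the axis. A minimal NumPy sketch of the forward semantics this backward rule pairs with (np.any stands in for the Ascend reduce_any_d kernel here; it is an illustration, not part of the patch):

    import numpy as np

    x = np.array([[True, False], [True, True]])
    # ReduceAny over axis 1 is a logical OR across each row.
    print(np.any(x, axis=1))                 # [ True  True]
    print(np.any(x, axis=1, keepdims=True))  # [[ True], [ True]] with dims kept
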
diff --git a/mindspore/ops/_op_impl/tbe/__init__.py b/mindspore/ops/_op_impl/tbe/__init__.py
index ec3e9c708e..2fa2697b14 100644
--- a/mindspore/ops/_op_impl/tbe/__init__.py
+++ b/mindspore/ops/_op_impl/tbe/__init__.py
@@ -245,6 +245,7 @@ from .bitwise_and import _bitwise_and_tbe
 from .bitwise_or import _bitwise_or_tbe
 from .bitwise_xor import _bitwise_xor_tbe
 from .reduce_all import _reduce_all_tbe
+from .reduce_any import _reduce_any_tbe
 from .sparse_apply_adagrad import _sparse_apply_adagrad_tbe
 from .unsorted_segment_min import _unsorted_segment_min_tbe
 from .asin import _asin_tbe
diff --git a/mindspore/ops/_op_impl/tbe/reduce_any.py b/mindspore/ops/_op_impl/tbe/reduce_any.py
new file mode 100644
index 0000000000..101a5e0506
--- /dev/null
+++ b/mindspore/ops/_op_impl/tbe/reduce_any.py
@@ -0,0 +1,38 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""ReduceAny op"""
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+
+reduce_any_op_info = TBERegOp("ReduceAny") \
+    .fusion_type("OPAQUE") \
+    .async_flag(False) \
+    .binfile_name("reduce_any_d.so") \
+    .compute_cost(10) \
+    .kernel_name("reduce_any_d") \
+    .partial_flag(True) \
+    .attr("axis", "required", "listInt", "all") \
+    .attr("keep_dims", "optional", "bool", "all") \
+    .input(0, "x", False, "required", "all") \
+    .output(0, "y", False, "required", "all") \
+    .op_pattern("reduce") \
+    .dtype_format(DataType.BOOL_None, DataType.BOOL_None) \
+    .get_op_info()
+
+
+@op_info_register(reduce_any_op_info)
+def _reduce_any_tbe():
+    """ReduceAny TBE register"""
+    return
diff --git a/mindspore/ops/operations/__init__.py b/mindspore/ops/operations/__init__.py
index 71f84cdeba..59290c3234 100644
--- a/mindspore/ops/operations/__init__.py
+++ b/mindspore/ops/operations/__init__.py
@@ -44,7 +44,7 @@ from .inner_ops import ScalarCast
 from .math_ops import (Abs, ACos, Asin, Asinh, AddN, AccumulateNV2, AssignAdd, AssignSub, Atan2, BatchMatMul,
                        BitwiseAnd, BitwiseOr, BitwiseXor, Inv, Invert, ApproximateEqual, InplaceAdd, InplaceSub,
-                       ReduceMax, ReduceMin, ReduceMean, ReduceSum, ReduceAll, ReduceProd, CumProd,
+                       ReduceMax, ReduceMin, ReduceMean, ReduceSum, ReduceAll, ReduceProd, CumProd, ReduceAny,
                        Cos, Div, DivNoNan, Equal, EqualCount, Exp, Expm1, Erf, Erfc,
                        Floor, FloorDiv, FloorMod, Ceil, Acosh, Greater, GreaterEqual, Less, LessEqual, Log, Log1p,
                        LogicalAnd, Mod, LogicalNot, LogicalOr, MatMul, Maximum,
@@ -215,6 +215,7 @@ __all__ = [
     'CTCLoss',
     'RNNTLoss',
     'ReduceAll',
+    'ReduceAny',
     'ScalarToArray',
     'ScalarToTensor',
     'TupleToArray',
diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py
index 940bf65576..9f02f0a8a6 100644
--- a/mindspore/ops/operations/math_ops.py
+++ b/mindspore/ops/operations/math_ops.py
@@ -405,6 +405,42 @@ class ReduceAll(_Reduce):
         return self.do_infer(input_x, axis, (mstype.bool_,))


+class ReduceAny(_Reduce):
+    """
+    Reduce a dimension of a tensor by the "logical or" of all elements in the dimension.
+
+    The dtype of the tensor to be reduced is bool.
+
+    Args:
+        keep_dims (bool): If True, keep these reduced dimensions and the length is 1.
+                          If False, don't keep these dimensions.
+                          Default: False, don't keep these reduced dimensions.
+
+    Inputs:
+        - **input_x** (Tensor[bool]) - The input tensor.
+        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
+          Only constant value is allowed.
+
+    Outputs:
+        Tensor, the dtype is bool.
+
+        - If axis is (), and keep_dims is false,
+          the output is a 0-D tensor representing the "logical or" of all elements in the input tensor.
+        - If axis is int, set as 2, and keep_dims is false,
+          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
+        - If axis is tuple(int), set as (2, 3), and keep_dims is false,
+          the shape of output is :math:`(x_1, x_4, ..., x_R)`.
+
+    Examples:
+        >>> input_x = Tensor(np.array([[True, False], [True, True]]))
+        >>> op = P.ReduceAny(keep_dims=True)
+        >>> output = op(input_x, 1)
+    """
+
+    def __infer__(self, input_x, axis):
+        return self.do_infer(input_x, axis, (mstype.bool_,))
+
+
 class ReduceMax(_Reduce):
     """
     Reduce a dimension of a tensor by the maximum value in this dimension.
diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py
index a45a4a858b..7ad9b0088e 100755
--- a/tests/ut/python/ops/test_ops.py
+++ b/tests/ut/python/ops/test_ops.py
@@ -1186,6 +1186,11 @@ test_case_math_ops = [
         'desc_const': [1],
         'desc_inputs': [Tensor(np.array([[True, False], [True, True]]))],
         'desc_bprop': []}),
+    ('ReduceAny', {
+        'block': P.ReduceAny(),
+        'desc_const': [1],
+        'desc_inputs': [Tensor(np.array([[True, False], [True, True]]))],
+        'desc_bprop': []}),
     ('BesselI0e', {
         'block': P.BesselI0e(),
         'desc_inputs': [[2, 3]],
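For reference, a usage sketch matching the docstring example in this patch (it assumes a MindSpore build where the new reduce_any_d TBE kernel is available; the context setup is illustrative, and the expected values follow np.any semantics):

    import numpy as np
    from mindspore import Tensor, context
    from mindspore.ops import operations as P

    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

    input_x = Tensor(np.array([[True, False], [True, True]]))
    op = P.ReduceAny(keep_dims=True)
    # Logical OR along axis 1 -> Tensor of shape (2, 1), values [[True], [True]].
    output = op(input_x, 1)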