From 5897793b01697bf1ca23fae0b760efcdd4e60301 Mon Sep 17 00:00:00 2001 From: liuwenhao4 Date: Fri, 3 Jul 2020 21:37:04 +0800 Subject: [PATCH] Fix some tiny mistakes in the InplaceAdd, InplaceSub and InplaceUpdate vm ops and apply the new dynamic-format feature --- mindspore/ops/_op_impl/tbe/accumulate_n_v2.py | 10 ++++---- .../ops/_op_impl/tbe/approximate_equal.py | 6 ++--- .../ops/_op_impl/tbe/binary_cross_entropy.py | 6 ++--- mindspore/ops/_op_impl/tbe/lin_space.py | 4 ++-- mindspore/ops/_op_impl/tbe/mod.py | 16 +++++-------- mindspore/ops/_op_impl/tbe/reduce_mean_d.py | 9 ++++---- mindspore/ops/_op_impl/tbe/softsign.py | 4 ++-- mindspore/ops/_op_impl/tbe/splitv.py | 23 +------------------ mindspore/ops/operations/array_ops.py | 2 +- mindspore/ops/operations/math_ops.py | 4 ++-- 10 files changed, 28 insertions(+), 56 deletions(-) diff --git a/mindspore/ops/_op_impl/tbe/accumulate_n_v2.py b/mindspore/ops/_op_impl/tbe/accumulate_n_v2.py index fdd72a9494..b16233c37e 100644 --- a/mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +++ b/mindspore/ops/_op_impl/tbe/accumulate_n_v2.py @@ -27,11 +27,11 @@ accumulate_n_v2_op_info = TBERegOp("AccumulateNV2") \ .input(0, "x", False, "dynamic", "all") \ .output(0, "y", False, "required", "all") \ .op_pattern("broadcast") \ - .dtype_format(DataType.F16_Default, DataType.F16_Default) \ - .dtype_format(DataType.F32_Default, DataType.F32_Default) \ - .dtype_format(DataType.I32_Default, DataType.I32_Default) \ - .dtype_format(DataType.I8_Default, DataType.I8_Default) \ - .dtype_format(DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.F16_None, DataType.F16_None) \ + .dtype_format(DataType.F32_None, DataType.F32_None) \ + .dtype_format(DataType.I32_None, DataType.I32_None) \ + .dtype_format(DataType.I8_None, DataType.I8_None) \ + .dtype_format(DataType.U8_None, DataType.U8_None) \ .get_op_info() diff --git a/mindspore/ops/_op_impl/tbe/approximate_equal.py b/mindspore/ops/_op_impl/tbe/approximate_equal.py index 
62b8a0c16d..195918a19d 100644 --- a/mindspore/ops/_op_impl/tbe/approximate_equal.py +++ b/mindspore/ops/_op_impl/tbe/approximate_equal.py @@ -28,10 +28,8 @@ approximate_equal_op_info = TBERegOp("ApproximateEqual") \ .input(0, "x1", False, "required", "all") \ .input(1, "x2", False, "required", "all") \ .output(0, "y", False, "required", "all") \ - .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.BOOL_Default) \ - .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.BOOL_5HD) \ - .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.BOOL_Default) \ - .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.BOOL_5HD) \ + .dtype_format(DataType.F16_None, DataType.F16_None, DataType.BOOL_None) \ + .dtype_format(DataType.F32_None, DataType.F32_None, DataType.BOOL_None) \ .get_op_info() diff --git a/mindspore/ops/_op_impl/tbe/binary_cross_entropy.py b/mindspore/ops/_op_impl/tbe/binary_cross_entropy.py index bbb4dcab0b..1f73d3bfe7 100644 --- a/mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +++ b/mindspore/ops/_op_impl/tbe/binary_cross_entropy.py @@ -28,10 +28,8 @@ binary_cross_entropy_op_info = TBERegOp("BinaryCrossEntropy") \ .input(1, "y", False, "required", "all") \ .input(2, "weight", False, "optional", "all") \ .output(0, "output", False, "required", "all") \ - .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ - .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ - .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ - .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .op_pattern("dynamicFormat") \ + .dtype_format(DataType.None_None, DataType.None_None, DataType.None_None, DataType.None_None) \ .get_op_info() diff --git a/mindspore/ops/_op_impl/tbe/lin_space.py b/mindspore/ops/_op_impl/tbe/lin_space.py index aed41e80d4..6e474c50ea 100644 --- 
a/mindspore/ops/_op_impl/tbe/lin_space.py +++ b/mindspore/ops/_op_impl/tbe/lin_space.py @@ -29,8 +29,8 @@ lin_space_op_info = TBERegOp("LinSpace") \ .input(2, "stop", False, "required", "all") \ .input(3, "num", False, "required", "all") \ .output(0, "output", False, "required", "all") \ - .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.I32_Default, - DataType.F32_Default,) \ + .dtype_format(DataType.F32_None, DataType.F32_None, DataType.F32_None, DataType.I32_None, + DataType.F32_None,) \ .get_op_info() diff --git a/mindspore/ops/_op_impl/tbe/mod.py b/mindspore/ops/_op_impl/tbe/mod.py index c8fecd697a..334a3e3820 100644 --- a/mindspore/ops/_op_impl/tbe/mod.py +++ b/mindspore/ops/_op_impl/tbe/mod.py @@ -26,16 +26,12 @@ mod_op_info = TBERegOp("Mod") \ .input(0, "x1", False, "required", "all") \ .input(1, "x2", False, "required", "all") \ .output(0, "y", False, "required", "all") \ - .dtype_format(DataType.I8_Default, DataType.I8_Default, DataType.I8_Default) \ - .dtype_format(DataType.I8_5HD, DataType.I8_5HD, DataType.I8_5HD) \ - .dtype_format(DataType.U8_Default, DataType.U8_Default, DataType.U8_Default) \ - .dtype_format(DataType.U8_5HD, DataType.U8_5HD, DataType.U8_5HD) \ - .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ - .dtype_format(DataType.I32_5HD, DataType.I32_5HD, DataType.I32_5HD) \ - .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ - .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ - .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ - .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .op_pattern("broadcast") \ + .dtype_format(DataType.I8_None, DataType.I8_None, DataType.I8_None) \ + .dtype_format(DataType.U8_None, DataType.U8_None, DataType.U8_None) \ + .dtype_format(DataType.I32_None, DataType.I32_None, DataType.I32_None) \ + .dtype_format(DataType.F16_None, 
DataType.F16_None, DataType.F16_None) \ + .dtype_format(DataType.F32_None, DataType.F32_None, DataType.F32_None) \ .get_op_info() diff --git a/mindspore/ops/_op_impl/tbe/reduce_mean_d.py b/mindspore/ops/_op_impl/tbe/reduce_mean_d.py index e427b34869..a0890816d2 100644 --- a/mindspore/ops/_op_impl/tbe/reduce_mean_d.py +++ b/mindspore/ops/_op_impl/tbe/reduce_mean_d.py @@ -27,10 +27,11 @@ reduce_mean_d_op_info = TBERegOp("ReduceMeanD") \ .attr("keep_dims", "optional", "bool", "all") \ .input(0, "x", False, "required", "all") \ .output(0, "y", False, "required", "all") \ - .dtype_format(DataType.I8_Default, DataType.I8_Default) \ - .dtype_format(DataType.U8_Default, DataType.U8_Default) \ - .dtype_format(DataType.F16_Default, DataType.F16_Default) \ - .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .op_pattern("reduce") \ + .dtype_format(DataType.I8_None, DataType.I8_None) \ + .dtype_format(DataType.U8_None, DataType.U8_None) \ + .dtype_format(DataType.F16_None, DataType.F16_None) \ + .dtype_format(DataType.F32_None, DataType.F32_None) \ .get_op_info() diff --git a/mindspore/ops/_op_impl/tbe/softsign.py b/mindspore/ops/_op_impl/tbe/softsign.py index 9f1609bf0a..97f560c939 100644 --- a/mindspore/ops/_op_impl/tbe/softsign.py +++ b/mindspore/ops/_op_impl/tbe/softsign.py @@ -26,8 +26,8 @@ softsign_op_info = TBERegOp("Softsign") \ .op_pattern("formatAgnostic") \ .input(0, "x", False, "required", "all") \ .output(0, "y", False, "required", "all") \ - .dtype_format(DataType.F16_Default, DataType.F16_Default) \ - .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F16_None, DataType.F16_None) \ + .dtype_format(DataType.F32_None, DataType.F32_None) \ .get_op_info() diff --git a/mindspore/ops/_op_impl/tbe/splitv.py b/mindspore/ops/_op_impl/tbe/splitv.py index 29f65c7e87..6948524bd7 100644 --- a/mindspore/ops/_op_impl/tbe/splitv.py +++ b/mindspore/ops/_op_impl/tbe/splitv.py @@ -29,28 +29,7 @@ split_v_op_info = TBERegOp("SplitV") \ 
.input(0, "x", False, "required", "all") \ .output(0, "y", False, "dynamic", "all") \ .op_pattern("dynamicFormat") \ - .dtype_format(DataType.BOOL_Default, DataType.BOOL_Default) \ - .dtype_format(DataType.BOOL_NHWC, DataType.BOOL_NHWC) \ - .dtype_format(DataType.I8_Default, DataType.I8_Default) \ - .dtype_format(DataType.I8_NHWC, DataType.I8_NHWC) \ - .dtype_format(DataType.U8_Default, DataType.U8_Default) \ - .dtype_format(DataType.U8_NHWC, DataType.U8_NHWC) \ - .dtype_format(DataType.I16_Default, DataType.I16_Default) \ - .dtype_format(DataType.I16_NHWC, DataType.I16_NHWC) \ - .dtype_format(DataType.U16_Default, DataType.U16_Default) \ - .dtype_format(DataType.U16_NHWC, DataType.U16_NHWC) \ - .dtype_format(DataType.I32_Default, DataType.I32_Default) \ - .dtype_format(DataType.I32_NHWC, DataType.I32_NHWC) \ - .dtype_format(DataType.U32_Default, DataType.U32_Default) \ - .dtype_format(DataType.U32_NHWC, DataType.U32_NHWC) \ - .dtype_format(DataType.I64_Default, DataType.I64_Default) \ - .dtype_format(DataType.I64_NHWC, DataType.I64_NHWC) \ - .dtype_format(DataType.U64_Default, DataType.U64_Default) \ - .dtype_format(DataType.U64_NHWC, DataType.U64_NHWC) \ - .dtype_format(DataType.F16_Default, DataType.F16_Default) \ - .dtype_format(DataType.F16_NHWC, DataType.F16_NHWC) \ - .dtype_format(DataType.F32_Default, DataType.F32_Default) \ - .dtype_format(DataType.F32_NHWC, DataType.F32_NHWC) \ + .dtype_format(DataType.None_None, DataType.None_None) \ .get_op_info() diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py index 8e9ecfea95..125a2a0fe0 100644 --- a/mindspore/ops/operations/array_ops.py +++ b/mindspore/ops/operations/array_ops.py @@ -3055,7 +3055,7 @@ class InplaceUpdate(PrimitiveWithInfer): raise ValueError(f'The value of indices must be in [0, {x_shape[0]}), but got {i}.') x_rank = len(x_shape) for idx in range(x_rank)[1:]: - validator.check("x dim %d" % idx, x_shape[idx], 'v dim %d' % idx, v_shape[idx], Rel.EQ, self.name) 
+ validator.check('v dim %d' % idx, v_shape[idx], "x dim %d" % idx, x_shape[idx], Rel.EQ, self.name) return x_shape diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py index d7fa77787e..b4a684d2f7 100644 --- a/mindspore/ops/operations/math_ops.py +++ b/mindspore/ops/operations/math_ops.py @@ -947,7 +947,7 @@ class InplaceAdd(PrimitiveWithInfer): raise ValueError(f'The value of indices must be in [0, {x_shape[0]}), but got {i}.') x_rank = len(x_shape) for idx in range(x_rank)[1:]: - validator.check("x dim %d" % idx, x_shape[idx], 'v dim %d' % idx, v_shape[idx], Rel.EQ, self.name) + validator.check('v dim %d' % idx, v_shape[idx], "x dim %d" % idx, x_shape[idx], Rel.EQ, self.name) return x_shape @@ -1005,7 +1005,7 @@ class InplaceSub(PrimitiveWithInfer): raise ValueError(f'The value of indices must be in [0, {x_shape[0]}), but got {i}.') x_rank = len(x_shape) for idx in range(x_rank)[1:]: - validator.check("x dim %d" % idx, x_shape[idx], 'v dim %d' % idx, v_shape[idx], Rel.EQ, self.name) + validator.check('v dim %d' % idx, v_shape[idx], "x dim %d" % idx, x_shape[idx], Rel.EQ, self.name) return x_shape