!4330 Add op ReverseSequence for GE and adapt some op info for VM.

Merge pull request !4330 from liuxiao93/ReverseSequence
pull/4330/MERGE
Authored by mindspore-ci-bot 5 years ago, committed by Gitee
commit cc9c27ae85

@@ -213,6 +213,7 @@ const char kNameRange[] = "Range";
const char kNameSquareSumAll[] = "SquareSumAll";
const char kNameAscendQuant[] = "Quant";
const char kNameAscendDequant[] = "Dequant";
const char kNameReverseSequence[] = "ReverseSequence";
const char kNameCase[] = "Case";
// -----------------OpAdapter initialization--------------
@@ -429,6 +430,7 @@ std::unordered_map<std::string, OpAdapterDescPtr> &DfGraphConvertor::get_adpt_ma
{string(kNameSquareSumAll), ADPT_DESC(SquareSumAll)},
{string(kNameAscendQuant), ADPT_DESC(AscendQuant)},
{string(kNameAscendDequant), ADPT_DESC(AscendDequant)},
{string(kNameReverseSequence), ADPT_DESC(ReverseSequence)},
{string(kNameCase), ADPT_DESC(Case)}};
#ifdef ENABLE_GE
adpt_map[string(kNamePrint)] = ADPT_DESC(Print);

@@ -1340,6 +1340,12 @@ ATTR_MAP(CTCLoss) = {
{"ignore_longer_outputs_than_inputs", ATTR_DESC(ignore_longer_outputs_than_inputs, AnyTraits<bool>())}};
OUTPUT_MAP(CTCLoss) = {{0, OUTPUT_DESC(loss)}, {1, OUTPUT_DESC(gradient)}};
// ReverseSequence
INPUT_MAP(ReverseSequence) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(seq_lengths)}};
ATTR_MAP(ReverseSequence) = {{"seq_dim", ATTR_DESC(seq_dim, AnyTraits<int>())},
{"batch_dim", ATTR_DESC(batch_dim, AnyTraits<int>())}};
OUTPUT_MAP(ReverseSequence) = {{0, OUTPUT_DESC(y)}};
// AscendQuant
INPUT_MAP(AscendQuant) = {{1, INPUT_DESC(x)}};
ATTR_MAP(AscendQuant) = {{"scale", ATTR_DESC(scale, AnyTraits<float>())},
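
For reference, a minimal sketch of how the newly adapted op is driven from the Python frontend (values are illustrative; semantics follow the standard ReverseSequence definition, reversing the first seq_lengths[i] elements of slice i along seq_dim):

    import numpy as np
    from mindspore import Tensor
    from mindspore.ops import operations as P

    # Reverse the first seq_lengths[i] elements of row i along seq_dim=1.
    x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], np.float32))
    seq_lengths = Tensor(np.array([1, 2, 3], np.int32))
    reverse_sequence = P.ReverseSequence(seq_dim=1, batch_dim=0)
    y = reverse_sequence(x, seq_lengths)
    # Expected: [[1, 2, 3], [5, 4, 6], [9, 8, 7]]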

@@ -501,6 +501,8 @@ DECLARE_OP_ADAPTER(L2Loss)
DECLARE_OP_USE_OUTPUT(L2Loss)
DECLARE_OP_ADAPTER(CTCLoss)
DECLARE_OP_USE_OUTPUT(CTCLoss)
DECLARE_OP_ADAPTER(ReverseSequence)
DECLARE_OP_USE_OUTPUT(ReverseSequence)
DECLARE_OP_ADAPTER(AscendQuant)
DECLARE_OP_USE_OUTPUT(AscendQuant)
DECLARE_OP_ADAPTER(AscendDequant)

@@ -31,14 +31,6 @@ apply_adagrad_v2_d_op_info = TBERegOp("ApplyAdagradV2") \
.input(3, "grad", False, "required", "all") \
.output(0, "var", False, "required", "all") \
.output(1, "accum", False, "required", "all") \
.dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_Default, DataType.F16_5HD,
DataType.F16_5HD, DataType.F16_5HD) \
.dtype_format(DataType.F16_FracZ, DataType.F16_FracZ, DataType.F16_Default, DataType.F16_FracZ,
DataType.F16_FracZ, DataType.F16_FracZ) \
.dtype_format(DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0, DataType.F16_Default, DataType.F16_C1HWNCoC0,
DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0) \
.dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default,
DataType.F16_Default, DataType.F16_Default) \
.dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_Default, DataType.F32_5HD,
DataType.F32_5HD, DataType.F32_5HD) \
.dtype_format(DataType.F32_FracZ, DataType.F32_FracZ, DataType.F32_Default, DataType.F32_FracZ,

@@ -28,9 +28,7 @@ bias_add_grad_op_info = TBERegOp("BiasAdd") \
.input(1, "bias", False, "required", "all") \
.output(0, "y", False, "required", "all") \
.op_pattern("dynamicFormat") \
.dtype_format(DataType.I32_None, DataType.I32_None, DataType.I32_None) \
.dtype_format(DataType.F16_None, DataType.F16_None, DataType.F16_None) \
.dtype_format(DataType.F32_None, DataType.F32_None, DataType.F32_None) \
.dtype_format(DataType.None_None, DataType.None_None, DataType.None_None) \
.get_op_info()
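
The change above replaces the enumerated dtype_format entries with op_pattern("dynamicFormat") plus a single None_None entry, which (as I read the TBE registration scheme) defers dtype and format selection to the op-select mechanism at kernel-choosing time instead of fixing them in the registration. A minimal sketch of the pattern, using a hypothetical ExampleOp:

    from mindspore.ops.op_info_register import TBERegOp, DataType

    # Hypothetical registration illustrating the dynamicFormat pattern.
    example_op_info = TBERegOp("ExampleOp") \
        .fusion_type("OPAQUE") \
        .input(0, "x", False, "required", "all") \
        .output(0, "y", False, "required", "all") \
        .op_pattern("dynamicFormat") \
        .dtype_format(DataType.None_None, DataType.None_None) \
        .get_op_info()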

@@ -29,6 +29,7 @@ bitwise_and_op_info = TBERegOp("BitwiseAnd") \
.op_pattern("broadcast") \
.dtype_format(DataType.I16_None, DataType.I16_None, DataType.I16_None) \
.dtype_format(DataType.U16_None, DataType.U16_None, DataType.U16_None) \
.dtype_format(DataType.I32_None, DataType.I32_None, DataType.I32_None) \
.get_op_info()

@@ -29,6 +29,7 @@ bitwise_or_op_info = TBERegOp("BitwiseOr") \
.op_pattern("broadcast") \
.dtype_format(DataType.I16_None, DataType.I16_None, DataType.I16_None) \
.dtype_format(DataType.U16_None, DataType.U16_None, DataType.U16_None) \
.dtype_format(DataType.I32_None, DataType.I32_None, DataType.I32_None) \
.get_op_info()

@@ -29,6 +29,7 @@ bitwise_xor_op_info = TBERegOp("BitwiseXor") \
.op_pattern("broadcast") \
.dtype_format(DataType.I16_None, DataType.I16_None, DataType.I16_None) \
.dtype_format(DataType.U16_None, DataType.U16_None, DataType.U16_None) \
.dtype_format(DataType.I32_None, DataType.I32_None, DataType.I32_None) \
.get_op_info()
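
With the I32_None entries added above, all three bitwise ops accept int32 tensors in addition to int16 and uint16. A quick illustrative check (values chosen arbitrarily):

    import numpy as np
    from mindspore import Tensor
    from mindspore.ops import operations as P

    x1 = Tensor(np.array([0, 1, 3], np.int32))  # int32 now registered
    x2 = Tensor(np.array([1, 1, 2], np.int32))
    print(P.BitwiseAnd()(x1, x2))  # [0 1 2]
    print(P.BitwiseOr()(x1, x2))   # [1 1 3]
    print(P.BitwiseXor()(x1, x2))  # [1 0 1]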

@@ -26,17 +26,18 @@ space_to_depth_op_info = TBERegOp("SpaceToDepth") \
.attr("block_size", "required", "int", "all") \
.attr("data_format", "optional", "str", "all") \
.input(0, "x", False, "required", "all") \
.input(1, "filter", False, "optional", "all") \
.output(0, "y", False, "required", "all") \
.dtype_format(DataType.F16_NHWC, DataType.F16_NHWC) \
.dtype_format(DataType.F32_NHWC, DataType.F32_NHWC) \
.dtype_format(DataType.I8_NHWC, DataType.I8_NHWC) \
.dtype_format(DataType.I16_NHWC, DataType.I16_NHWC) \
.dtype_format(DataType.I32_NHWC, DataType.I32_NHWC) \
.dtype_format(DataType.I64_NHWC, DataType.I64_NHWC) \
.dtype_format(DataType.U8_NHWC, DataType.U8_NHWC) \
.dtype_format(DataType.U16_NHWC, DataType.U16_NHWC) \
.dtype_format(DataType.U32_NHWC, DataType.U32_NHWC) \
.dtype_format(DataType.U64_NHWC, DataType.U64_NHWC) \
.dtype_format(DataType.F16_5HD, DataType.F16_FracZ, DataType.F16_5HD) \
.dtype_format(DataType.F32_NHWC, DataType.F16_FracZ, DataType.F32_NHWC) \
.dtype_format(DataType.I8_NHWC, DataType.F16_FracZ, DataType.I8_NHWC) \
.dtype_format(DataType.I16_NHWC, DataType.F16_FracZ, DataType.I16_NHWC) \
.dtype_format(DataType.I32_NHWC, DataType.F16_FracZ, DataType.I32_NHWC) \
.dtype_format(DataType.I64_NHWC, DataType.F16_FracZ, DataType.I64_NHWC) \
.dtype_format(DataType.U8_NHWC, DataType.F16_FracZ, DataType.U8_NHWC) \
.dtype_format(DataType.U16_NHWC, DataType.F16_FracZ, DataType.U16_NHWC) \
.dtype_format(DataType.U32_NHWC, DataType.F16_FracZ, DataType.U32_NHWC) \
.dtype_format(DataType.U64_NHWC, DataType.F16_FracZ, DataType.U64_NHWC) \
.get_op_info()
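
For orientation, a minimal sketch of SpaceToDepth from the Python side (shapes illustrative; the frontend op consumes NCHW input and folds each block_size x block_size spatial block into channels):

    import numpy as np
    from mindspore import Tensor
    from mindspore.ops import operations as P

    x = Tensor(np.random.rand(1, 3, 2, 2).astype(np.float32))  # NCHW
    space_to_depth = P.SpaceToDepth(block_size=2)
    y = space_to_depth(x)  # shape (1, 3 * 2 * 2, 1, 1) == (1, 12, 1, 1)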

@@ -27,26 +27,8 @@ unpack_op_info = TBERegOp("Unpack") \
.attr("axis", "required", "int", "all") \
.input(0, "x", False, "required", "all") \
.output(0, "y", False, "dynamic", "all") \
.dtype_format(DataType.I8_Default, DataType.I8_Default) \
.dtype_format(DataType.I16_Default, DataType.I16_Default) \
.dtype_format(DataType.I32_Default, DataType.I32_Default) \
.dtype_format(DataType.I64_Default, DataType.I64_Default) \
.dtype_format(DataType.U8_Default, DataType.U8_Default) \
.dtype_format(DataType.U16_Default, DataType.U16_Default) \
.dtype_format(DataType.U32_Default, DataType.U32_Default) \
.dtype_format(DataType.U64_Default, DataType.U64_Default) \
.dtype_format(DataType.F16_Default, DataType.F16_Default) \
.dtype_format(DataType.F32_Default, DataType.F32_Default) \
.dtype_format(DataType.I8_5HD, DataType.I8_5HD) \
.dtype_format(DataType.I16_5HD, DataType.I16_5HD) \
.dtype_format(DataType.I32_5HD, DataType.I32_5HD) \
.dtype_format(DataType.I64_5HD, DataType.I64_5HD) \
.dtype_format(DataType.U8_5HD, DataType.U8_5HD) \
.dtype_format(DataType.U16_5HD, DataType.U16_5HD) \
.dtype_format(DataType.U32_5HD, DataType.U32_5HD) \
.dtype_format(DataType.U64_5HD, DataType.U64_5HD) \
.dtype_format(DataType.F16_5HD, DataType.F16_5HD) \
.dtype_format(DataType.F32_5HD, DataType.F32_5HD) \
.op_pattern("dynamicFormat") \
.dtype_format(DataType.None_None, DataType.None_None) \
.get_op_info()
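
Unpack gets the same dynamicFormat treatment as the BiasAdd grad registration above: twenty static dtype_format lines collapse into a single None_None entry. Frontend usage is unchanged; a minimal sketch:

    import numpy as np
    from mindspore import Tensor
    from mindspore.ops import operations as P

    unpack = P.Unpack(axis=0)
    x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]], np.float32))
    out = unpack(x)  # tuple of two tensors: [1, 1, 1, 1] and [2, 2, 2, 2]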

@@ -1132,7 +1132,7 @@ class SquaredDifference(_MathBinaryOp):
The inputs must be two tensors or one tensor and one scalar.
When the inputs are two tensors,
both dtypes cannot be bool, and the shapes of them could be broadcast.
dtypes of them cannot be both bool, and the shapes of them could be broadcast.
When the inputs are one tensor and one scalar,
the scalar only could be a constant.
@@ -1833,7 +1833,7 @@ class TruncateDiv(_MathBinaryOp):
The inputs must be two tensors or one tensor and one scalar.
When the inputs are two tensors,
both dtypes cannot be bool, and the shapes of them could be broadcast.
dtypes of them cannot be both bool, and the shapes of them could be broadcast.
When the inputs are one tensor and one scalar,
the scalar only could be a constant.
@@ -1862,7 +1862,7 @@ class TruncateMod(_MathBinaryOp):
The inputs must be two tensors or one tensor and one scalar.
When the inputs are two tensors,
both dtypes cannot be bool, and the shapes of them could be broadcast.
dtypes of them cannot be both bool, and the shapes of them could be broadcast.
When the inputs are one tensor and one scalar,
the scalar only could be a constant.
@@ -2014,7 +2014,7 @@ class Xdivy(_MathBinaryOp):
The inputs must be two tensors or one tensor and one scalar.
When the inputs are two tensors,
both dtypes cannot be bool, and the shapes of them could be broadcast.
dtypes of them cannot be both bool, and the shapes of them could be broadcast.
When the inputs are one tensor and one scalar,
the scalar only could be a constant.
@@ -2047,7 +2047,7 @@ class Xlogy(_MathBinaryOp):
The inputs must be two tensors or one tensor and one scalar.
When the inputs are two tensors,
both dtypes cannot be bool, and the shapes of them could be broadcast.
dtypes of them cannot be both bool, and the shapes of them could be broadcast.
When the inputs are one tensor and one scalar,
the scalar only could be a constant.
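
The reworded docstrings all describe the same _MathBinaryOp contract: two non-bool tensors with broadcastable shapes, or one tensor plus a constant scalar. An illustrative example with SquaredDifference:

    import numpy as np
    from mindspore import Tensor
    from mindspore.ops import operations as P

    x = Tensor(np.array([1.0, 2.0, 3.0], np.float32))
    y = Tensor(np.array([2.0], np.float32))  # shape (1,) broadcasts against (3,)
    print(P.SquaredDifference()(x, y))       # [1. 0. 1.]
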
@@ -3234,7 +3234,7 @@ class BitwiseAnd(_BitwiseBinaryOp):
Returns bitwise `and` of two tensors element-wise.
Inputs:
- **input_x1** (Tensor) - The input tensor with int16 or uint16 data type.
- **input_x1** (Tensor) - The input tensor with int16, int32 or uint16 data type.
- **input_x2** (Tensor) - The input tensor with same type as the `input_x1`.
Outputs:
@@ -3254,7 +3254,7 @@ class BitwiseOr(_BitwiseBinaryOp):
Returns bitwise `or` of two tensors element-wise.
Inputs:
- **input_x1** (Tensor) - The input tensor with int16 or uint16 data type.
- **input_x1** (Tensor) - The input tensor with int16, int32 or uint16 data type.
- **input_x2** (Tensor) - The input tensor with same type as the `input_x1`.
Outputs:
@@ -3274,7 +3274,7 @@ class BitwiseXor(_BitwiseBinaryOp):
Returns bitwise `xor` of two tensors element-wise.
Inputs:
- **input_x1** (Tensor) - The input tensor with int16 or uint16 data type.
- **input_x1** (Tensor) - The input tensor with int16, int32 or uint16 data type.
- **input_x2** (Tensor) - The input tensor with same type as the `input_x1`.
Outputs:
@@ -3297,7 +3297,7 @@ class BesselI0e(PrimitiveWithInfer):
- **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
Outputs:
Tensor, has the same shape as `input_x`.
Tensor, has the same shape as `input_x`. Data type should be float16 or float32.
Examples:
>>> bessel_i0e = P.BesselI0e()
@@ -3326,7 +3326,7 @@ class BesselI1e(PrimitiveWithInfer):
- **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
Outputs:
Tensor, has the same shape as `input_x`.
Tensor, has the same shape as `input_x`. Data type should be float16 or float32.
Examples:
>>> bessel_i1e = P.BesselI1e()

@@ -1412,6 +1412,7 @@ class BiasAdd(PrimitiveWithInfer):
Inputs:
- **input_x** (Tensor) - Input value. The input shape can be 2-4 dimensions.
- **bias** (Tensor) - Bias value, with shape :math:`(C)`.
The shape of `bias` must be the same as `input_x` in second dimension.
Outputs:
Tensor, with the same shape and type as `input_x`.
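
The clarified constraint above, that `bias` has shape (C) matching the second (channel) dimension of `input_x`, looks like this in practice (shapes illustrative):

    import numpy as np
    from mindspore import Tensor
    from mindspore.ops import operations as P

    x = Tensor(np.ones((2, 3, 4, 4), np.float32))         # NCHW, C == 3
    bias = Tensor(np.array([0.1, 0.2, 0.3], np.float32))  # shape (C,) == (3,)
    y = P.BiasAdd()(x, bias)                              # same shape as x
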
@@ -2341,7 +2342,7 @@ class OneHot(PrimitiveWithInfer):
Inputs:
- **indices** (Tensor) - A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
Data type must be int32.
Data type must be int32.
- **depth** (int) - A scalar defining the depth of the one hot dimension.
- **on_value** (Tensor) - A value to fill in output when `indices[j] = i`. With data type of float16 or float32.
- **off_value** (Tensor) - A value to fill in output when `indices[j] != i`.
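
A minimal sketch of OneHot honoring the int32 requirement on `indices` (values illustrative):

    import numpy as np
    import mindspore
    from mindspore import Tensor
    from mindspore.ops import operations as P

    indices = Tensor(np.array([0, 1, 2], np.int32))  # must be int32
    on_value = Tensor(1.0, mindspore.float32)
    off_value = Tensor(0.0, mindspore.float32)
    y = P.OneHot()(indices, 3, on_value, off_value)  # 3x3 one-hot matrix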
