/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*!
* \file array_ops.h
* \brief
*/
#ifndef OPS_BUILT_IN_OP_PROTO_INC_ARRAY_OPS_H_
#define OPS_BUILT_IN_OP_PROTO_INC_ARRAY_OPS_H_
#include "graph/operator_reg.h"
#include "graph/operator.h"
namespace ge {
/**
*@brief Applies lower_bound(sorted_search_values, values) along each row. \n
*@par Inputs:
*Inputs "sorted_x" and "values" are 2D tensors.
* @li sorted_x: A `Tensor`. 2-D Tensor where each row is ordered.
* @li values: A `Tensor`. Must have the same type as `sorted_x`. \n
*@par Attributes:
*@li out_type: An optional `DType` from: `int32, int64`.
Defaults to `int32`. \n
*@par Outputs:
*y: A `Tensor` of type `out_type`. \n
*@attention Constraints:
*LowerBound runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Quantization supported or not
*Not supported
*@par Quantized inference supported or not
*Supported \n
*@par Third-party framework compatibility
*Compatible with tensorflow Operator LowerBound.
*/
REG_OP(LowerBound)
.INPUT(sorted_x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, \
DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
.INPUT(values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, \
DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
.OUTPUT(y, TensorType({DT_INT32, DT_INT64}))
.ATTR(out_type, Type, DT_INT32)
.OP_END_FACTORY_REG(LowerBound)
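// Informative example of the row-wise lower_bound semantics (illustrative
// values only; not part of the registration). Each y[i][j] is the first index
// in sorted_x[i] at which values[i][j] could be inserted without breaking the
// ordering:
//   sorted_x = [[0, 3, 9, 9, 10],   values = [[2, 4, 9],   y = [[1, 2, 2],
//               [1, 2, 3, 4, 5]]              [0, 2, 6]]        [0, 1, 5]]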
/**
*@brief Reverses variable length slices. \n
*@par Inputs:
*Input "x" is a rank k tensor. Input "seq_lengths" is a 1D tensor.
* @li x: A Tensor. The input to reverse.
* @li seq_lengths: A 1D Tensor of type int32 or int64. \n
*@par Attributes:
*@li seq_dim: A required int. The dimension which is partially reversed.
*@li batch_dim: An optional int. Defaults to "0". The dimension along which
the tensor is sliced into batches. \n
*@par Outputs:
*y: A rank k tensor. Has the same shape as "x", with the specified slices partially reversed. \n
*@attention Constraints:
*ReverseSequence runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator ReverseSequence.
*/
REG_OP(ReverseSequence)
.INPUT(x,
TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
.INPUT(seq_lengths, TensorType({DT_INT32, DT_INT64}))
.OUTPUT(y,
TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
.REQUIRED_ATTR(seq_dim, Int)
.ATTR(batch_dim, Int, 0)
.OP_END_FACTORY_REG(ReverseSequence)
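// Informative example (illustrative values; seq_dim = 1, batch_dim = 0).
// For each batch row i, the first seq_lengths[i] elements along seq_dim are
// reversed:
//   x = [[1, 2, 3, 4],     seq_lengths = [2, 4]     y = [[2, 1, 3, 4],
//        [5, 6, 7, 8]]                                   [8, 7, 6, 5]]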
/**
*@brief Copies a tensor setting everything outside a central band in each innermost matrix. \n
*@par Inputs:
*Input "x" is a k-dimensional tensor. Inputs "num_lower" and "num_upper"
are 0D scalars.
* @li x: A rank k tensor.
* @li num_lower: A 0D tensor. Number of subdiagonals to keep. If negative,
keeps the entire lower triangle.
* @li num_upper: A 0D tensor. Number of superdiagonals to keep. If negative,
keeps the entire upper triangle. \n
*@par Outputs:
*y: A rank k tensor. Has the same shape as input. The extracted banded tensor. \n
*@attention Constraints:
*MatrixBandPart runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator MatrixBandPart.
*/
REG_OP(MatrixBandPart)
.INPUT(x, TensorType({ DT_INT8, DT_UINT8, \
DT_INT16, DT_UINT16, DT_INT32, DT_INT64,
DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_BOOL,
DT_COMPLEX64, DT_COMPLEX128 }))
.INPUT(num_lower, TensorType({ DT_INT32, DT_INT64 }))
.INPUT(num_upper, TensorType({ DT_INT32, DT_INT64 }))
.OUTPUT(y, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_BOOL,
DT_COMPLEX64, DT_COMPLEX128}))
.OP_END_FACTORY_REG(MatrixBandPart)
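// Informative sketch of the band condition (following the TensorFlow
// definition): an element (m, n) of each innermost matrix is kept iff
//   (num_lower < 0 || (m - n) <= num_lower) &&
//   (num_upper < 0 || (n - m) <= num_upper)
// e.g. num_lower = 1, num_upper = -1 keeps one subdiagonal plus the entire
// upper triangle; everything below that band is set to zero.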
/**
*@brief Finds unique elements in a 1D tensor. \n
*@par Inputs:
*x: A 1D tensor. \n
*@par Attributes:
*out_idx: An optional DType from: "int32, int64".
Defaults to "int32". \n
*@par Outputs:
*@li y: A Tensor. Has the same type as "x".
*@li idx: A Tensor of type "out_idx".
*@li count: A Tensor of type "out_idx". \n
*@attention Constraints:
*UniqueWithCounts runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator UniqueWithCounts.
*/
REG_OP(UniqueWithCounts)
.INPUT(x, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_STRING }))
.OUTPUT(y, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_STRING }))
.OUTPUT(idx, TensorType({ DT_INT32, DT_INT64 }))
.OUTPUT(count, TensorType({ DT_INT32, DT_INT64 }))
.REQUIRED_ATTR(out_idx, Type)
.OP_END_FACTORY_REG(UniqueWithCounts)
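// Informative example (illustrative values):
//   x     = [1, 1, 2, 4, 4, 4, 7, 8, 8]
//   y     = [1, 2, 4, 7, 8]                // unique elements of x
//   idx   = [0, 0, 1, 2, 2, 2, 3, 4, 4]    // position of each x element in y
//   count = [2, 1, 3, 1, 2]                // occurrences of each y element in x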
/**
*@brief Finds unique elements in a 1D tensor. \n
*@par Inputs:
*x: A 1D tensor. \n
*@par Attributes:
*out_idx: An optional DType from: "int32, int64". Defaults to "int32". \n
*@par Outputs:
*@li y: A tensor containing the unique elements of "x".
*@li idx: A tensor the same size as "x". The index in "y" of each value of "x". \n
*@attention Constraints:
*Unique runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Unique.
*/
REG_OP(Unique)
.INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
.OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
.OUTPUT(idx, TensorType({DT_INT32, DT_INT64}))
.ATTR(out_idx, Type, DT_INT32)
.OP_END_FACTORY_REG(Unique)
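// Informative example (illustrative values): for x = [1, 1, 2, 4, 4, 4, 7, 8, 8],
// y = [1, 2, 4, 7, 8] and idx = [0, 0, 1, 2, 2, 2, 3, 4, 4], so that
// y[idx[i]] == x[i] for every i.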
/**
*@brief Finds unique elements in a 1D tensor. \n
*@par Inputs:
*Input "x" is a k-dimensional tensor. Inputs "num_lower" and "num_upper"
are 0D scalars.
*Including:
* @li x: 1D tensor.
* @li axis: A Tensor of type int32. Defaults to "None". \n
5 years ago
*@par Attributes:
*out_idx: An optional DType from: "int32, int64".
Defaults to "int32". \n
*@par Outputs:
*@li y: A tensor containing the unique elements of "x".
*@li idx: A tensor the same size as "x". The index in "y" of each value of "x". \n
*@attention Constraints:
*UniqueExt2 runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator UniqueExt2.
*/
REG_OP(UniqueExt2)
.INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
.INPUT(axis, TensorType({DT_INT32, DT_INT64}))
.OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
.OUTPUT(idx, TensorType({DT_INT32, DT_INT64}))
.ATTR(out_idx, Type, DT_INT32)
.OP_END_FACTORY_REG(UniqueExt2)
/**
*@brief Computes the inverse permutation of a tensor. \n
*@par Inputs:
*x: A 1D integer tensor representing a permutation. \n
*@par Outputs:
*y: A 1D tensor. \n
*@attention Constraints:
*InvertPermutation runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator InvertPermutation.
*/
REG_OP(InvertPermutation)
.INPUT(x, TensorType({DT_INT32, DT_INT64}))
.OUTPUT(y, TensorType({DT_INT32, DT_INT64}))
.OP_END_FACTORY_REG(InvertPermutation)
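// Informative example (illustrative values): for the permutation
//   x = [3, 4, 0, 2, 1]
// the output satisfies y[x[i]] = i, giving
//   y = [2, 4, 3, 0, 1]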
/**
*@brief Checks a tensor for NaN and Inf values. \n
*@par Inputs:
*x: A k-dimensional tensor. \n
*@par Attributes:
*message: Prefix of the error message. \n
*@par Outputs:
*y: The output tensor. \n
*@attention Constraints:
*CheckNumerics runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator CheckNumerics.
*/
REG_OP(CheckNumerics)
.INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
.OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
.REQUIRED_ATTR(message, String)
.OP_END_FACTORY_REG(CheckNumerics)
/**
*@brief Converts an array of flat indices into a tuple of coordinate arrays. \n
*@par Inputs:
*Input "indices" is a 0D or 1D tensor. Input "dims" is a 1D tensor.
* @li indices: A 0D or 1D int Tensor whose elements are indices into
the flattened version of an array of dimensions "dims".
* @li dims: A 1D int Tensor of the same type as "indices".
*The shape of the array to use for unraveling indices. \n
*@par Outputs:
*y: A Tensor. Has the same type as "indices". \n
*@attention Constraints:
*UnravelIndex runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator UnravelIndex.
*/
REG_OP(UnravelIndex)
.INPUT(indices, TensorType({DT_INT32, DT_INT64}))
.INPUT(dims, TensorType({DT_INT32, DT_INT64}))
.OUTPUT(y, TensorType({DT_INT32, DT_INT64}))
.OP_END_FACTORY_REG(UnravelIndex)
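// Informative example (illustrative values, row-major order):
//   indices = [2, 5, 7], dims = [3, 3]
//   y = [[0, 1, 2],   // coordinate 0 (row) of each index
//        [2, 2, 1]]   // coordinate 1 (column): 2 = (0,2), 5 = (1,2), 7 = (2,1)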
/**
*@brief Applies upper_bound(sorted_search_values, values) along each row. \n
*@par Inputs:
*Inputs "sorted_x" and "values" are 2D tensors.
* @li sorted_x: A 2D Tensor where each row is ordered.
* @li values: A 2D Tensor with the same number of rows as "sorted_x". \n
*@par Attributes:
*out_type: A required DType from: "int32, int64". Specifies the output index type. \n
*@par Outputs:
*y: A Tensor with the same shape as "values". \n
*@attention Constraints:
*UpperBound runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator UpperBound.
*/
REG_OP(UpperBound)
.INPUT(sorted_x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
.INPUT(values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
.OUTPUT(y, TensorType({DT_INT32, DT_INT64}))
.REQUIRED_ATTR(out_type, Type)
.OP_END_FACTORY_REG(UpperBound)
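// Informative example (illustrative values). In contrast to LowerBound,
// upper_bound returns the index of the first element strictly greater than
// the searched value, per row:
//   sorted_x = [[0, 3, 9, 9, 10],   values = [[2, 4, 9],   y = [[1, 2, 4],
//               [1, 2, 3, 4, 5]]              [0, 2, 6]]        [0, 2, 5]]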
/**
*@brief Finds unique elements in a 1D tensor. \n
*@par Inputs:
*Inputs "x" and "axis" are 1D vectors.
* @li x: A 1D tensor.
* @li axis: A 1D tensor. \n
*@par Attributes:
*out_idx: An optional DType from: "int32, int64".
Defaults to "int32". \n
*@par Outputs:
*@li y: A tensor containing the unique elements of "x".
*@li idx: A tensor the same size as "x". The index in "y" of each value of "x".
*@li count: A tensor the same size as "y". The count of each unique element of "x". \n
*@attention Constraints:
*UniqueWithCountsExt2 runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator UniqueWithCountsExt2.
*/
REG_OP(UniqueWithCountsExt2)
.INPUT(x, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_STRING }))
.INPUT(axis, TensorType({ DT_INT32, DT_INT64 }))
.OUTPUT(y, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_STRING }))
.OUTPUT(idx, TensorType({ DT_INT32, DT_INT64 }))
.OUTPUT(count, TensorType({ DT_INT32, DT_INT64 }))
.REQUIRED_ATTR(out_idx, Type)
.OP_END_FACTORY_REG(UniqueWithCountsExt2)
/**
*@brief Fills the tensor with the mirror value. \n
*@par Inputs:
*Inputs "x" and "paddings" are 1D scalars.
* @li x: The tensor to be padded.
* @li paddings: A two-column matrix specifying the padding sizes.
The number of rows Has the same rank as "x". \n
5 years ago
*@par Attributes:
*mode: Either "REFLECT" or "SYMMETRIC". In reflect mode the padded regions
do not include the borders, while in symmetric mode the padded regions
do include the borders. \n
*@par Outputs:
*y: The padded tensor. \n
*@attention Constraints:
*MirrorPad runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator MirrorPad.
*/
REG_OP(MirrorPad)
.INPUT(x, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_BOOL, \
DT_COMPLEX64, DT_COMPLEX128 }))
.INPUT(paddings, TensorType({ DT_INT32, DT_INT64 }))
.OUTPUT(y, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_BOOL, \
DT_COMPLEX64, DT_COMPLEX128 }))
.REQUIRED_ATTR(mode, String)
.OP_END_FACTORY_REG(MirrorPad)
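// Informative example (illustrative values, mode = "SYMMETRIC",
// paddings = [[1, 1], [2, 2]]). In SYMMETRIC mode the border values
// themselves are repeated into the padding:
//   x = [[1, 2, 3],        y = [[2, 1, 1, 2, 3, 3, 2],
//        [4, 5, 6]]             [2, 1, 1, 2, 3, 3, 2],
//                               [5, 4, 4, 5, 6, 6, 5],
//                               [5, 4, 4, 5, 6, 6, 5]]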
/**
*@brief Computes the difference between two lists of numbers or strings. \n
*@par Inputs:
*Inputs "x" and "y" are 1D vectors.
* @li x: A Tensor. 1D. Values to keep.
* @li y: A Tensor. Must have the same type as x. 1D. Values to remove. \n
*@par Attributes:
*out_idx: An optional DType from: "int32, int64". Defaults to "int32". \n
*@par Outputs:
*@li out: A Tensor. Has the same type as "x".
*@li idx: A Tensor of type "out_idx". \n
*@attention Constraints:
*ListDiff runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator ListDiff.
*/
REG_OP(ListDiff)
.INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_UINT8, DT_INT8,
DT_INT16, DT_UINT16, DT_INT32, DT_INT64}))
.INPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_UINT8, DT_INT8,
DT_INT16, DT_UINT16, DT_INT32, DT_INT64}))
.OUTPUT(out, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_UINT8, DT_INT8,
DT_INT16, DT_UINT16, DT_INT32, DT_INT64}))
.OUTPUT(idx, TensorType({DT_INT32, DT_INT64}))
.ATTR(out_idx, Type, DT_INT32)
.OP_END_FACTORY_REG(ListDiff)
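// Informative example (illustrative values):
//   x = [1, 2, 3, 4, 5, 6], y = [1, 3, 5]
//   out = [2, 4, 6]   // values of x not present in y
//   idx = [1, 3, 5]   // positions of those values in x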
/**
*@brief Creates an empty tensor, using the shape and dtype specified in the attributes. \n
*@par Attributes:
*@li dtype: Specify the data type of the empty tensor.
*@li shape: Specify the shape of the empty tensor. \n
*@par Outputs:
*y: The empty constant tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator _ParallelConcatStart.
*/
REG_OP(_ParallelConcatStart)
.OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
.ATTR(dtype, Type, DT_INT32)
.ATTR(shape, ListInt, {})
.OP_END_FACTORY_REG(_ParallelConcatStart)
/**
*@brief Creates a constant tensor from a tensor-like object. This operator is used for inference.
Operator Const has the same definition as operator Constant. \n
*@par Attributes:
*value: Required. The value and type of the resulting tensor; there are no restrictions on the type. \n
*@par Outputs:
*y: A constant tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Const.
*/
REG_OP(Const)
.OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
DT_UINT8, DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
.ATTR(value, Tensor, Tensor())
.OP_END_FACTORY_REG(Const)
/**
*@brief Creates a constant tensor for training. \n
*@par Attributes:
*value: Required. The value and type of the resulting tensor; there are no restrictions on the type. \n
*@par Outputs:
*y: The constant tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Const.
*/
REG_OP(Constant)
.OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
DT_UINT8, DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
.ATTR(value, Tensor, Tensor())
.OP_END_FACTORY_REG(Constant)
/**
*@brief Returns a copy of the input tensor. \n
*@par Inputs:
*x: A tensor. \n
*@par Outputs:
*y: A tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Snapshot.
*/
REG_OP(Snapshot)
.INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
DT_UINT8, DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
.OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
DT_UINT8, DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
.OP_END_FACTORY_REG(Snapshot)
/**
*@brief Gives a guarantee to the runtime that the input tensor is a constant. \n
*@par Inputs:
*x: A tensor. \n
*@par Outputs:
*y: The input tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator GuaranteeConst.
*/
REG_OP(GuaranteeConst)
.INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
.OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
.OP_END_FACTORY_REG(GuaranteeConst)
/**
*@brief Returns the target shape for broadcasting shapes "x1" and "x2". \n
*@par Inputs:
*@li x1: A tensor of type int32 or int64. A shape.
*@li x2: A tensor of the same type as "x1". The other shape. \n
*@par Outputs:
*y: A tensor. The broadcasted shape. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator BroadcastArgs.
*/
REG_OP(BroadcastArgs)
.INPUT(x1, TensorType({DT_INT32, DT_INT64}))
.INPUT(x2, TensorType({DT_INT32, DT_INT64}))
.OUTPUT(y, TensorType({DT_INT32, DT_INT64}))
.OP_END_FACTORY_REG(BroadcastArgs)
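// Informative example (illustrative values): shapes are aligned from the
// right and dimensions of size 1 are broadcast, so
//   x1 = [2, 1, 3], x2 = [5, 3]  ->  y = [2, 5, 3]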
/**
*@brief Outputs its input tensor as is and triggers an error if a gradient is requested. \n
*@par Inputs:
*x: A tensor. \n
*@par Attributes:
*message: A string to be printed in the error when a gradient is requested. \n
*@par Outputs:
*y: The input tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator PreventGradient.
*/
REG_OP(PreventGradient)
.INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
.OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
.ATTR(message, String, "")
.OP_END_FACTORY_REG(PreventGradient)
/**
*@brief Returns the reduction indices for computing gradients of "x1" and "x2" with broadcast. \n
*@par Inputs:
*@li x1: A tensor of type int32 or int64.
*@li x2: A tensor of type int32 or int64.
"x2" has the same type as "x1". \n
*@par Outputs:
*@li y1: A tensor. Reduction indices of "x1".
*@li y2: A tensor. Reduction indices of "x2". \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator BroadcastGradientArgs.
*/
REG_OP(BroadcastGradientArgs)
.INPUT(x1, TensorType({DT_INT32, DT_INT64}))
.INPUT(x2, TensorType({DT_INT32, DT_INT64}))
.OUTPUT(y1, TensorType({DT_INT32, DT_INT64}))
.OUTPUT(y2, TensorType({DT_INT32, DT_INT64}))
.OP_END_FACTORY_REG(BroadcastGradientArgs)
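// Informative example (illustrative values): for x1 = [2, 1, 3] and
// x2 = [2, 5, 3], the broadcast output shape is [2, 5, 3]; the gradient
// w.r.t. x1 must be summed over the broadcast dimension, so y1 = [1] and
// y2 = [] (nothing to reduce).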
/**
*@brief Stops gradient computation. None is returned for the node where the gradient computation is stopped.
*@par Inputs:
*x: A tensor. \n
*@par Outputs:
*y: The input tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator StopGradient.
*/
REG_OP(StopGradient)
.INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
.OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
.OP_END_FACTORY_REG(StopGradient)
/**
*@brief Returns a tensor with the same shape and contents as the input. \n
*@par Inputs:
*x: A tensor. \n
*@par Outputs:
*y: A tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Identity.
*/
REG_OP(Identity)
.INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
.OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
.OP_END_FACTORY_REG(Identity)
/**
*@brief Returns a list of tensors with the same shapes and contents as the input tensors. \n
*@par Inputs:
*x: A list of input tensors. It's a dynamic input. \n
*@par Outputs:
*y: A list of Tensor objects, with the same length as the input tensor list.
It's a dynamic output. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator IdentityN.
*/
REG_OP(IdentityN)
.DYNAMIC_INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
.DYNAMIC_OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
.OP_END_FACTORY_REG(IdentityN)
/**
*@brief Inserts a dimension of 1 into a tensor's shape. Only the tensor shape is changed, without changing the data. \n
*@par Inputs:
*@li x: A tensor.
*@li axis: The dimension index at which to expand. \n
*@par Outputs:
*y: A tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator ExpandDims.
*/
REG_OP(ExpandDims)
.INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8, DT_INT32,
DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
.INPUT(axis, TensorType({DT_INT32, DT_INT64}))
.OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8, DT_INT32,
DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
.OP_END_FACTORY_REG(ExpandDims)
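// Informative example (illustrative values): for x of shape [2, 3],
//   axis = 0  -> y has shape [1, 2, 3]
//   axis = -1 -> y has shape [2, 3, 1]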
/**
*@brief Inserts a dimension of 1 into a tensor's shape. Only the tensor shape is changed, without changing the data. \n
*@par Inputs:
*x: The original tensor. \n
*@par Attributes:
*axes: A list of ints. The dimensions of size 1 to be inserted. \n
*@par Outputs:
*y: A reshaped tensor with the same data as "x". \n
*@par Third-party framework compatibility
*Compatible with the Onnx operator Unsqueeze.
*/
REG_OP(Unsqueeze)
.INPUT(x, TensorType({DT_FLOAT32, DT_INT32, DT_UINT8, DT_BOOL}))
.OUTPUT(y, TensorType({DT_FLOAT32, DT_INT32, DT_UINT8, DT_BOOL}))
.ATTR(axes, ListInt, {})
.OP_END_FACTORY_REG(Unsqueeze)
/**
*@brief Reshapes a tensor. Only the tensor shape is changed, without changing the data. \n
*@par Inputs:
*@li x: A tensor.
*@li shape: A tensor. Defines the shape of the output tensor. \n
*@par Attributes:
*@li axis: An optional int32 or int64. The first dimension to reshape. Defaults to "0".
*@li num_axes: An optional int32 or int64. The extent of the reshape. Defaults to "-1". \n
*@par Outputs:
*y: A tensor. \n
*@par Attention:
*This operator cannot be directly called by the aclopExecute API. \n
*@par Third-party framework compatibility
*@li Compatible with the TensorFlow operator Reshape.
*@li Compatible with the Caffe operator Reshape.
*/
REG_OP(Reshape)
.INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8, DT_INT32,
DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
.INPUT(shape, TensorType({DT_INT32, DT_INT64}))
.OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8, DT_INT32,
DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
.ATTR(axis, Int, 0)
.ATTR(num_axes, Int, -1)
.OP_END_FACTORY_REG(Reshape)
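// A minimal graph-building sketch (informative only). It assumes the
// set_input_*/set_attr_* setters that the REG_OP macros in operator_reg.h
// generate for each registered operator; names and the shape_tensor value
// are illustrative, and error handling is omitted:
//   auto data    = ge::op::Data("x").set_attr_index(0);
//   auto shape   = ge::op::Const("shape").set_attr_value(shape_tensor);  // e.g. {3, 2}
//   auto reshape = ge::op::Reshape("reshape")
//                      .set_input_x(data)
//                      .set_input_shape(shape);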
/**
*@brief Removes dimensions of size 1 from the shape of a tensor. \n
*@par Inputs:
*x: A tensor. \n
*@par Attributes:
*axis: An optional list of int32 or int64. If not specified, squeezes all dimensions of size 1. If specified, only squeezes the dimensions listed. It is an error to squeeze a dimension that is not 1. \n
*@par Outputs:
*y: A tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Squeeze.
*/
REG_OP(Squeeze)
.INPUT(x, TensorType::ALL())
.OUTPUT(y, TensorType::ALL())
.ATTR(axis, ListInt, {})
.OP_END_FACTORY_REG(Squeeze)
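// Informative example (illustrative values): for x of shape [1, 2, 1, 3],
//   axis = {}  -> y has shape [2, 3]     (all size-1 dimensions removed)
//   axis = {0} -> y has shape [2, 1, 3]  (only dimension 0 removed)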
/**
*@brief Returns an integer representing the rank of the input tensor. The rank of a tensor is the number of indices required to uniquely select each element of the tensor, that is, the number of dimensions of the tensor. \n
*@par Inputs:
*x: A tensor. \n
*@par Outputs:
*y: A tensor. The rank of input tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Rank.
*/
REG_OP(Rank)
.INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
.OUTPUT(y, TensorType({DT_INT32}))
.OP_END_FACTORY_REG(Rank)
/**
*@brief Returns the size of a tensor, that is, an integer of the number of elements of the tensor. \n
*@par Inputs:
*x: A tensor. \n
*@par Attributes:
*dtype: An optional int32 or int64. The output data type. Defaults to "int32". \n
*@par Outputs:
*y: A tensor. The size of the input tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Size.
*/
REG_OP(Size)
.INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
.OUTPUT(y, TensorType({DT_INT32,DT_INT64}))
.ATTR(dtype, Int, DT_INT32)
.OP_END_FACTORY_REG(Size)
/**
*@brief Input data for other operators. \n
*@par Inputs:
*x: A tensor. \n
*@par Attributes:
*index: The index of the input tensor. The data type must be int32 or int64.
If a net has three Data nodes, their indices should be set to 0, 1,
and 2 respectively. \n
*@par Outputs:
*y: A tensor. \n
*@par Third-party framework compatibility
*Compatible with the Caffe operator Data.
*/
REG_OP(Data)
.INPUT(x, TensorType::ALL())
.OUTPUT(y, TensorType::ALL())
.ATTR(index, Int, 0)
.OP_END_FACTORY_REG(Data)
/**
*@brief Inserts a placeholder for a tensor that will be always fed. \n
*@par Inputs:
*x: A tensor. \n
*@par Attributes:
*@li peerIndex: An integer. The index of the corresponding "End" node it is connected to.
*@li parentId: A string, used to check whether the node comes from the saved parent node.
*@li parentOpType: A string. Op type of the original node.
*@li anchorIndex: An integer, used to check whether the node comes from the saved anchor. \n
*@par Outputs:
*y: The created placeholder tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator PlaceHolder.
*/
REG_OP(PlaceHolder)
.INPUT(x, TensorType::ALL())
.OUTPUT(y, TensorType::ALL())
.ATTR(peerIndex, Int, 0) // the index of the corresponding 'End' node it is connected to
.ATTR(parentId, String, "") // checks whether these nodes come from the saved parent node
.ATTR(parentOpType, String, "") // op type of the original node
.ATTR(anchorIndex, Int, 0) // checks whether these nodes come from the saved anchor
.OP_END_FACTORY_REG(PlaceHolder)
/**
*@brief Inserts a placeholder with default value for a tensor. \n
*@par Inputs:
*x: A tensor. \n
*@par Attributes:
*shape: A required list of ints. The shape of the tensor. \n
*@par Outputs:
*y: The created placeholder tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator PlaceholderWithDefault.
*/
REG_OP(PlaceholderWithDefault)
.INPUT(x, TensorType::ALL())
.OUTPUT(y, TensorType::ALL())
.REQUIRED_ATTR(shape, ListInt)
.OP_END_FACTORY_REG(PlaceholderWithDefault)
/**
*@brief Reads and returns the value of the input variable tensor. \n
*@par Inputs:
*x: A tensor. \n
*@par Attributes:
*dtype: An optional int32 or int64. The output data type. Defaults to int32. \n
*@par Outputs:
*y: A tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator ReadVariableOp.
*/
REG_OP(ReadVariableOp)
.INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
.OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
.ATTR(dtype, Int, DT_INT32)
.OP_END_FACTORY_REG(ReadVariableOp)
/**
*@brief Marks the outputs of a subgraph partitioned by engine type.
*@par Inputs:
*x: A tensor. \n
*@par Outputs:
*y: A tensor. \n
*@par Attributes:
*@li peerIndex: The index of the corresponding 'placeholder' node it's connected to.
*@li parentOpType: Op type of original node.
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(End)
.INPUT(x, TensorType::ALL())
.OUTPUT(y, TensorType::ALL())
.ATTR(peerIndex, Int, 0)
.ATTR(parentOpType, String, "")
.OP_END_FACTORY_REG(End)
/**
*@brief Operations for writing summary data, for use in analysis and visualization.
*@par Inputs:
* One input:
*x: Collections of summary data.
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(Summary)
.INPUT(x, TensorType::ALL())
.OP_END_FACTORY_REG(Summary)
/**
*@brief Returns the shape of a tensor. \n
*@par Inputs:
*x: A tensor. \n
*@par Attributes:
*dtype: An optional int32 or int64. The output data type. Defaults to int32. \n
*@par Outputs:
*y: A tensor. The shape of the input tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Shape.
*/
REG_OP(Shape)
.INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
.OUTPUT(y, TensorType({DT_INT32, DT_INT64}))
.ATTR(dtype, Int, DT_INT32)
.OP_END_FACTORY_REG(Shape)
/**
*@brief Returns shape of tensors. \n
*@par Inputs:
*x: A list of input tensors. It's a dynamic input. \n
*@par Attributes:
*dtype: An optional int32 or int64. The output data type. Defaults to "int32". \n
*@par Outputs:
*y: A list of tensors with the same length as the input list of tensors.
It's a dynamic output. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator ShapeN.
*/
REG_OP(ShapeN)
.DYNAMIC_INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
.DYNAMIC_OUTPUT(y, TensorType({DT_INT32, DT_INT64}))
.ATTR(dtype, Int, DT_INT32)
.OP_END_FACTORY_REG(ShapeN)
/**
*@brief Creates a tensor with the given "shape" and "dtype". \n
*@par Inputs:
*shape: The shape of the output tensor. \n
*@par Attributes:
*@li dtype: Optional. The data type of the output tensor. Defaults to "int32".
*@li init: An optional bool. If true, initializes the returned tensor with the default value of "dtype". Defaults to "false". \n
*@par Outputs:
*y: A tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Empty.
*/
REG_OP(Empty)
.INPUT(shape, TensorType({DT_INT32}))
.OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
.ATTR(dtype, Int, DT_INT32)
.ATTR(init, Bool, false)
.OP_END_FACTORY_REG(Empty)
/**
*@brief Gradient op for MirrorPad op. Folds a mirror-padded tensor. \n
*@par Inputs:
*Inputs "x" and "y" are 1D vectors.
* @li x: A Tensor. The input tensor to be folded.
* @li paddings: A Tensor of type int32 or int64. A two-column matrix
specifying the padding sizes. \n
5 years ago
*@par Attributes:
*mode: A string from: "REFLECT", "SYMMETRIC". The mode used in the MirrorPad op. \n
*@par Outputs:
*y: A Tensor. Has the same type as "x". \n
*@attention Constraints:
*MirrorPadGrad runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator MirrorPadGrad.
*/
REG_OP(MirrorPadGrad)
.INPUT(x, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
DT_COMPLEX64, DT_COMPLEX128 }))
.INPUT(paddings, TensorType({DT_INT32, DT_INT64}))
.OUTPUT(y, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
DT_COMPLEX64, DT_COMPLEX128 }))
.REQUIRED_ATTR(mode, String)
.OP_END_FACTORY_REG(MirrorPadGrad)
/**
*@brief Returns locations of nonzero / true values in a tensor. \n
*@par Inputs:
*Including:
*x: A Tensor. Must be one of the following types:
DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT8, DT_UINT8, DT_INT16,
DT_UINT16, DT_INT32, DT_UINT32, DT_INT64, DT_UINT64, DT_BOOL. \n
*@par Outputs:
*y: A Tensor of type DT_INT64. \n
*@attention Constraints:
*Where runs on the Ascend AI CPU, which delivers poor performance.\n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Where.
*/
REG_OP(Where)
.INPUT(x, TensorType({DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT8, DT_UINT8, DT_INT16, \
DT_UINT16, DT_INT32, DT_UINT32, DT_INT64, DT_UINT64, DT_BOOL}))
.OUTPUT(y, TensorType({DT_INT64}))
.OP_END_FACTORY_REG(Where)
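// Informative example (illustrative values): each output row is the
// coordinate of one true/nonzero element, in row-major order:
//   x = [[true, false],        y = [[0, 0],
//        [false, true]]             [1, 1]]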
/**
*@brief Derived from the Caffe operator Split, which splits an input blob into
* multiple output blobs so that one blob can feed multiple output layers.
*The Split node is removed from the graph after the split operation is completed. \n
*@par Inputs:
*x: A Tensor. Must be one of the following types:
fp16, fp32, int8, uint8, int16, uint16, int32, uint32, int64, uint64. \n
*@par Outputs:
*y: A dynamic output. A list of Tensors of the same type as "x". The number of
outputs must equal the attribute "N". \n
*@par Attributes:
*@li N: A required int. The number of dynamic outputs.
*/
REG_OP(Copy)
.INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_UINT8, DT_INT16, \
DT_UINT16, DT_INT32, DT_UINT32, DT_INT64, DT_UINT64}))
.DYNAMIC_OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_UINT8, DT_INT16, \
DT_UINT16, DT_INT32, DT_UINT32, DT_INT64, DT_UINT64}))
.REQUIRED_ATTR(N, Int)
.OP_END_FACTORY_REG(Copy)
/**
*@brief Generates fingerprint values. \n
*@par Inputs:
*@li data: Must have rank 1 or higher.
*@li method: Fingerprint method used by this op. The only method currently
available is `farmhash::fingerprint64`. \n
*@par Outputs:
*y: A two-dimensional `Tensor` of type `tf.uint8`. The first dimension equals
`data`'s first dimension, and the second dimension size depends on the
fingerprint algorithm. \n
*@par Third-party framework compatibility
* Compatible with TensorFlow Fingerprint operator.
*/
REG_OP(Fingerprint)
.INPUT(data, TensorType({DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT8, DT_UINT8, DT_INT16, \
DT_UINT16, DT_INT32, DT_UINT32, DT_INT64, DT_UINT64, DT_BOOL}))
.INPUT(method, TensorType({DT_STRING}))
.OUTPUT(y, TensorType({DT_UINT8}))
.OP_END_FACTORY_REG(Fingerprint)
/**
*@brief Changes the shape of the output according to the attribute "outShape".
*
*@par Inputs:
*x: A Tensor. \n
*@par Outputs:
*y: A Tensor. Has the same type as "x", reshaped to "outShape". \n
*@par Attributes:
*outShape: A list of ints. The output shape is inferred from this attribute.
*/
REG_OP(TransShape)
.INPUT(x, TensorType::ALL())
.OUTPUT(y, TensorType::ALL())
.ATTR(outShape, ListInt, {})
.OP_END_FACTORY_REG(TransShape)
/**
*@brief Computes the (possibly normalized) Levenshtein Edit Distance. \n
*@par Inputs:
*@li hypothesis_indices: The indices of the hypothesis list SparseTensor.
This is an N x R int64 matrix.
*@li hypothesis_values: The values of the hypothesis list SparseTensor.
This is an N-length vector.
*@li hypothesis_shape: The shape of the hypothesis list SparseTensor.
This is an R-length vector.
*@li truth_indices: The indices of the truth list SparseTensor.
This is an M x R int64 matrix.
*@li truth_values: The values of the truth list SparseTensor.
This is an M-length vector.
*@li truth_shape: The shape of the truth list SparseTensor.
This is an R-length vector. \n
*@par Attributes:
*@li normalize: boolean (if true, edit distances are normalized by length of truth). \n
*@par Outputs:
*@li output: A dense float tensor with rank R - 1. \n
*@par Third-party framework compatibility
* Compatible with TensorFlow EditDistance operator.
*/
REG_OP(EditDistance)
.INPUT(hypothesis_indices, TensorType({DT_INT64}))
.INPUT(hypothesis_values, TensorType::BasicType())
.INPUT(hypothesis_shape, TensorType({DT_INT64}))
.INPUT(truth_indices, TensorType({DT_INT64}))
.INPUT(truth_values, TensorType::BasicType())
.INPUT(truth_shape, TensorType({DT_INT64}))
.ATTR(normalize, Bool, true)
.OUTPUT(output, TensorType({DT_FLOAT}))
.OP_END_FACTORY_REG(EditDistance)
} // namespace ge
#endif // OPS_BUILT_IN_OP_PROTO_INC_ARRAY_OPS_H_