update include files to 1.2

pull/1409/head
shenwei41 4 years ago
parent de47249a72
commit 5ddf2ac2b2

@@ -56,6 +56,7 @@
 #include <dirent.h>
 #include <getopt.h>
 #include <libgen.h>
+#include <malloc.h>
 #include <linux/types.h>
 #include <linux/hdreg.h>

@@ -550,6 +550,10 @@ MMPA_FUNC_VISIBILITY mmFileHandle mmShmOpen(const CHAR *name, INT32 oflag, mmMod
 MMPA_FUNC_VISIBILITY INT32 mmShmUnlink(const CHAR *name);
 MMPA_FUNC_VISIBILITY VOID *mmMmap(mmFd_t fd, mmSize_t size, mmOfft_t offset, mmFd_t *extra, INT32 prot, INT32 flags);
 MMPA_FUNC_VISIBILITY INT32 mmMunMap(VOID *data, mmSize_t size, mmFd_t *extra);
+MMPA_FUNC_VISIBILITY mmSize mmGetPageSize();
+MMPA_FUNC_VISIBILITY VOID *mmAlignMalloc(mmSize mallocSize, mmSize alignSize);
+MMPA_FUNC_VISIBILITY VOID mmAlignFree(VOID *addr);
 #define MMPA_DLL_API
 #ifdef __cplusplus

@@ -557,6 +557,10 @@ MMPA_FUNC_VISIBILITY mmFileHandle mmShmOpen(const CHAR *name, INT32 oflag, mmMod
 MMPA_FUNC_VISIBILITY INT32 mmShmUnlink(const CHAR *name);
 MMPA_FUNC_VISIBILITY VOID *mmMmap(mmFd_t fd, mmSize_t size, mmOfft_t offset, mmFd_t *extra, INT32 prot, INT32 flags);
 MMPA_FUNC_VISIBILITY INT32 mmMunMap(VOID *data, mmSize_t size, mmFd_t *extra);
+MMPA_FUNC_VISIBILITY mmSize mmGetPageSize();
+MMPA_FUNC_VISIBILITY VOID *mmAlignMalloc(mmSize mallocSize, mmSize alignSize);
+MMPA_FUNC_VISIBILITY VOID mmAlignFree(VOID *addr);
 #ifdef __cplusplus
 #if __cplusplus
 }
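
The two hunks above add the same three helpers to the Linux and Windows variants of the MMPA header. A minimal usage sketch, based only on the declarations shown; the umbrella header name and the NULL-on-failure convention are assumptions, not confirmed by this diff:

#include <cstring>
#include "mmpa/mmpa_api.h"  // assumed umbrella header for the MMPA layer

// Copies len bytes through a page-aligned scratch buffer built from the
// newly declared helpers.
INT32 CopyThroughAlignedBuffer(const void *src, mmSize len) {
    mmSize page = mmGetPageSize();           // alignment granularity
    VOID *buf = mmAlignMalloc(len, page);    // (mallocSize, alignSize), per the declaration
    if (buf == nullptr) {                    // assumed failure convention
        return -1;
    }
    std::memcpy(buf, src, len);
    // ... hand buf to code that requires aligned memory ...
    mmAlignFree(buf);                        // must pair with mmAlignMalloc
    return 0;
}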

@@ -369,7 +369,7 @@ REG_OP(MatrixSetDiagD)
 * int64, complex64, qint8, quint8, qint32, uint16, complex128, half, uint32,
 * uint64
 *@li indices: An ND Tensor.
-*Must be one of the following types: int32, int64
+*Must be one of the following types: int32 or int64
 *@li updates: An ND Tensor.
 *Must be one of the following types: float16, float32, int8, uint8, double,
 * int64, complex64, qint8, quint8, qint32, uint16, complex128, half, uint32,
@@ -429,7 +429,7 @@ REG_OP(TensorScatterUpdate)
 *@li var: An ND Tensor . \n
 *Must be one of the following types: float16, float32, int32, int8, uint8
-*@li indices: An ND Tensor of type int32 or int64.
+*@li indices: An ND Tensor of type int32 or int64
 *@li updates: An Tensor. format:NCHW, NHWC . \n
@@ -447,10 +447,10 @@ REG_OP(TensorScatterUpdate)
 * Compatible with the TensorFlow operator ScatterAdd.
 */
 REG_OP(ScatterAdd)
-.INPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+.INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
 .INPUT(indices, TensorType::IndexNumberType())
-.INPUT(updates, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
-.OUTPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+.INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+.OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
 .ATTR(use_locking, Bool, false)
 .OP_END_FACTORY_REG(ScatterAdd)
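
For readers skimming the scatter hunks: per the "Compatible with the TensorFlow operator ScatterAdd" note above, the op accumulates updates into var at the given indices. A 1-D reference sketch for intuition only, not the device kernel; IndexT mirrors the int32/int64 pair that IndexNumberType() now allows:

#include <cstddef>
#include <vector>

template <typename T, typename IndexT>
void ScatterAddRef(std::vector<T> &var,
                   const std::vector<IndexT> &indices,
                   const std::vector<T> &updates) {
    for (std::size_t i = 0; i < indices.size(); ++i) {
        var[static_cast<std::size_t>(indices[i])] += updates[i];  // duplicate indices accumulate
    }
}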
@@ -463,7 +463,7 @@ REG_OP(ScatterAdd)
 *Must be one of the following types: float16, float, int32, int8, uint8
 *@li indices: An ND Tensor.
-*Must be one of the following types: int32
+*Must be one of the following types: int32 or int64
 *@li updates: An ND Tensor.
 *Must be one of the following types: float16, float, int32, int8, uint8
@@ -478,10 +478,10 @@ REG_OP(ScatterAdd)
 * Compatible with the TensorFlow operator ScatterDiv.
 */
 REG_OP(ScatterDiv)
-.INPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
-.INPUT(indices, TensorType({DT_INT32}))
-.INPUT(updates, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
-.OUTPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+.INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+.INPUT(indices, TensorType::IndexNumberType())
+.INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+.OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
 .ATTR(use_locking, Bool, false)
 .OP_END_FACTORY_REG(ScatterDiv)
@@ -493,7 +493,7 @@ REG_OP(ScatterDiv)
 *@li var: An ND Tensor.
 *Must be one of the following types: float16, float, int32, int8, uint8
 *@li indices: An ND Tensor.
-*Must be one of the following types: int32
+*Must be one of the following types: int32 or int64
 *@li updates: An ND Tensor.
 *Must be one of the following types: float16, float, int32, int8, uint8
 *@par Attributes:
@@ -507,10 +507,10 @@ REG_OP(ScatterDiv)
 * Compatible with the TensorFlow operator ScatterNdAdd.
 */
 REG_OP(ScatterNdAdd)
-.INPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+.INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
 .INPUT(indices, TensorType::IndexNumberType())
-.INPUT(updates, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
-.OUTPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+.INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+.OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
 .ATTR(use_locking, Bool, false)
 .OP_END_FACTORY_REG(ScatterNdAdd)
@@ -550,7 +550,7 @@ REG_OP(TensorScatterAdd)
 *@li var: An ND Tensor.
 *Must be one of the following types: float16, float, int32, int8, uint8
 *@li indices: An ND Tensor.
-*Must be one of the following types: int32, int64
+*Must be one of the following types: int32 or int64
 *@li updates: An ND Tensor.
 *Must be one of the following types: float16, float, int32, int8, uint8
@@ -565,10 +565,10 @@ REG_OP(TensorScatterAdd)
 * Compatible with the TensorFlow operator ScatterNdSub.
 */
 REG_OP(ScatterNdSub)
-.INPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+.INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
 .INPUT(indices, TensorType::IndexNumberType())
-.INPUT(updates, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
-.OUTPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+.INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+.OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
 .ATTR(use_locking, Bool, false)
 .OP_END_FACTORY_REG(ScatterNdSub)
@@ -608,7 +608,7 @@ REG_OP(TensorScatterSub)
 *@li var: An ND Tensor.
 *Must be one of the following types: float16, float, int32, int8, uint8
 *@li indices: An ND Tensor.
-*Must be one of the following types: int32, int64
+*Must be one of the following types: int32 or int64
 *@li updates: An ND Tensor.
 *Must be one of the following types: float16, float, int32, int8, uint8
 *@par Attributes:
@@ -622,10 +622,10 @@ REG_OP(TensorScatterSub)
 * Compatible with the TensorFlow operator ScatterSub.
 */
 REG_OP(ScatterSub)
-.INPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+.INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
 .INPUT(indices, TensorType::IndexNumberType())
-.INPUT(updates, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
-.OUTPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+.INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+.OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
 .ATTR(use_locking, Bool, false)
 .OP_END_FACTORY_REG(ScatterSub)
@@ -796,7 +796,7 @@ REG_OP(ConfusionMatrix)
 *@li var: An ND Tensor.
 *Must be one of the following types: float16, float, int32, int8, uint8
 *@li indices: An ND Tensor.
-*Must be one of the following types: int32
+*Must be one of the following types: int32 or int64
 *@li updates: An ND Tensor . \n
 *Must be one of the following types: float16, float, int32, int8, uint8
@@ -813,7 +813,7 @@ REG_OP(ConfusionMatrix)
 */
 REG_OP(ScatterMul)
 .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
-.INPUT(indices, TensorType({DT_INT32}))
+.INPUT(indices, TensorType::IndexNumberType())
 .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
 .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
 .ATTR(use_locking, Bool, false)
@@ -826,13 +826,13 @@ REG_OP(ScatterMul)
 *@par Inputs:
 * Three inputs, including:
 *@li var: An ND Tensor.
-*Must be one of the following types: float16, float, int32
+*Must be one of the following types: float16, float, int32, int8, uint8
 *@li indices: An ND Tensor.
-*Must be one of the following types: int32
+*Must be one of the following types: int32 or int64
 *@li updates: An ND Tensor.
-*Must be one of the following types: float16, float, int32
+*Must be one of the following types: float16, float, int32, int8, uint8
 *@par Attributes:
 *use_locking: An optional bool. Defaults to "False". If "True", the operation
@@ -845,10 +845,10 @@ REG_OP(ScatterMul)
 * Compatible with the TensorFlow operator ScatterMin.
 */
 REG_OP(ScatterMin)
-.INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32}))
-.INPUT(indices, TensorType({DT_INT32}))
-.INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32}))
-.OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32}))
+.INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+.INPUT(indices, TensorType::IndexNumberType())
+.INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+.OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
 .ATTR(use_locking, Bool, false)
 .OP_END_FACTORY_REG(ScatterMin)
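
ScatterMin differs from the ops above only in the reduction: it keeps the elementwise minimum at each index. A 1-D sketch of the documented TensorFlow-compatible behavior, which the widened type list now also covers for int8/uint8; intuition only, not the kernel:

#include <algorithm>
#include <cstddef>
#include <vector>

template <typename T, typename IndexT>
void ScatterMinRef(std::vector<T> &var,
                   const std::vector<IndexT> &indices,
                   const std::vector<T> &updates) {
    for (std::size_t i = 0; i < indices.size(); ++i) {
        std::size_t idx = static_cast<std::size_t>(indices[i]);
        var[idx] = std::min(var[idx], updates[i]);  // keep the smaller value
    }
}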
@@ -859,13 +859,13 @@ REG_OP(ScatterMin)
 * Three inputs, including:
 *@li var: An ND Tensor . \n
-*Must be one of the following types: float16, float, int32
+*Must be one of the following types: float16, float, int32, int8, uint8
 *@li indices: An NCHW, NHWC, or ND Tensor . \n
-*Must be one of the following types: int32
+*Must be one of the following types: int32 or int64
 *@li updates: An NCHW, NHWC, or ND Tensor . \n
-*Must be one of the following types: float16, float, int32
+*Must be one of the following types: float16, float, int32, int8, uint8
 *@par Attributes:
 *use_locking: An optional bool. Defaults to "False".
@@ -878,10 +878,10 @@ REG_OP(ScatterMin)
 * Compatible with the TensorFlow operator ScatterMax.
 */
 REG_OP(ScatterMax)
-.INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32}))
-.INPUT(indices, TensorType({DT_INT32}))
-.INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32}))
-.OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32}))
+.INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+.INPUT(indices, TensorType::IndexNumberType())
+.INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+.OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
 .ATTR(use_locking, Bool, false)
 .OP_END_FACTORY_REG(ScatterMax)
@@ -895,7 +895,7 @@ REG_OP(ScatterMax)
 *Must be one of the following types: float16, float, int32, int8, uint8
 *@li indices: An ND Tensor . \n
-*Must be one of the following types: int32
+*Must be one of the following types: int32 or int64
 *@li updates: An ND Tensor . \n
 *Must be one of the following types: float16, float, int32, int8, uint8
@@ -911,10 +911,10 @@ REG_OP(ScatterMax)
 * Compatible with the TensorFlow operator ScatterUpdate.
 */
 REG_OP(ScatterUpdate)
-.INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT8,DT_UINT8}))
-.INPUT(indices, TensorType({DT_INT32}))
-.INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT8,DT_UINT8}))
-.OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT8,DT_UINT8}))
+.INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+.INPUT(indices, TensorType::IndexNumberType())
+.INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+.OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
 .ATTR(use_locking, Bool, false)
 .OP_END_FACTORY_REG(ScatterUpdate)
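
One pattern repeats across all the scatter hunks above: indices move from TensorType({DT_INT32}) to TensorType::IndexNumberType(), and var/updates gain DT_INT32, DT_INT8, and DT_UINT8 where they were missing. Judging by the doc lines, which now consistently read "int32 or int64", the helper plausibly expands to the two index types; a sketch of that assumption (inferred from the docs, not taken from the GE type-system source):

static TensorType IndexNumberType() {
    return TensorType({DT_INT32, DT_INT64});  // assumed expansion
}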

@@ -1184,6 +1184,7 @@ REG_OP(MaxPool3DGrad)
 .OUTPUT(y, TensorType::RealNumberType())
 .REQUIRED_ATTR(ksize, ListInt)
 .REQUIRED_ATTR(strides, ListInt)
+.ATTR(padding, String, "SAME")
 .REQUIRED_ATTR(pads, ListInt)
 .ATTR(data_format, String, "NDHWC")
 .OP_END_FACTORY_REG(MaxPool3DGrad)
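
MaxPool3DGrad gains an optional padding attribute defaulting to "SAME" alongside the required pads list. As a reminder of the usual convention (TensorFlow's formula, not necessarily this kernel's exact one), SAME chooses the total padding per dimension so that the output length is ceil(input/stride):

#include <algorithm>
#include <cstdint>

// Total SAME padding for one spatial dimension.
int64_t SamePadTotal(int64_t in, int64_t ksize, int64_t stride) {
    int64_t out = (in + stride - 1) / stride;  // ceil(in / stride)
    return std::max<int64_t>(0, (out - 1) * stride + ksize - in);
}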
@@ -1678,6 +1679,22 @@ REG_OP(MaxPoolWithArgmaxV1)
 .ATTR(dilation, ListInt, {1, 1, 1, 1})
 .ATTR(ceil_mode, Bool, false)
 .OP_END_FACTORY_REG(MaxPoolWithArgmaxV1)
+// SubSample
+REG_OP(SubSample)
+.INPUT(labels, TensorType({DT_INT32}))
+.OUTPUT(y, TensorType({DT_INT32}))
+.REQUIRED_ATTR(batch_size_per_images, Int)
+.REQUIRED_ATTR(positive_fraction, Float)
+.OP_END_FACTORY_REG(SubSample)
+// SubSampleLabels
+REG_OP(SubSampleLabels)
+.INPUT(labels, TensorType({DT_INT32}))
+.INPUT(shuffle_matrix, TensorType({DT_INT32}))
+.OUTPUT(y, TensorType({DT_INT32}))
+.REQUIRED_ATTR(batch_size_per_images, Int)
+.REQUIRED_ATTR(positive_fraction, Float)
+.OP_END_FACTORY_REG(SubSampleLabels)
 } // namespace ge
 #endif // OPS_BUILT_IN_OP_PROTO_INC_NN_POOLING_OPS_H
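
SubSample and SubSampleLabels arrive without a doc block. Their attributes match the label samplers used in detection pipelines (e.g. Faster R-CNN region sampling), so a plausible reading, offered purely as an assumption, is: keep at most batch_size_per_images labels, roughly positive_fraction of them positive, and mark the rest ignored. A deterministic sketch; the shuffle_matrix input of SubSampleLabels suggests the real op randomizes the choice:

#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<int32_t> SubSampleRef(const std::vector<int32_t> &labels,
                                  int64_t batch_size_per_images,
                                  float positive_fraction) {
    std::vector<int32_t> out(labels.size(), -1);  // -1 = not sampled (assumed marker)
    int64_t pos_quota = static_cast<int64_t>(batch_size_per_images * positive_fraction);
    int64_t neg_quota = batch_size_per_images - pos_quota;
    for (std::size_t i = 0; i < labels.size(); ++i) {
        if (labels[i] > 0 && pos_quota > 0) { out[i] = labels[i]; --pos_quota; }
        else if (labels[i] == 0 && neg_quota > 0) { out[i] = 0; --neg_quota; }
    }
    return out;
}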

@@ -1006,9 +1006,9 @@ REG_OP(TopK)
 *@par Inputs:
 *Inputs including:
-* @li indices: A required index tensor. Must be one of the following types: float32, float16, int32, int8, uint8.
-* @li x: A required slice tensor. Must be one of the following types: float32, float16, int32, int8, uint8.
-* @li shape: A required list of int32, specifying the output shape.
+* @li indices: A required index tensor. Must be one of the following types: int32 or int64.
+* @li x: A required slice tensor. Must be one of the following types: float32, float16, int32, int8, uint8...
+* @li shape: A required list of int32 or int64, specifying the output shape.
 *@par Outputs:
 *y:A output Tensor with same datatype as "updates" . \n
@@ -1019,7 +1019,7 @@ REG_OP(TopK)
 * Compatible with the TensorFlow operator ScatterNd.
 */
 REG_OP(ScatterNd)
-.INPUT(indices, TensorType::BasicType())
+.INPUT(indices, TensorType::IndexNumberType())
 .INPUT(x, TensorType::BasicType())
 .INPUT(shape, TensorType::IndexNumberType())
 .OUTPUT(y, TensorType::BasicType())
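
ScatterNd, per the compatibility note above, builds a zero tensor of the requested shape and adds each slice of x at its index, with duplicate indices accumulating. A 1-D reference sketch, intuition only:

#include <cstddef>
#include <cstdint>
#include <vector>

template <typename T, typename IndexT>
std::vector<T> ScatterNdRef(const std::vector<IndexT> &indices,
                            const std::vector<T> &x, int64_t out_len) {
    std::vector<T> y(static_cast<std::size_t>(out_len), T{0});  // start from zeros
    for (std::size_t i = 0; i < indices.size(); ++i) {
        y[static_cast<std::size_t>(indices[i])] += x[i];        // duplicates sum
    }
    return y;
}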
@@ -1032,11 +1032,11 @@ REG_OP(ScatterNd)
 *@par Inputs:
 *Inputs including:
 * @li indices: A required index tensor. Must be one of the following types:
-* float, float16, int32, int16. format:ND.
+* int32 or int64. format:ND.
 * @li x: A required slice tensor. Must be one of the following types:
-* float, float16, int32, int16. format:ND.
+* float16, float, int32, int8, uint8. format:ND.
 *@par Attributes:
-* @li shape: A required list of int32, specifying the output shape.
+* @li shape: A required list of int32 or int64, specifying the output shape.
 *@par Outputs:
 *y: A Tensor. Has the same type as "x". format:ND . \n
@@ -1051,8 +1051,8 @@ REG_OP(ScatterNd)
 */
 REG_OP(ScatterNdD)
 .INPUT(indices, TensorType::IndexNumberType())
-.INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT16}))
-.OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT16}))
+.INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8}))
+.OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8}))
 .REQUIRED_ATTR(shape, ListInt)
 .OP_END_FACTORY_REG(ScatterNdD)

@@ -418,12 +418,8 @@ REG_OP(BatchToSpace)
 * Warning: THIS FUNCTION IS DEPRECATED. Please use BatchToSpace instead.
 */
 REG_OP(BatchToSpaceD)
-.INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8,
-DT_UINT16, DT_UINT32, DT_UINT64, DT_INT8, DT_INT16, DT_COMPLEX64,
-DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32}))
-.OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8,
-DT_UINT16, DT_UINT32, DT_UINT64, DT_INT8, DT_INT16, DT_COMPLEX64,
-DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32}))
+.INPUT(x, TensorType::BasicType())
+.OUTPUT(y, TensorType::BasicType())
 .REQUIRED_ATTR(block_size, Int)
 .REQUIRED_ATTR(crops, ListInt)
 .OP_END_FACTORY_REG(BatchToSpaceD)

@@ -59,6 +59,7 @@ typedef enum tagRtAicpuDeployType {
 typedef enum tagRtFeatureType {
 FEATURE_TYPE_MEMCPY = 0,
+FEATURE_TYPE_MEMORY = 1,
 FEATURE_TYPE_RSV
 } rtFeatureType_t;
@@ -72,6 +73,11 @@ typedef enum tagMemcpyInfo {
 MEMCPY_INFO_RSV
 } rtMemcpyInfo_t;
+typedef enum tagMemoryInfo {
+MEMORY_INFO_TS_4G_LIMITED = 0,
+MEMORY_INFO_RSV
+} rtMemoryInfo_t;
 /**
 * @ingroup dvrt_dev
 * @brief get total device number.
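
The new FEATURE_TYPE_MEMORY / MEMORY_INFO_TS_4G_LIMITED pair reads like input for a runtime capability query. A hypothetical probe; the rtGetRtCapability name, its signature, and RT_ERROR_NONE are assumptions about this runtime's API, not confirmed by the hunk above:

#include <cstdint>

// Returns true when the runtime reports the TS 4 GB memory limitation.
bool DeviceMemoryIs4GLimited() {
    int64_t value = 0;
    rtError_t ret = rtGetRtCapability(FEATURE_TYPE_MEMORY,          // assumed API
                                      MEMORY_INFO_TS_4G_LIMITED, &value);
    return (ret == RT_ERROR_NONE) && (value != 0);
}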
