!1153 update_headerfiles_0225

From: @shenwei41
Reviewed-by: @lilongfei15,@xsmq
Signed-off-by: @xsmq
pull/1153/MERGE
mindspore-ci-bot 4 years ago committed by Gitee
commit f37d94ba21

@ -174,6 +174,12 @@ typedef enum {
ACL_ERROR = 3,
} aclLogLevel;
typedef enum {
ACL_MEMTYPE_DEVICE = 0,
ACL_MEMTYPE_HOST = 1,
} aclMemType;
/**
* @ingroup AscendCL
* @brief Converts data of type aclFloat16 to data of type float
@ -594,6 +600,18 @@ ACL_FUNC_VISIBILITY aclError aclSetTensorDynamicInput(aclTensorDesc *desc, const
*/
ACL_FUNC_VISIBILITY aclError aclSetTensorConst(aclTensorDesc *desc, void *dataBuffer, size_t length);
/**
* @ingroup AscendCL
* @brief Set tensor memory type specified by the tensor description
*
* @param desc [OUT] pointer to the instance of aclTensorDesc
* @param memType [IN] ACL_MEMTYPE_DEVICE means device, ACL_MEMTYPE_HOST means host
*
* @retval ACL_SUCCESS The function is successfully executed.
* @retval OtherValues Failure
*/
ACL_FUNC_VISIBILITY aclError aclSetTensorPlaceMent(aclTensorDesc *desc, aclMemType memType);
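A minimal usage sketch of the new placement interface, assuming aclInit has already run; the shape, format, and the helper name MarkTensorOnHost are illustrative, not part of this change.

#include "acl/acl.h"

// Sketch: describe a float tensor and mark its data buffer as host-resident.
// Assumes aclInit() has been called; error handling is abbreviated.
aclError MarkTensorOnHost(aclTensorDesc **descOut) {
  int64_t dims[] = {1, 3, 224, 224};  // hypothetical shape
  aclTensorDesc *desc = aclCreateTensorDesc(ACL_FLOAT, 4, dims, ACL_FORMAT_NCHW);
  // New in this change: declare where the tensor's buffer lives
  // (ACL_MEMTYPE_HOST or ACL_MEMTYPE_DEVICE).
  aclError ret = aclSetTensorPlaceMent(desc, ACL_MEMTYPE_HOST);
  *descOut = desc;
  return ret;
}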
/**
* @ingroup AscendCL
* @brief an interface for users to output APP logs

@ -1203,6 +1203,18 @@ ACL_FUNC_VISIBILITY aclError aclmdlDestroyConfigHandle(aclmdlConfigHandle *handl
ACL_FUNC_VISIBILITY aclError aclmdlSetConfigOpt(aclmdlConfigHandle *handle, aclmdlConfigAttr attr,
const void *attrValue, size_t valueSize);
/**
* @ingroup AscendCL
* @brief get real tensor name from modelDesc
*
* @param modelDesc [IN] pointer to modelDesc
* @param name [IN] tensor name
*
* @retval the pointer to the real tensor name
* @retval Failure return NULL
*/
ACL_FUNC_VISIBILITY const char *aclmdlGetTensorRealName(const aclmdlDesc *modelDesc, const char *name);
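A hedged sketch of how the new name query might be used once a model is loaded; modelId and the placeholder name "input_0" are assumptions, not values from this change.

#include <cstdio>
#include "acl/acl.h"
#include "acl/acl_mdl.h"

// Sketch: look up the graph-internal name behind a user-visible tensor name.
// Assumes modelId refers to a model already loaded via aclmdlLoadFromFile.
void PrintRealTensorName(uint32_t modelId) {
  aclmdlDesc *modelDesc = aclmdlCreateDesc();
  if (aclmdlGetDesc(modelDesc, modelId) != ACL_SUCCESS) {
    (void)aclmdlDestroyDesc(modelDesc);
    return;
  }
  const char *realName = aclmdlGetTensorRealName(modelDesc, "input_0");  // "input_0" is a placeholder
  if (realName != nullptr) {
    printf("real tensor name: %s\n", realName);
  }
  (void)aclmdlDestroyDesc(modelDesc);
}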
#ifdef __cplusplus
}
#endif

@ -25,6 +25,8 @@
extern "C" {
#endif
#define ACL_EVENT_TIME_LINE 0x00000008u
typedef enum aclrtRunMode {
ACL_DEVICE,
ACL_HOST,
@ -425,6 +427,18 @@ ACL_FUNC_VISIBILITY aclError aclrtGetDeviceCount(uint32_t *count);
*/
ACL_FUNC_VISIBILITY aclError aclrtCreateEvent(aclrtEvent *event);
/**
* @ingroup AscendCL
* @brief create event instance with flag
*
* @param event [OUT] created event
* @param flag [IN] event flag
*
* @retval ACL_SUCCESS The function is successfully executed.
* @retval OtherValues Failure
*/
ACL_FUNC_VISIBILITY aclError aclrtCreateEventWithFlag(aclrtEvent *event, uint32_t flag);
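A short sketch of the flagged event API, assuming a device and stream are already set up; the ACL_EVENT_TIME_LINE flag is the one defined earlier in this change, and the helper name is illustrative.

#include "acl/acl.h"

// Sketch: create a timeline-capable event, record it on a stream, and wait for it.
aclError TimeLineEventDemo(aclrtStream stream) {
  aclrtEvent event = nullptr;
  aclError ret = aclrtCreateEventWithFlag(&event, ACL_EVENT_TIME_LINE);  // new flagged variant
  if (ret != ACL_SUCCESS) {
    return ret;
  }
  ret = aclrtRecordEvent(event, stream);   // mark a point on the stream
  if (ret == ACL_SUCCESS) {
    ret = aclrtSynchronizeEvent(event);    // block until that point is reached
  }
  (void)aclrtDestroyEvent(event);
  return ret;
}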
/**
* @ingroup AscendCL
* @brief destroy event instance

@ -66,15 +66,14 @@ extern HcclResult HcclCommInitRootInfo(uint32_t nRanks, const HcclRootInfo *root
* @param sendBuf A pointer identifying the input data address of the operator.
* @param recvBuf A pointer identifying the output data address of the operator.
* @param count An integer(u64) identifying the number of the output data.
* @param dataType The data type of the operator, must be one of the following types: int8, int16, int32, float16,
* float32.
* @param dataType The data type of the operator, must be one of the following types: int8, int16, int32, float16, float32.
* @param op The reduction type of the operator, must be one of the following types: sum, min, max, prod.
* @param comm A pointer identifying the communication resource the operator is based on.
* @param stream A pointer identifying the stream information.
* @return HcclResult
*/
extern HcclResult HcclAllReduce(void *sendBuf, void *recvBuf, uint64_t count, HcclDataType dataType, HcclReduceOp op,
HcclComm comm, aclrtStream stream);
extern HcclResult HcclAllReduce(void *sendBuf, void *recvBuf, uint64_t count, HcclDataType dataType,
HcclReduceOp op, HcclComm comm, aclrtStream stream);
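A hedged usage sketch for the declaration above, assuming comm was created with HcclCommInitRootInfo and devBuf is device memory holding count float values; the helper name is illustrative.

#include "acl/acl.h"
#include "hccl/hccl.h"

// Sketch: in-place float32 sum all-reduce over an existing communicator.
HcclResult AllReduceSum(void *devBuf, uint64_t count, HcclComm comm, aclrtStream stream) {
  HcclResult ret = HcclAllReduce(devBuf, devBuf, count, HCCL_DATA_TYPE_FP32,
                                 HCCL_REDUCE_SUM, comm, stream);
  if (ret != HCCL_SUCCESS) {
    return ret;
  }
  // The collective is asynchronous; wait on the stream before reading devBuf.
  (void)aclrtSynchronizeStream(stream);
  return HCCL_SUCCESS;
}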
/**
* @brief Broadcast operator.
@ -116,8 +115,8 @@ extern HcclResult HcclReduceScatter(void *sendBuf, void *recvBuf, uint64_t recvC
* @param stream A pointer identifying the stream information.
* @return HcclResult
*/
extern HcclResult HcclAllGather(void *sendBuf, void *recvBuf, uint64_t sendCount, HcclDataType dataType, HcclComm comm,
aclrtStream stream);
extern HcclResult HcclAllGather(void *sendBuf, void *recvBuf, uint64_t sendCount, HcclDataType dataType,
HcclComm comm, aclrtStream stream);
/**
* @brief Destroy HCCL comm

@ -3781,6 +3781,32 @@ REG_OP(ArgMaxGradD)
.REQUIRED_ATTR(dimension, Int)
.OP_END_FACTORY_REG(ArgMaxGradD)
/**
*@brief Returns the cosine similarity between x1 and x2, computed along dim. \n
*@par Inputs:
*Two inputs, including:
* @li input_x1: A tensor. Must be one of the following types:
* float32. \n
* @li input_x2: A tensor. Must be one of the following types:
* float32. \n
*@par Attributes:
*@li dim: An optional int. Defaults to 1. \n
*@li eps: An optional float. Defaults to 1e-8. \n
*@par Outputs:
*@li output_y: A Tensor with the same type as input_x1. \n
*@par Third-party framework compatibility
*Compatible with the PyTorch operator CosineSimilarity. \n
*/
REG_OP(CosineSimilarity)
.INPUT(input_x1, TensorType({DT_FLOAT})) /* "First operand." */
.INPUT(input_x2, TensorType({DT_FLOAT})) /* "Second operand." */
.OUTPUT(output_y, TensorType({DT_FLOAT})) /* "Result, has same element type as two inputs" */
.ATTR(dim, Int, 1)
.ATTR(eps, Float, 1e-8)
.OP_END_FACTORY_REG(CosineSimilarity)
} // namespace ge
#endif // OPS_BUILT_IN_OP_PROTO_INC_ELEWISE_CALCULATION_OPS_H_
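A host-side illustration of the CosineSimilarity semantics documented above, for a single 1-D slice and the eps default from the registration; this is a reference sketch, not the kernel.

#include <algorithm>
#include <cmath>
#include <vector>

// cos_sim = <x1, x2> / (max(||x1||, eps) * max(||x2||, eps))
float CosineSimilarity1D(const std::vector<float> &x1, const std::vector<float> &x2,
                         float eps = 1e-8f) {
  float dot = 0.0f, n1 = 0.0f, n2 = 0.0f;
  for (size_t i = 0; i < x1.size(); ++i) {
    dot += x1[i] * x2[i];
    n1 += x1[i] * x1[i];
    n2 += x2[i] * x2[i];
  }
  const float denom = std::max(std::sqrt(n1), eps) * std::max(std::sqrt(n2), eps);
  return dot / denom;
}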

@ -952,6 +952,36 @@ REG_OP(Angle)
.ATTR(Tout, Type, DT_FLOAT)
.OP_END_FACTORY_REG(Angle)
/**
*@brief Computes the gradient of SoftMarginLoss. \n
*@par Inputs:
*Three inputs, including:
* @li predict: A tensor. Must be one of the following types:
* float16, float32. \n
* @li label: A tensor with the same shape as predict. Must be one of the following types:
* float16, float32. \n
* @li dout: A tensor with the same shape as predict. Must be one of the following types:
* float16, float32. \n
*@par Attributes:
* @li reduction: Specifies the reduction to apply to the output:
* 'none' | 'mean' | 'sum'. Default: 'mean'. \n
*@par Outputs:
* gradient: A Tensor with the same type as predict. \n
*@par Third-party framework compatibility
*Compatible with the PyTorch operator SoftMarginLoss Backward. \n
*/
REG_OP(SoftMarginLossGrad)
.INPUT(predict, TensorType({DT_FLOAT16,DT_FLOAT}))
.INPUT(label, TensorType({DT_FLOAT16,DT_FLOAT}))
.INPUT(dout, TensorType({DT_FLOAT16,DT_FLOAT}))
.OUTPUT(gradient, TensorType({DT_FLOAT16,DT_FLOAT}))
.ATTR(reduction, String, "mean")
.OP_END_FACTORY_REG(SoftMarginLossGrad)
} // namespace ge
#endif // OPS_BUILT_IN_OP_PROTO_INC_MATH_OPS_H_
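A reference illustration of the SoftMarginLossGrad semantics with reduction == "mean", assuming dout is the scalar upstream gradient in that case; sketch only.

#include <cmath>
#include <vector>

// d/dpredict log(1 + exp(-label * predict)) = -label / (1 + exp(label * predict)),
// scaled by dout and by 1/N for the mean reduction.
std::vector<float> SoftMarginLossGradMean(const std::vector<float> &predict,
                                          const std::vector<float> &label, float dout) {
  std::vector<float> grad(predict.size());
  const float scale = dout / static_cast<float>(predict.size());
  for (size_t i = 0; i < predict.size(); ++i) {
    grad[i] = -label[i] / (1.0f + std::exp(label[i] * predict[i])) * scale;
  }
  return grad;
}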

@ -1205,6 +1205,34 @@ REG_OP(Centralization)
.ATTR(axes, ListInt, {-1})
.OP_END_FACTORY_REG(Centralization)
/**
*@brief Calculates the loss. Creates a criterion that optimizes a two-class classification
logistic loss between input_x and input_y (containing 1 or -1). \n
*@par Inputs:
*Two inputs, including:
* @li input_x: A tensor. Must be one of the following types:
* float16, float32. \n
* @li input_y: A tensor. Must be one of the following types:
* float16, float32. \n
*@par Attributes:
*@li reduction: An optional string. Defaults to "mean". \n
*@par Outputs:
*output_z: When reduction == "none", a Tensor with the same type and shape as input_x. \n
* When reduction == "sum" or "mean", a Tensor with the same type as input_x and shape (1,). \n
*@par Third-party framework compatibility
*Compatible with the PyTorch operator SoftMarginLoss. \n
*/
REG_OP(SoftMarginLoss)
.INPUT(input_x, TensorType({DT_FLOAT, DT_FLOAT16}))
.INPUT(input_y, TensorType({DT_FLOAT, DT_FLOAT16}))
.ATTR(reduction, String, "mean")
.OUTPUT(output_z, TensorType({DT_FLOAT, DT_FLOAT16}))
.OP_END_FACTORY_REG(SoftMarginLoss)
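A reference illustration of the SoftMarginLoss semantics with the default reduction "mean"; sketch only, not the device implementation.

#include <cmath>
#include <vector>

// loss_i = log(1 + exp(-input_y_i * input_x_i)), averaged over all elements.
float SoftMarginLossMean(const std::vector<float> &input_x, const std::vector<float> &input_y) {
  float sum = 0.0f;
  for (size_t i = 0; i < input_x.size(); ++i) {
    sum += std::log(1.0f + std::exp(-input_y[i] * input_x[i]));
  }
  return sum / static_cast<float>(input_x.size());
}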
/**
* @brief Computes gradients of sigmoid_cross_entropy_with_logits_v2.

@ -792,6 +792,34 @@ REG_OP(HardShrink)
.ATTR(lambd, Float, 0.5)
.OP_END_FACTORY_REG(HardShrink)
/**
*@brief Calculates the gradient of the hard shrink function. \n
*
* Computes the gradient for HardShrink: the gradient passes through where
* features > lambda or features < -lambda, and is 0 otherwise.
*
*@par Inputs:
*Two inputs, including:
* @li gradients: A tensor. Must be one of the following types:
* float16, float32. \n
* @li features: A tensor. Must be one of the following types:
* float16, float32. \n
*
*@par Outputs:
*backprops: A Tensor with the same type and shape as features. \n
*
*@par Attributes:
*@li lambda: An optional float. Defaults to 0.5. \n
*
*@par Third-party framework compatibility
*Compatible with the PyTorch operator Hardshrink_backward. \n
*/
REG_OP(HardShrinkGrad)
.INPUT(gradients, TensorType({DT_FLOAT16, DT_FLOAT}))
.INPUT(features, TensorType({DT_FLOAT16, DT_FLOAT}))
.OUTPUT(backprops, TensorType({DT_FLOAT16, DT_FLOAT}))
.ATTR(lambda, Float, 0.5)
.OP_END_FACTORY_REG(HardShrinkGrad)
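A reference illustration of the HardShrinkGrad semantics described above; sketch only.

#include <cmath>
#include <vector>

// The incoming gradient passes through where |features| > lambda and is 0 elsewhere.
std::vector<float> HardShrinkGradRef(const std::vector<float> &gradients,
                                     const std::vector<float> &features, float lambda = 0.5f) {
  std::vector<float> backprops(features.size());
  for (size_t i = 0; i < features.size(); ++i) {
    backprops[i] = (std::fabs(features[i]) > lambda) ? gradients[i] : 0.0f;
  }
  return backprops;
}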
/**
* @brief Calculate the hard sigmoid function. \n
@ -884,6 +912,36 @@ REG_OP(LogSigmoid)
.INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) /* "input:x" */
.OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) /* "output:y" */
.OP_END_FACTORY_REG(LogSigmoid)
/**
*@brief Calculates the backward outputs of the function "hard_sigmoid". \n
*@par Inputs:
*Two inputs, including:
* @li grads: A tensor. Must be one of the following types:
* float16, float32. \n
* @li input_x: A tensor. Must be one of the following types:
* float16, float32. \n
*@par Outputs:
*One output, including:
* @li y: A tensor with the same type and shape as input_x. \n
* @par Attributes:
* @li alpha: An optional float. Defaults to 0.16666666. \n
* @li beta: An optional float. Defaults to 0.5. \n
*@par Third-party framework compatibility
*Compatible with the PyTorch operator HardSigmoidGrad. \n
*/
REG_OP(HardSigmoidGrad)
.INPUT(grads, TensorType({DT_FLOAT, DT_FLOAT16}))
.INPUT(input_x, TensorType({DT_FLOAT, DT_FLOAT16}))
.OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
.ATTR(alpha, Float, 0.16666666)
.ATTR(beta, Float, 0.5)
.OP_END_FACTORY_REG(HardSigmoidGrad)
} // namespace ge
#endif // OPS_BUILT_IN_OP_PROTO_INC_NONLINEAR_FUC_OPS_H_
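A reference illustration of the HardSigmoidGrad semantics, assuming hard_sigmoid(x) = clip(alpha*x + beta, 0, 1), so the derivative is alpha inside the linear region and 0 where the clip saturates; sketch only.

#include <vector>

std::vector<float> HardSigmoidGradRef(const std::vector<float> &grads,
                                      const std::vector<float> &input_x,
                                      float alpha = 0.16666666f, float beta = 0.5f) {
  std::vector<float> y(input_x.size());
  for (size_t i = 0; i < input_x.size(); ++i) {
    const float t = alpha * input_x[i] + beta;
    y[i] = (t > 0.0f && t < 1.0f) ? alpha * grads[i] : 0.0f;  // zero gradient in the saturated regions
  }
  return y;
}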

@ -737,14 +737,51 @@ where ho/wo is do = (output_d + 2*padding_d - dilation_d*(kernel_d - 1) - 1)//st
* Compatible with Pytorch col2im/im2col_backward operator.
*/
REG_OP(Col2im)
.INPUT(x, TensorType({DT_FLOAT}))
.INPUT(output_size, TensorType({DT_INT32}))
.OUTPUT(y, TensorType({DT_FLOAT}))
.INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
.INPUT(output_size, TensorType({DT_INT32, DT_INT32}))
.OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
.REQUIRED_ATTR(kernel_size, ListInt)
.REQUIRED_ATTR(dilation, ListInt)
.REQUIRED_ATTR(padding, ListInt)
.REQUIRED_ATTR(stride, ListInt)
.OP_END_FACTORY_REG(Col2im)
/**
*@brief Generates a 2D or 3D flow field (sampling grid), given a batch of affine
matrices theta. \n
*@par Inputs:
*Input theta must be float16 or float, output_size must be int32 type. Inputs
include:
*@li theta: input batch of affine matrices with shape (N,2,3) for 2D or (N,3,4)
for 3D
*@li output_size: the target output image size (N×C×H×W for 2D or N×C×D×H×W for
3D). Example: torch.Size((32, 3, 24, 24)). \n
*@par Attributes:
*align_corners: if True, consider -1 and 1 to refer to the centers of the corner
pixels rather than the image corners. Refer to grid_sample() for a more complete
description. A grid generated by affine_grid() should be passed to grid_sample()
with the same setting for this option. Default: False. \n
*@par Outputs:
*@li y: the generated sampling grid, with the same type as theta; shape (N, H, W, 2)
for 2D or (N, D, H, W, 3) for 3D. \n
*@attention Constraints:
*Input theta must be float16 or float, output_size must be int32 type. \n
*@par Third-party framework compatibility
*Compatible with the PyTorch affine_grid operator.
*/
REG_OP(AffineGrid)
.INPUT(theta, TensorType({DT_FLOAT16, DT_FLOAT}))
.INPUT(output_size, TensorType({DT_INT32}))
.OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
.ATTR(align_corners, Bool, false)
.OP_END_FACTORY_REG(AffineGrid)
} // namespace ge
#endif // OPS_BUILT_IN_OP_PROTO_INC_TRANSFORMATION_OPS_H_
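A host-side illustration of the AffineGrid semantics for the 2D case, matching the PyTorch affine_grid convention of normalized coordinates in [-1, 1]; the flat (N, H, W, 2) layout and helper name are assumptions of this sketch.

#include <vector>

// Each output location (h, w) gets a normalized coordinate that is transformed by the
// per-batch 2x3 matrix theta; returns a flat grid laid out as (N, H, W, 2).
std::vector<float> AffineGrid2D(const std::vector<float> &theta,  // N * 2 * 3, row-major
                                int N, int H, int W, bool align_corners = false) {
  auto norm = [&](int i, int size) {
    return align_corners ? -1.0f + 2.0f * i / (size - 1)
                         : (2.0f * i + 1.0f) / size - 1.0f;
  };
  std::vector<float> grid(static_cast<size_t>(N) * H * W * 2);
  for (int n = 0; n < N; ++n) {
    const float *t = &theta[static_cast<size_t>(n) * 6];
    for (int h = 0; h < H; ++h) {
      for (int w = 0; w < W; ++w) {
        const float x = norm(w, W), y = norm(h, H);
        const size_t base = ((static_cast<size_t>(n) * H + h) * W + w) * 2;
        grid[base + 0] = t[0] * x + t[1] * y + t[2];
        grid[base + 1] = t[3] * x + t[4] * y + t[5];
      }
    }
  }
  return grid;
}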

@ -30,6 +30,11 @@ extern "C" {
#define RT_EVENT_DEFAULT (0x00)
#define RT_EVENT_WITH_FLAG (0x01)
#define RT_EVENT_DDSYNC_NS 0x01U
#define RT_EVENT_STREAM_MARK 0x02U
#define RT_EVENT_DDSYNC 0x04U
#define RT_EVENT_TIME_LINE 0x08U
/**
* @ingroup dvrt_event
* @brief create event instance

@ -141,7 +141,7 @@ enum {
IDEDD, /**< IDE daemon device */
IDEDH, /**< IDE daemon host */
HCCL, /**< HCCL */
FMK, /**< Framework */
FMK, /**< Adapter */
HIAIENGINE, /**< Matrix */
DVPP, /**< DVPP */
RUNTIME, /**< Runtime */
@ -162,11 +162,11 @@ enum {
MDCDEFAULT, /**< MDC undefine */
MDCSC, /**< MDC spatial cognition */
MDCPNC,
MLL,
MLL, /**< abandoned */
DEVMM, /**< Dlog memory management */
KERNEL, /**< Kernel */
LIBMEDIA, /**< Libmedia */
CCECPU, /**< ai cpu */
CCECPU, /**< aicpu schedule */
ASCENDDK, /**< AscendDK */
ROS, /**< ROS */
HCCP,
@ -179,7 +179,7 @@ enum {
TSDUMP, /**< TSDUMP module */
AICPU, /**< AICPU module */
LP, /**< LP module */
TDT,
TDT, /**< tsdaemon or aicpu schedule */
FE,
MD,
MB,
