update include headers

pull/1301/head
Author: shenwei41
parent d9d99c3cf5
commit 0f490f37d4

@@ -26,9 +26,9 @@ extern "C" {
 #endif
 // Current version is 1.0.0
 #define ACL_MAJOR_VERSION 1
 #define ACL_MINOR_VERSION 0
 #define ACL_PATCH_VERSION 0
 /**
  * @ingroup AscendCL
@@ -70,4 +70,4 @@ ACL_FUNC_VISIBILITY aclError aclrtGetVersion(int32_t *majorVersion, int32_t *min
 }
 #endif
 #endif // INC_EXTERNAL_ACL_ACL_H_
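The version macros above pair with the aclrtGetVersion entry point visible in the second hunk. A minimal sketch of checking the two against each other, assuming the usual aclInit/aclFinalize lifecycle and the ACL_SUCCESS status code; this is illustrative usage, not code from this commit:

```cpp
// Compare the runtime version with the compile-time macros from acl.h.
// Assumes aclInit/aclFinalize and ACL_SUCCESS are available as usual.
#include <cstdint>
#include <cstdio>
#include "acl/acl.h"

int main() {
    if (aclInit(nullptr) != ACL_SUCCESS) return 1;  // default config path
    int32_t major = 0, minor = 0, patch = 0;
    if (aclrtGetVersion(&major, &minor, &patch) == ACL_SUCCESS) {
        std::printf("runtime %d.%d.%d, header %d.%d.%d\n",
                    static_cast<int>(major), static_cast<int>(minor),
                    static_cast<int>(patch),
                    ACL_MAJOR_VERSION, ACL_MINOR_VERSION, ACL_PATCH_VERSION);
    }
    return aclFinalize() == ACL_SUCCESS ? 0 : 1;
}
```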

(File diff suppressed because it is too large.)

@@ -82,6 +82,25 @@ REG_OP(Cholesky)
                           DT_FLOAT16, DT_COMPLEX64, DT_COMPLEX128}))
     .OP_END_FACTORY_REG(Cholesky)
+
+/**
+*@brief Computes the outer product of two 1D vectors . \n
+
+*@par Inputs:
+*The inputs x and vec2 have to be 1D vectors. Inputs include:
+*@li x: A Tensor. Must be one of the following types: float16, float32.
+Shape is [N] . \n
+*@li vec2: A Tensor. Must have the same type as x. Shape is [M] . \n
+
+*@par Outputs:
+*y: A Tensor. Has the same type as x . \n
+*/
+REG_OP(Ger)
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(vec2, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OP_END_FACTORY_REG(Ger)
+
 /**
 *@brief Computes the sign and the log of the absolute value of the determinant
 of one or more square matrices . \n
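The new Ger registration is the outer product. A minimal sketch of the computed semantics in plain C++ (not the AscendCL kernel), assuming x has shape [N] and vec2 has shape [M]; the Outer helper name is illustrative only:

```cpp
// Outer product: for x of shape [N] and vec2 of shape [M],
// y[i][j] = x[i] * vec2[j], giving y of shape [N, M].
#include <cstddef>
#include <vector>

std::vector<std::vector<float>> Outer(const std::vector<float>& x,
                                      const std::vector<float>& vec2) {
    std::vector<std::vector<float>> y(x.size(),
                                      std::vector<float>(vec2.size()));
    for (std::size_t i = 0; i < x.size(); ++i) {
        for (std::size_t j = 0; j < vec2.size(); ++j) {
            y[i][j] = x[i] * vec2[j];  // each output element is one product
        }
    }
    return y;
}
```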

(File diff suppressed because it is too large.)

@@ -525,6 +525,31 @@ REG_OP(LayerNorm)
     .ATTR(epsilon, Float, 0.0000001)
     .OP_END_FACTORY_REG(LayerNorm)
+
+/**
+*@brief Returns a tensor where each sub-tensor of input along dimension
+* dim is normalized such that the p-norm of the sub-tensor is lower than the value maxnorm. \n
+
+*@par Inputs:
+*One input, including:
+* @li x: A Tensor. Must be one of the following types: float16, float32 . \n
+
+*@par Attributes:
+* @li p: Specifies the L_p norm to compute, the type is float.
+* @li dim: The dimension along which sub-tensors are taken, the type is int.
+* @li maxnorm: The threshold the norm is compared against, the type is float. \n
+
+*@par Outputs:
+*One output, including:
+* @li y: A Tensor. Has the same shape and type as input x.
+*/
+REG_OP(Renorm)
+    .INPUT(x, TensorType::BasicType())
+    .OUTPUT(y, TensorType::BasicType())
+    .REQUIRED_ATTR(p, Float)
+    .REQUIRED_ATTR(dim, Int)
+    .REQUIRED_ATTR(maxnorm, Float)
+    .OP_END_FACTORY_REG(Renorm)
+
 /**
 *@brief LayerNormGrad operator interface implementation
 * calculating: dy, x, variance, mean, gamma
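Renorm's contract is easier to see in scalar code. A minimal sketch of the semantics for a 2-D input with dim = 0, treating each row as one sub-tensor; RenormRows and the row-major layout are illustrative assumptions, not the operator's implementation:

```cpp
// For each sub-tensor (row), compute its L_p norm; if the norm exceeds
// maxnorm, rescale the row by maxnorm / norm so the norm drops to maxnorm.
#include <cmath>
#include <vector>

void RenormRows(std::vector<std::vector<float>>& x, float p, float maxnorm) {
    for (auto& row : x) {
        float norm = 0.0f;
        for (float v : row) norm += std::pow(std::fabs(v), p);
        norm = std::pow(norm, 1.0f / p);         // L_p norm of the sub-tensor
        if (norm > maxnorm && norm > 0.0f) {
            const float scale = maxnorm / norm;  // shrink to the threshold
            for (float& v : row) v *= scale;
        }
    }
}
```

Sub-tensors whose norm is already below maxnorm pass through unchanged, which is why the docstring phrases the result as the norm being "lower than the value maxnorm".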

@@ -397,8 +397,8 @@ No default value.
 specifying the stride of the sliding window for each dimension of
 the input tensor. No default value.
 *@li padding: A required string type of float16.
-*@li pads: A list type of int32. Default value {0, 0, 0}.
-*@li dilation: A list type of int32. Default value {1, 1, 1}.
+*@li pads: A list type of int32. Default value {0,0,0,0,0,0}.
+*@li dilation: A list type of int32. Default value {1,1,1,1,1,1}.
 *@li ceil_mode: A ceil mode number of int32 . Default value 0.
 *@li data_format: An optional string. Defaults to "NDHWC" . \n
@@ -421,8 +421,8 @@ REG_OP(MaxPool3D)
     .REQUIRED_ATTR(ksize, ListInt)
     .REQUIRED_ATTR(strides, ListInt)
     .REQUIRED_ATTR(padding, String)
-    .ATTR(pads, ListInt, {0,0,0})
-    .ATTR(dilation, ListInt, {1,1,1})
+    .ATTR(pads, ListInt, {0,0,0,0,0,0})
+    .ATTR(dilation, ListInt, {1,1,1,1,1,1})
     .ATTR(ceil_mode, Int, 0)
     .ATTR(data_format, String, "NDHWC")
     .OP_END_FACTORY_REG(MaxPool3D)
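The widened defaults fit a 3-D window: presumably a before/after pad for each of the D, H, and W axes (six values) and a per-axis dilation. A hedged sketch of the conventional pooling output-length arithmetic for one spatial axis; the formula is the standard one, not quoted from this repository:

```cpp
// Output length of one pooled spatial axis (D, H, or W), given its pair of
// pads from the six-element list. ceil_mode = 1 rounds partial windows up.
#include <cmath>

int PooledLen(int in, int ksize, int stride, int pad_before, int pad_after,
              int dilation, bool ceil_mode) {
    const int window = dilation * (ksize - 1) + 1;             // effective kernel extent
    const double span = in + pad_before + pad_after - window;  // sliding range
    const double steps = span / stride;
    return (ceil_mode ? static_cast<int>(std::ceil(steps))
                      : static_cast<int>(std::floor(steps))) + 1;
}
```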

@@ -418,7 +418,7 @@ REG_OP(EmbeddingRankId)
 */
 REG_OP(FillV2)
     .INPUT(dims, TensorType({DT_INT16, DT_INT32, DT_INT64}))
-    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT, DT_DOUBLE, DT_INT8, DT_INT16, DT_INT32, DT_INT64}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT8, DT_INT16, DT_INT32, DT_INT64}))
     .ATTR(value, Float, 0)
     .OP_END_FACTORY_REG(FillV2)
@@ -437,7 +437,7 @@ REG_OP(FillV2)
 * Compatible with the ONNX operator ConstantOfShape.
 */
 REG_OP(FillV2D)
-    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT, DT_DOUBLE, DT_INT8, DT_UINT8, DT_INT16, DT_INT32, DT_INT64}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT8, DT_UINT8, DT_INT16, DT_INT32, DT_INT64}))
     .ATTR(value, Float, 0)
     .REQUIRED_ATTR(dims, ListInt)
     .OP_END_FACTORY_REG(FillV2D)
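Both hunks replace a duplicated DT_FLOAT in the output type list with the intended DT_FLOAT16. The fix matters because FillV2/FillV2D mirror ONNX ConstantOfShape: produce a tensor of the requested shape with every element set to the value attribute. A minimal float-only sketch of that semantics; the Fill helper name is illustrative only:

```cpp
// Build a tensor of shape `dims` with every element equal to `value`
// (flat storage; the element count is the product of the dimensions).
#include <cstddef>
#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

std::vector<float> Fill(const std::vector<int64_t>& dims, float value) {
    const int64_t count = std::accumulate(dims.begin(), dims.end(), int64_t{1},
                                          std::multiplies<int64_t>());
    return std::vector<float>(static_cast<std::size_t>(count), value);
}
```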
