!1302 update include headers 0318

From: @shenwei41
Reviewed-by: @xsmq, @liucunwei
Signed-off-by: @liucunwei
pull/1302/MERGE
mindspore-ci-bot committed via Gitee, 4 years ago
commit 745153a252

@@ -82,6 +82,25 @@ REG_OP(Cholesky)
DT_FLOAT16, DT_COMPLEX64, DT_COMPLEX128}))
.OP_END_FACTORY_REG(Cholesky)
/**
*@brief Computes the outer product of two 1D vectors. \n
*@par Inputs:
*The inputs x and vec2 must both be 1D vectors. Inputs include:
*@li x: A Tensor. Must be one of the following types: float16, float32.
Shape is [N]. \n
*@li vec2: A Tensor. Must have the same type as x. Shape is [M]. \n
*@par Outputs:
*y: A Tensor. Has the same type as x. \n
*/
REG_OP(Ger)
.INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
.INPUT(vec2, TensorType({DT_FLOAT16, DT_FLOAT}))
.OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
.OP_END_FACTORY_REG(Ger)
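
For reference, a minimal host-side sketch of the computation the Ger comment describes (plain C++, an illustration of the semantics only, not the Ascend kernel; the helper name Outer is hypothetical):

#include <cstddef>
#include <vector>

// Illustrative only: outer product of a length-N vector x and a length-M
// vector vec2, producing an N x M matrix y with y[i][j] = x[i] * vec2[j].
std::vector<std::vector<float>> Outer(const std::vector<float> &x,
                                      const std::vector<float> &vec2) {
  std::vector<std::vector<float>> y(x.size(), std::vector<float>(vec2.size()));
  for (std::size_t i = 0; i < x.size(); ++i) {
    for (std::size_t j = 0; j < vec2.size(); ++j) {
      y[i][j] = x[i] * vec2[j];
    }
  }
  return y;
}
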
/**
*@brief Computes the sign and the log of the absolute value of the determinant
of one or more square matrices. \n

File diff suppressed because it is too large.

@@ -525,6 +525,31 @@ REG_OP(LayerNorm)
.ATTR(epsilon, Float, 0.0000001)
.OP_END_FACTORY_REG(LayerNorm)
/**
*@brief Returns a tensor where each sub-tensor of the input along dimension
* dim is normalized such that the p-norm of the sub-tensor is lower than the value maxnorm. \n
*@par Inputs:
*One input, including:
* @li x: A Tensor. Must be one of the following types: float16, float32. \n
*@par Attributes:
* @li p: The exponent of the L_p norm to use. The type is float.
* @li dim: The dimension along which each sub-tensor is normalized. The type is int.
* @li maxnorm: The norm threshold to compare against. The type is float. \n
*@par Outputs:
*One output, including:
* @li y: A Tensor. Has the same shape and dtype as the input x.
*/
REG_OP(Renorm)
.INPUT(x, TensorType::BasicType())
.OUTPUT(y, TensorType::BasicType())
.REQUIRED_ATTR(p, Float)
.REQUIRED_ATTR(dim, Int)
.REQUIRED_ATTR(maxnorm, Float)
.OP_END_FACTORY_REG(Renorm)
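
The Renorm comment above can be illustrated with a small sketch (plain C++, semantics only under the usual renorm interpretation; RenormRows is a hypothetical helper covering the 2D, dim = 0 case):

#include <cmath>
#include <vector>

// Illustrative only: for each row (sub-tensor along dim 0, assumed layout),
// compute its L_p norm; rows whose norm exceeds maxnorm are rescaled so the
// norm becomes maxnorm, other rows are left unchanged.
void RenormRows(std::vector<std::vector<float>> &x, float p, float maxnorm) {
  for (auto &row : x) {
    float norm = 0.0f;
    for (float v : row) {
      norm += std::pow(std::fabs(v), p);
    }
    norm = std::pow(norm, 1.0f / p);
    if (norm > maxnorm) {
      const float scale = maxnorm / norm;
      for (float &v : row) {
        v *= scale;
      }
    }
  }
}
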
/**
*@brief LayerNormGrad operator interface implementation
* calculating: dy, x, variance, mean, gamma

@@ -397,8 +397,8 @@ No default value.
specifying the stride of the sliding window for each dimension of
the input tensor. No default value.
*@li padding: A required string.
- *@li pads: A list type of int32. Default value {0, 0, 0}.
- *@li dilation: A list type of int32. Default value {1, 1, 1}.
+ *@li pads: A list type of int32. Default value {0,0,0,0,0,0}.
+ *@li dilation: A list type of int32. Default value {1,1,1,1,1,1}.
*@li ceil_mode: A ceil mode flag of type int32. Default value 0.
*@li data_format: An optional string. Defaults to "NDHWC". \n
@@ -421,8 +421,8 @@ REG_OP(MaxPool3D)
.REQUIRED_ATTR(ksize, ListInt)
.REQUIRED_ATTR(strides, ListInt)
.REQUIRED_ATTR(padding, String)
- .ATTR(pads, ListInt, {0,0,0})
- .ATTR(dilation, ListInt, {1,1,1})
+ .ATTR(pads, ListInt, {0,0,0,0,0,0})
+ .ATTR(dilation, ListInt, {1,1,1,1,1,1})
.ATTR(ceil_mode, Int, 0)
.ATTR(data_format, String, "NDHWC")
.OP_END_FACTORY_REG(MaxPool3D)
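
The pads and dilation defaults grow from three to six int32 entries in this hunk. As a rough guide, assuming the conventional pooling arithmetic (the exact meaning and ordering of the six entries is not spelled out in this diff), the output extent along one spatial dimension can be sketched as follows; PoolOutDim is a hypothetical helper:

#include <cmath>

// Illustrative only: output extent of one spatial dimension of a pooling op,
// using the conventional formula. pad_before/pad_after are one before/after
// pair taken from the pads list; dilation is the matching dilation entry.
int PoolOutDim(int in, int k, int stride, int pad_before, int pad_after,
               int dilation, bool ceil_mode) {
  const double span = in + pad_before + pad_after - dilation * (k - 1) - 1;
  const double out = span / stride + 1;
  return static_cast<int>(ceil_mode ? std::ceil(out) : std::floor(out));
}
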

@@ -418,7 +418,7 @@ REG_OP(EmbeddingRankId)
*/
REG_OP(FillV2)
.INPUT(dims, TensorType({DT_INT16, DT_INT32, DT_INT64}))
- .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT, DT_DOUBLE, DT_INT8, DT_INT16, DT_INT32, DT_INT64}))
+ .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT8, DT_INT16, DT_INT32, DT_INT64}))
.ATTR(value, Float, 0)
.OP_END_FACTORY_REG(FillV2)
@@ -437,7 +437,7 @@ REG_OP(FillV2)
* Compatible with the ONNX operator ConstantOfShape.
*/
REG_OP(FillV2D)
- .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT, DT_DOUBLE, DT_INT8, DT_UINT8, DT_INT16, DT_INT32, DT_INT64}))
+ .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT8, DT_UINT8, DT_INT16, DT_INT32, DT_INT64}))
.ATTR(value, Float, 0)
.REQUIRED_ATTR(dims, ListInt)
.OP_END_FACTORY_REG(FillV2D)
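
FillV2D (and FillV2, which takes dims as a tensor input rather than a required attribute) produces a tensor of the requested shape with every element set to value, matching the ONNX ConstantOfShape behaviour noted above. A minimal sketch in plain C++ (FillV2Sketch is a hypothetical helper using a flat float buffer):

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative only: produce a flat buffer with prod(dims) elements,
// each initialized to `value`.
std::vector<float> FillV2Sketch(const std::vector<int64_t> &dims, float value) {
  int64_t count = 1;
  for (int64_t d : dims) {
    count *= d;
  }
  return std::vector<float>(static_cast<std::size_t>(count), value);
}
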
