update include headers

pull/1301/head
shenwei41 4 years ago
parent d9d99c3cf5
commit 0f490f37d4


@@ -82,6 +82,25 @@ REG_OP(Cholesky)
DT_FLOAT16, DT_COMPLEX64, DT_COMPLEX128}))
.OP_END_FACTORY_REG(Cholesky)
/**
*@brief Computes the outer product of two 1D vectors . \n
*@par Inputs:
*The inputs x and vec2 have to be 1D vectors. Inputs include:
*@li x: A Tensor. Must be one of the following types: float16, float32.
Shape is [N] . \n
*@li vec2: A Tensor. Must have the same type as x. Shape is [M] . \n
*@par Outputs:
*y: A Tensor. Has the same type as x . \n
*/
REG_OP(Ger)
.INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
.INPUT(vec2, TensorType({DT_FLOAT16, DT_FLOAT}))
.OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
.OP_END_FACTORY_REG(Ger)
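// For reference, Ger reduces to y[i][j] = x[i] * vec2[j]. A minimal host-side
// sketch of that semantics in plain C++ (illustrative only, not part of this
// header; the function name is an assumption):
#include <cstddef>
#include <vector>

// Outer product of two 1D vectors: x has shape [N], vec2 has shape [M],
// and the result y has shape [N, M].
std::vector<std::vector<float>> Ger(const std::vector<float> &x,
                                    const std::vector<float> &vec2) {
  std::vector<std::vector<float>> y(x.size(),
                                    std::vector<float>(vec2.size()));
  for (std::size_t i = 0; i < x.size(); ++i)
    for (std::size_t j = 0; j < vec2.size(); ++j)
      y[i][j] = x[i] * vec2[j];
  return y;
}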
/**
*@brief Computes the sign and the log of the absolute value of the determinant
of one or more square matrices . \n

@@ -1592,7 +1592,6 @@ selected indices from the boxes tensor, where M <= max_output_size. \n
*Compatible with onnx NonMaxSuppression operator.
*/
REG_OP(NonMaxSuppressionV7)
.INPUT(boxes, TensorType({DT_FLOAT16, DT_FLOAT}))
.INPUT(scores, TensorType({DT_FLOAT16, DT_FLOAT}))
@@ -1649,6 +1648,84 @@ REG_OP(RoiExtractor)
.ATTR(aligned, Bool, true)
.OP_END_FACTORY_REG(RoiExtractor)
/**
*@brief Performs Position-Sensitive ROI Pooling (PSROIPooling) . \n
*@par Inputs:
* Two inputs, including:
*@li x: An NC1HWC0 tensor of type float16 or float32, describing the feature
* map. Dimension C1 must be equal to
* (int((output_dim + 15) / C0)) * group_size * group_size.
*@li rois: A tensor of type float16 or float32, with shape
* [batch, 5, rois_num], describing the ROIs. Each ROI consists of five
* elements: "batch_id", "x1", "y1", "x2", and "y2", where "batch_id" indicates
* the index of the input feature map, and "x1", "y1", "x2", and "y2" must be
* greater than or equal to "0.0" . \n
*@par Attributes:
*@li output_dim: A required int32, specifying the number of output channels,
* must be greater than 0.
*@li group_size: A required int32, specifying the number of groups to encode
* position-sensitive score maps, must be within the range (0, 128).
*@li spatial_scale: A required float32, scaling factor for mapping the input
* coordinates to the ROI coordinates . \n
*@par Outputs:
*y: An NC1HWC0 tensor of type float16 or float32, describing the result
* feature map . \n
*@attention Constraints:
* NC1HWC0: channel must be group_size squared; rois_num must be a multiple of 16
*/
REG_OP(PSROIPoolingV2)
.INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
.INPUT(rois, TensorType({DT_FLOAT16, DT_FLOAT}))
.OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
.REQUIRED_ATTR(spatial_scale, Float)
.REQUIRED_ATTR(output_dim, Int)
.REQUIRED_ATTR(group_size, Int)
.OP_END_FACTORY_REG(PSROIPoolingV2)
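// The C1 constraint above can be checked ahead of time. A small sketch,
// assuming the usual NC1HWC0 block size C0 = 16 (the helper name is
// illustrative, not part of this header):
#include <cassert>

// C1 must equal ceil(output_dim / C0) * group_size * group_size, which
// int((output_dim + 15) / C0) computes for C0 = 16.
static int ExpectedC1(int output_dim, int group_size, int c0 = 16) {
  return ((output_dim + c0 - 1) / c0) * group_size * group_size;
}

int main() {
  assert(ExpectedC1(21, 7) == 98);  // ceil(21/16) = 2; 2 * 7 * 7 = 98
  return 0;
}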
/**
*@brief Computes the gradient of Position-Sensitive ROI Pooling (PSROIPoolingGrad) . \n
*@par Inputs:
* Two inputs, including:
*@li x: An NC1HWC0 tensor of type float16 or float32, describing the result
* feature map . \n
*@li rois: A tensor of type float16 or float32, with shape
* [batch, 5, rois_num], describing the ROIs. Each ROI consists of five
* elements: "batch_id", "x1", "y1", "x2", and "y2", where "batch_id" indicates
* the index of the input feature map, and "x1", "y1", "x2", and "y2" must be
* greater than or equal to "0.0" . \n
*@par Attributes:
*@li output_dim: A required int32, specifying the number of output channels,
* must be greater than 0.
*@li group_size: A required int32, specifying the number of groups to encode
* position-sensitive score maps, must be within the range (0, 128).
*@li spatial_scale: A required float32, scaling factor for mapping the input
* coordinates to the ROI coordinates . \n
*@li input_size: A required ListInt, specifying the (H, W) size of the grad input . \n
*@par Outputs:
*y: An NC1HWC0 tensor of type float16 or float32, describing the feature
* map. Dimension C1 must be equal to
* (int((output_dim + 15) / C0)) * group_size * group_size.
*@attention Constraints:
* NC1HWC0: channel must be group_size squared; rois_num must be a multiple of 16
*/
REG_OP(PSROIPoolingGradV2D)
.INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
.INPUT(rois, TensorType({DT_FLOAT16, DT_FLOAT}))
.OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
.REQUIRED_ATTR(spatial_scale, Float)
.REQUIRED_ATTR(output_dim, Int)
.REQUIRED_ATTR(group_size, Int)
.REQUIRED_ATTR(input_size, ListInt)
.OP_END_FACTORY_REG(PSROIPoolingGradV2D)
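// Since rois_num must be a multiple of 16 for both ops above, a caller
// would typically pad the ROI count. A sketch (the helper is hypothetical,
// not part of this header):
static int AlignedRoisNum(int rois_num) {
  // Round up to the next multiple of 16; the extra slots would be
  // filled with dummy ROIs by the caller. e.g. 100 -> 112.
  return ((rois_num + 15) / 16) * 16;
}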
} // namespace ge
#endif // OPS_BUILT_IN_OP_PROTO_INC_NN_DETECT_OPS_H_

@@ -525,6 +525,31 @@ REG_OP(LayerNorm)
.ATTR(epsilon, Float, 0.0000001)
.OP_END_FACTORY_REG(LayerNorm)
/**
*@brief Returns a tensor where each sub-tensor of input along dimension
* dim is normalized such that the p-norm of the sub-tensor is lower than the value maxnorm. \n
*@par Inputs:
*One input, including:
* @li x: A Tensor. Must be one of the following types: float16, float32 . \n
*@par Attributes:
* @li p: A required float, specifying the order of the L_p norm.
* @li dim: A required int, specifying the dimension to normalize along.
* @li maxnorm: A required float, specifying the maximum allowed norm. \n
*@par Outputs:
*One output, including:
* @li y: A Tensor. Has the same shape and type as the input x.
*/
REG_OP(Renorm)
.INPUT(x, TensorType::BasicType())
.OUTPUT(y, TensorType::BasicType())
.REQUIRED_ATTR(p, Float)
.REQUIRED_ATTR(dim, Int)
.REQUIRED_ATTR(maxnorm, Float)
.OP_END_FACTORY_REG(Renorm)
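// A minimal sketch of the Renorm semantics for a 2D input with dim = 0,
// where each row is one sub-tensor (plain C++; assumes the usual renorm
// definition, and the function name is illustrative):
#include <cmath>
#include <vector>

// Rows whose L_p norm exceeds maxnorm are rescaled so the norm equals
// maxnorm; rows already below the threshold are left unchanged.
std::vector<std::vector<float>> RenormRows(std::vector<std::vector<float>> x,
                                           float p, float maxnorm) {
  for (auto &row : x) {
    float norm = 0.0f;
    for (float v : row) norm += std::pow(std::fabs(v), p);
    norm = std::pow(norm, 1.0f / p);
    if (norm > maxnorm)
      for (float &v : row) v *= maxnorm / norm;
  }
  return x;
}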
/**
*@brief LayerNormGrad operator interface implementation
* calculating: dy, x, variance, mean, gamma

@@ -397,8 +397,8 @@ No default value.
specifying the stride of the sliding window for each dimension of
the input tensor. No default value.
*@li padding: A required string.
-*@li pads: A list type of int32. Default value {0, 0, 0}.
-*@li dilation: A list type of int32. Default value {1, 1, 1}.
+*@li pads: A list type of int32. Default value {0,0,0,0,0,0}.
+*@li dilation: A list type of int32. Default value {1,1,1,1,1,1}.
*@li ceil_mode: An optional int32, specifying the ceil mode. Default value 0.
*@li data_format: An optional string. Defaults to "NDHWC" . \n
@@ -421,8 +421,8 @@ REG_OP(MaxPool3D)
.REQUIRED_ATTR(ksize, ListInt)
.REQUIRED_ATTR(strides, ListInt)
.REQUIRED_ATTR(padding, String)
-.ATTR(pads, ListInt, {0,0,0})
-.ATTR(dilation, ListInt, {1,1,1})
+.ATTR(pads, ListInt, {0,0,0,0,0,0})
+.ATTR(dilation, ListInt, {1,1,1,1,1,1})
.ATTR(ceil_mode, Int, 0)
.ATTR(data_format, String, "NDHWC")
.OP_END_FACTORY_REG(MaxPool3D)
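// With six-element pads and dilation, each spatial extent follows the usual
// pooling formula. A sketch, assuming pads holds a before/after pair per
// spatial dimension (an assumption; not confirmed by this header):
#include <cmath>

// out = floor_or_ceil((in + pad_before + pad_after
//                      - dilation * (ksize - 1) - 1) / stride) + 1
static int PooledExtent(int in, int ksize, int stride, int pad_before,
                        int pad_after, int dilation, bool ceil_mode) {
  double span = in + pad_before + pad_after - dilation * (ksize - 1) - 1;
  double out = span / stride + 1.0;
  return static_cast<int>(ceil_mode ? std::ceil(out) : std::floor(out));
}
// e.g. PooledExtent(16, 3, 2, 0, 0, 1, false) == 7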

@@ -418,7 +418,7 @@ REG_OP(EmbeddingRankId)
*/
REG_OP(FillV2)
.INPUT(dims, TensorType({DT_INT16, DT_INT32, DT_INT64}))
-.OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT, DT_DOUBLE, DT_INT8, DT_INT16, DT_INT32, DT_INT64}))
+.OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT8, DT_INT16, DT_INT32, DT_INT64}))
.ATTR(value, Float, 0)
.OP_END_FACTORY_REG(FillV2)
@@ -437,7 +437,7 @@ REG_OP(FillV2)
* Compatible with the ONNX operator ConstantOfShape.
*/
REG_OP(FillV2D)
-.OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT, DT_DOUBLE, DT_INT8, DT_UINT8, DT_INT16, DT_INT32, DT_INT64}))
+.OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT8, DT_UINT8, DT_INT16, DT_INT32, DT_INT64}))
.ATTR(value, Float, 0)
.REQUIRED_ATTR(dims, ListInt)
.OP_END_FACTORY_REG(FillV2D)
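// Both fill ops share the same semantics: produce a tensor of shape dims
// with every element set to value. A flattened sketch in plain C++ (the
// function name is illustrative, not part of this header):
#include <cstdint>
#include <vector>

// Flattened FillV2: the output holds prod(dims) copies of value.
std::vector<float> FillV2Ref(const std::vector<int64_t> &dims, float value) {
  int64_t n = 1;
  for (int64_t d : dims) n *= d;
  return std::vector<float>(static_cast<size_t>(n), value);
}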
