!14445 update document of NMSWithMask

From: @mind-lh
Reviewed-by: @liangchenghui, @wuxuejian
Signed-off-by: @liangchenghui
pull/14445/MERGE
Committed by mindspore-ci-bot via Gitee
commit 5c521492da

@@ -3747,8 +3747,14 @@ class Asin(PrimitiveWithInfer):
 class NMSWithMask(PrimitiveWithInfer):
-    """
-    Selects some bounding boxes in descending order of score.
+    r"""
+    When an object detection problem is handled in the computer vision field, the detection algorithm generates
+    a plurality of bounding boxes. This operator selects some of them in descending order of score, uses the box
+    with the highest score to calculate the overlap between it and the other boxes, and deletes boxes whose
+    overlap exceeds a certain threshold (IOU). The IOU is defined as follows:
+
+    .. math::
+        \text{IOU} = \frac{\text{Area of Overlap}}{\text{Area of Union}}
+
     Args:
         iou_threshold (float): Specifies the threshold of overlap boxes with respect to
@ -3781,7 +3787,7 @@ class NMSWithMask(PrimitiveWithInfer):
Examples:
>>> bbox = np.array([[100.0, 100.0, 50.0, 68.0, 0.63], [150.0, 75.0, 165.0, 115.0, 0.55],
[12.0, 190.0, 288.0, 200.0, 0.9], [28.0, 130.0, 106.0, 172.0, 0.3]])
... [12.0, 190.0, 288.0, 200.0, 0.9], [28.0, 130.0, 106.0, 172.0, 0.3]])
>>> bbox[:, 2] += bbox[:, 0]
>>> bbox[:, 3] += bbox[:, 1]
>>> inputs = Tensor(bbox, mindspore.float32)
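The IOU formula added in the first hunk is easy to check by hand. The following standalone NumPy sketch (not part of the patch) computes it for the first two boxes of the doctest after the same (x, y, w, h) to (x1, y1, x2, y2) conversion; the helper name `iou` and the dropped score column are illustrative choices, not MindSpore API.

    import numpy as np

    def iou(box_a, box_b):
        # Area of Overlap / Area of Union for two (x1, y1, x2, y2) boxes.
        inter_w = max(0.0, min(box_a[2], box_b[2]) - max(box_a[0], box_b[0]))
        inter_h = max(0.0, min(box_a[3], box_b[3]) - max(box_a[1], box_b[1]))
        inter = inter_w * inter_h
        area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
        area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
        union = area_a + area_b - inter
        return inter / union if union > 0 else 0.0

    # First two boxes of the doctest, score column dropped; the two += lines
    # mirror the doctest's conversion from (x, y, w, h) to corner coordinates.
    bbox = np.array([[100.0, 100.0, 50.0, 68.0], [150.0, 75.0, 165.0, 115.0]])
    bbox[:, 2] += bbox[:, 0]
    bbox[:, 3] += bbox[:, 1]
    print(iou(bbox[0], bbox[1]))  # 0.0 -- these two boxes only touch at x = 150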

@@ -4437,7 +4437,7 @@ class FusedSparseAdam(PrimitiveWithInfer):
         >>> epsilon = Tensor(1e-8, mstype.float32)
         >>> gradient = Tensor(np.random.rand(2, 1, 2), mstype.float32)
         >>> indices = Tensor([0, 1], mstype.int32)
-        >>> net(beta1_power, beta2_power, lr, beta1, beta2, epsilon, gradient, indices)
+        >>> output = net(beta1_power, beta2_power, lr, beta1, beta2, epsilon, gradient, indices)
         >>> print(net.var.asnumpy())
         [[[0.9996963 0.9996977 ]]
          [[0.99970144 0.9996992 ]]
@@ -4585,7 +4585,7 @@ class FusedSparseLazyAdam(PrimitiveWithInfer):
         >>> epsilon = Tensor(1e-8, mstype.float32)
         >>> gradient = Tensor(np.random.rand(2, 1, 2), mstype.float32)
         >>> indices = Tensor([0, 1], mstype.int32)
-        >>> net(beta1_power, beta2_power, lr, beta1, beta2, epsilon, gradient, indices)
+        >>> output = net(beta1_power, beta2_power, lr, beta1, beta2, epsilon, gradient, indices)
         >>> print(net.var.asnumpy())
         [[[0.9996866 0.9997078]]
          [[0.9997037 0.9996869]]
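The two hunks above make the same doctest fix for FusedSparseAdam and FusedSparseLazyAdam: binding the call to `output` stops the interpreter from echoing the operator's return value, so only the explicit `print(net.var.asnumpy())` output is compared. The excerpt does not show how `net` is built; the cell below is a minimal sketch of the kind of wrapper those examples assume, based on the documented FusedSparseAdam inputs (the class name `SparseAdamNet` and the parameter shapes are illustrative, not the exact class from the docstring).

    import numpy as np
    import mindspore.nn as nn
    import mindspore.ops as ops
    from mindspore import Tensor, Parameter

    class SparseAdamNet(nn.Cell):
        # Wraps FusedSparseAdam and owns the var/m/v Parameters it updates in place.
        def __init__(self):
            super(SparseAdamNet, self).__init__()
            self.fused_sparse_adam = ops.FusedSparseAdam()
            self.var = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="var")
            self.m = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="m")
            self.v = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="v")

        def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, indices):
            return self.fused_sparse_adam(self.var, self.m, self.v, beta1_power, beta2_power,
                                          lr, beta1, beta2, epsilon, grad, indices)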

@@ -270,10 +270,10 @@ class PrimitiveWithCheck(Primitive):
         ...         pass
         ...     def check_shape(self, input_x):
         ...         validator.check_int(len(input_x), 1, Rel.GE, 'input_x rank', self.name)
-        >>>
+        ...
         ...     def check_dtype(self, input_x):
         ...         validator.check_subclass("input_x", input_x, mstype.tensor, self.name)
-        >>>
+        ...
         >>> # init a Primitive obj
         >>> add = Flatten()
     """
@@ -348,13 +348,13 @@ class PrimitiveWithInfer(Primitive):
         ...     @prim_attr_register
         ...     def __init__(self):
         ...         pass
-        >>>
+        ...
         ...     def infer_shape(self, x, y):
         ...         return x  # output shape same as first input 'x'
-        >>>
+        ...
         ...     def infer_dtype(self, x, y):
         ...         return x  # output type same as first input 'x'
-        >>>
+        ...
         >>> # init a Primitive obj
         >>> add = Add()
     """
