!8718 Add labels to python files

From: @JunYuLiu
Reviewed-by: 
Signed-off-by:
pull/8718/MERGE
Committed by mindspore-ci-bot via Gitee
commit d4ebd7bf4a

File diff suppressed because it is too large.

@ -33,6 +33,9 @@ class ReduceOp:
- MAX: Take the maximum.
- MIN: Take the minimum.
- PROD: Take the product.
Supported Platforms:
``Ascend`` ``GPU``
"""
SUM = "sum"
MAX = "max"
@ -67,6 +70,9 @@ class AllReduce(PrimitiveWithInfer):
Tensor, has the same shape as the input, i.e., :math:`(x_1, x_2, ..., x_R)`.
The contents depend on the specified operation.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> from mindspore.communication import init
>>> from mindspore import Tensor
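
For context, a minimal AllReduce sketch in the docstring style; it is illustrative only and assumes a multi-device launch in which init() has been called on every process (the shapes and values here are made up):

>>> import numpy as np
>>> import mindspore.nn as nn
>>> import mindspore.ops.operations as P
>>> from mindspore import Tensor
>>> from mindspore.communication import init
>>> init()  # requires a properly configured distributed environment
>>> class Net(nn.Cell):
...     def __init__(self):
...         super(Net, self).__init__()
...         self.allreduce_sum = P.AllReduce()  # defaults to a sum reduction over the world group
...     def construct(self, x):
...         return self.allreduce_sum(x)
>>> input_ = Tensor(np.ones([2, 8]).astype(np.float32))
>>> output = Net()(input_)  # same shape as the input; every device holds the reduced result
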
@ -243,6 +249,9 @@ class AllGather(PrimitiveWithInfer):
Tensor. If the number of devices in the group is N,
then the shape of output is :math:`(N, x_1, x_2, ..., x_R)`.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> import mindspore.ops.operations as P
>>> import mindspore.nn as nn
@ -356,6 +365,9 @@ class ReduceScatter(PrimitiveWithInfer):
TypeError: If either operation or group is not a string.
ValueError: If the first dimension of the input cannot be divided by the rank size.
Supported Platforms:
``GPU``
Examples:
>>> from mindspore import Tensor
>>> from mindspore.communication import init
@ -474,6 +486,9 @@ class Broadcast(PrimitiveWithInfer):
Raises:
TypeError: If root_rank is not an integer or group is not a string.
Supported Platforms:
``Ascend``
Examples:
>>> from mindspore import Tensor
>>> from mindspore.communication import init

@ -49,6 +49,9 @@ class ControlDepend(Primitive):
Outputs:
This operation has no actual data output; it is used to set up the execution order of related operations.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> class Net(nn.Cell):
... def __init__(self):

@ -48,6 +48,9 @@ class ScalarSummary(PrimitiveWithInfer):
- **name** (str) - The name of the input variable, which must not be an empty string.
- **value** (Tensor) - The value of scalar, and the shape of value must be [] or [1].
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> class SummaryDemo(nn.Cell):
... def __init__(self,):
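
As a reference for the summary operators, a minimal ScalarSummary sketch following the pattern above; the tag name is made up, and data is only written out when the network runs under a summary recording mechanism:

>>> import mindspore.nn as nn
>>> import mindspore.ops.operations as P
>>> class SummaryDemo(nn.Cell):
...     def __init__(self):
...         super(SummaryDemo, self).__init__()
...         self.summary = P.ScalarSummary()
...     def construct(self, x):
...         self.summary("x_value", x)  # value must be a scalar tensor with shape [] or [1]
...         return x
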
@ -87,6 +90,9 @@ class ImageSummary(PrimitiveWithInfer):
- **name** (str) - The name of the input variable, which must not be an empty string.
- **value** (Tensor) - The value of image, the rank of tensor must be 4.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> class Net(nn.Cell):
... def __init__(self):
@ -125,6 +131,9 @@ class TensorSummary(PrimitiveWithInfer):
- **name** (str) - The name of the input variable.
- **value** (Tensor) - The value of tensor, and the rank of tensor must be greater than 0.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> class SummaryDemo(nn.Cell):
... def __init__(self,):
@ -164,6 +173,9 @@ class HistogramSummary(PrimitiveWithInfer):
- **name** (str) - The name of the input variable.
- **value** (Tensor) - The value of tensor, and the rank of tensor must be greater than 0.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> class SummaryDemo(nn.Cell):
... def __init__(self,):
@ -208,6 +220,9 @@ class InsertGradientOf(PrimitiveWithInfer):
Outputs:
Tensor, returns `input_x` directly. `InsertGradientOf` does not affect the forward result.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> def clip_gradient(dx):
... ret = dx
@ -319,6 +334,9 @@ class Print(PrimitiveWithInfer):
- **input_x** (Union[Tensor, str]) - The graph node to attach to. The input supports
multiple strings and tensors which are separated by ','.
Supported Platforms:
``Ascend``
Examples:
>>> class PrintDemo(nn.Cell):
... def __init__(self):
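
A minimal Print sketch in the same style; per the label added above it targets Ascend, and the tensor values are illustrative:

>>> import numpy as np
>>> import mindspore.nn as nn
>>> import mindspore.ops.operations as P
>>> from mindspore import Tensor
>>> class PrintDemo(nn.Cell):
...     def __init__(self):
...         super(PrintDemo, self).__init__()
...         self.print = P.Print()
...     def construct(self, x):
...         self.print('tensor x is:', x)  # strings and tensors, separated by ','
...         return x
>>> output = PrintDemo()(Tensor(np.ones([2, 1]).astype(np.int32)))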

@ -53,6 +53,9 @@ class CropAndResize(PrimitiveWithInfer):
Outputs:
A 4-D tensor of shape [num_boxes, crop_height, crop_width, depth] with type float32.
Supported Platforms:
``Ascend``
Examples:
>>> class CropAndResizeNet(nn.Cell):
... def __init__(self, crop_size):

@ -32,6 +32,9 @@ class ScalarCast(PrimitiveWithInfer):
Outputs:
Scalar. The type is the same as the python type corresponding to `input_y`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> scalar_cast = P.ScalarCast()
>>> output = scalar_cast(255.0, mindspore.int32)
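
With the surrounding imports filled in, a self-contained version of that snippet might look like this (casting 255.0 to int32 yields 255):

>>> import mindspore
>>> import mindspore.ops.operations as P
>>> scalar_cast = P.ScalarCast()
>>> output = scalar_cast(255.0, mindspore.int32)  # returns the Python scalar 255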

File diff suppressed because it is too large.

File diff suppressed because it is too large.

@ -37,6 +37,9 @@ class Assign(PrimitiveWithCheck):
Outputs:
Tensor, has the same type as the original `variable`.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> class Net(nn.Cell):
... def __init__(self):
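
A minimal Assign sketch following the docstring pattern; the parameter name and values are illustrative:

>>> import mindspore
>>> import mindspore.nn as nn
>>> import mindspore.ops.operations as P
>>> from mindspore import Tensor, Parameter
>>> class Net(nn.Cell):
...     def __init__(self):
...         super(Net, self).__init__()
...         self.y = Parameter(Tensor([1.0], mindspore.float32), name="y")
...         self.assign = P.Assign()
...     def construct(self, x):
...         self.assign(self.y, x)  # writes x into the parameter y
...         return self.y
>>> output = Net()(Tensor([2.0], mindspore.float32))
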
@ -120,6 +123,9 @@ class BoundingBoxEncode(PrimitiveWithInfer):
Outputs:
Tensor, encoded bounding boxes.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> anchor_box = Tensor([[4,1,2,1],[2,2,2,3]],mindspore.float32)
>>> groundtruth_box = Tensor([[3,1,2,2],[1,2,1,4]],mindspore.float32)
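
Completing that snippet as a sketch; the means/stds attributes below are assumptions written with default-style values:

>>> import mindspore
>>> import mindspore.ops.operations as P
>>> from mindspore import Tensor
>>> anchor_box = Tensor([[4, 1, 2, 1], [2, 2, 2, 3]], mindspore.float32)
>>> groundtruth_box = Tensor([[3, 1, 2, 2], [1, 2, 1, 4]], mindspore.float32)
>>> boundingbox_encode = P.BoundingBoxEncode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0))
>>> output = boundingbox_encode(anchor_box, groundtruth_box)  # encoded deltas, same shape as the inputs
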
@ -173,6 +179,9 @@ class BoundingBoxDecode(PrimitiveWithInfer):
Outputs:
Tensor, decoded boxes.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> anchor_box = Tensor([[4,1,2,1],[2,2,2,3]],mindspore.float32)
>>> deltas = Tensor([[3,1,2,2],[1,2,1,4]],mindspore.float32)
@ -228,6 +237,9 @@ class CheckValid(PrimitiveWithInfer):
Outputs:
Tensor, with shape of (N,) and dtype of bool.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> import mindspore
>>> import mindspore.nn as nn
@ -297,6 +309,9 @@ class IOU(PrimitiveWithInfer):
Raises:
KeyError: When `mode` is not 'iou' or 'iof'.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> iou = P.IOU()
>>> anchor_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]), mindspore.float16)
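
Continuing that example as a sketch with the missing pieces filled in (the box values are random, and gt_boxes is an assumed name):

>>> import numpy as np
>>> import mindspore
>>> import mindspore.ops.operations as P
>>> from mindspore import Tensor
>>> iou = P.IOU()  # default mode is 'iou'; pass mode='iof' for intersection over foreground
>>> anchor_boxes = Tensor(np.random.randint(1, 5, [3, 4]), mindspore.float16)
>>> gt_boxes = Tensor(np.random.randint(1, 5, [3, 4]), mindspore.float16)
>>> output = iou(anchor_boxes, gt_boxes)  # one overlap value per (gt, anchor) pair
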
@ -344,6 +359,9 @@ class MakeRefKey(Primitive):
Outputs:
RefKeyType, made from the Parameter name.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> from mindspore.ops import functional as F
>>> class Net(nn.Cell):
@ -545,6 +563,9 @@ class PopulationCount(PrimitiveWithInfer):
Outputs:
Tensor, with the same shape as the input.
Supported Platforms:
``Ascend``
Examples:
>>> population_count = P.PopulationCount()
>>> x_input = Tensor([0, 1, 3], mindspore.int16)
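
Filling in that snippet (the expected counts are the number of set bits per element):

>>> import mindspore
>>> import mindspore.ops.operations as P
>>> from mindspore import Tensor
>>> population_count = P.PopulationCount()
>>> x_input = Tensor([0, 1, 3], mindspore.int16)
>>> output = population_count(x_input)  # [0, 1, 2]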

@ -34,6 +34,9 @@ class StandardNormal(PrimitiveWithInfer):
Outputs:
Tensor. The shape is the same as the input `shape`. The dtype is float32.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> shape = (4, 16)
>>> stdnormal = P.StandardNormal(seed=2)
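
Completing the truncated example as a sketch (shape and seed come from the snippet above):

>>> import mindspore.ops.operations as P
>>> shape = (4, 16)
>>> stdnormal = P.StandardNormal(seed=2)
>>> output = stdnormal(shape)  # float32 samples from N(0, 1) with shape (4, 16)
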
@ -81,6 +84,9 @@ class StandardLaplace(PrimitiveWithInfer):
Outputs:
Tensor with the shape denoted by the input 'shape'. The dtype is float32.
Supported Platforms:
``Ascend``
Examples:
>>> shape = (4, 16)
>>> stdlaplace = P.StandardLaplace(seed=2)
@ -133,6 +139,9 @@ class Gamma(PrimitiveWithInfer):
Tensor. The shape must be the broadcast shape of the input `shape` and the shapes of `alpha` and `beta`.
The dtype is float32.
Supported Platforms:
``Ascend``
Examples:
>>> shape = (2, 2)
>>> alpha = Tensor(1.0, mstype.float32)
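
A completed sketch of that example; the beta value here is an illustrative rate parameter:

>>> import mindspore.ops.operations as P
>>> from mindspore import Tensor
>>> from mindspore.common import dtype as mstype
>>> shape = (2, 2)
>>> alpha = Tensor(1.0, mstype.float32)
>>> beta = Tensor(1.0, mstype.float32)
>>> gamma = P.Gamma(seed=3)
>>> output = gamma(shape, alpha, beta)  # float32 samples, broadcast shape of shape/alpha/beta
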
@ -189,6 +198,9 @@ class Poisson(PrimitiveWithInfer):
Tensor. Its shape must be the broadcasted shape of `shape` and the shape of `mean`.
The dtype is int32.
Supported Platforms:
``Ascend``
Examples:
>>> shape = (4, 16)
>>> mean = Tensor(5.0, mstype.float32)
@ -244,6 +256,9 @@ class UniformInt(PrimitiveWithInfer):
Outputs:
Tensor. The shape is the same as the input 'shape', and the data type is int32.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> shape = (2, 4)
>>> minval = Tensor(1, mstype.int32)
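
Completing that snippet as a sketch; the maxval below is an assumed illustrative upper bound:

>>> import mindspore.ops.operations as P
>>> from mindspore import Tensor
>>> from mindspore.common import dtype as mstype
>>> shape = (2, 4)
>>> minval = Tensor(1, mstype.int32)
>>> maxval = Tensor(100, mstype.int32)
>>> uniform_int = P.UniformInt(seed=10)
>>> output = uniform_int(shape, minval, maxval)  # int32 samples drawn from [minval, maxval)
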
@ -296,6 +311,9 @@ class UniformReal(PrimitiveWithInfer):
Outputs:
Tensor with the shape denoted by the input 'shape'. The dtype is float32.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> shape = (2, 2)
>>> uniformreal = P.UniformReal(seed=2)
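
The same example completed as a sketch:

>>> import mindspore.ops.operations as P
>>> shape = (2, 2)
>>> uniformreal = P.UniformReal(seed=2)
>>> output = uniformreal(shape)  # float32 samples drawn uniformly from [0, 1)
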
@ -350,6 +368,9 @@ class RandomChoiceWithMask(PrimitiveWithInfer):
- **index** (Tensor) - The output shape is 2-D.
- **mask** (Tensor) - The output shape is 1-D.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> rnd_choice_mask = P.RandomChoiceWithMask()
>>> input_x = Tensor(np.ones(shape=[240000, 4]).astype(np.bool))
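
A completed sketch of that snippet (np.bool_ is used for portability across NumPy versions):

>>> import numpy as np
>>> import mindspore.ops.operations as P
>>> from mindspore import Tensor
>>> rnd_choice_mask = P.RandomChoiceWithMask()
>>> input_x = Tensor(np.ones(shape=[240000, 4]).astype(np.bool_))
>>> output_y, output_mask = rnd_choice_mask(input_x)  # 2-D sampled indices and a 1-D validity mask
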
@ -394,6 +415,9 @@ class RandomCategorical(PrimitiveWithInfer):
Outputs:
- **output** (Tensor) - The output Tensor with shape [batch_size, num_samples].
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> class Net(nn.Cell):
... def __init__(self, num_sample):
@ -469,6 +493,9 @@ class Multinomial(PrimitiveWithInfer):
Outputs:
Tensor with the same number of rows as the input; each row contains `num_samples` sampled indices.
Supported Platforms:
``GPU``
Examples:
>>> input = Tensor([0., 9., 4., 0.], mstype.float32)
>>> multinomial = P.Multinomial(seed=10)
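
Completing that example as a sketch (2 is an illustrative number of samples per row):

>>> import mindspore.ops.operations as P
>>> from mindspore import Tensor
>>> from mindspore.common import dtype as mstype
>>> input = Tensor([0., 9., 4., 0.], mstype.float32)
>>> multinomial = P.Multinomial(seed=10)
>>> output = multinomial(input, 2)  # indices sampled in proportion to the unnormalized weights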
