unify some examples in mindspore front

pull/266/head
guohongzilong 5 years ago
parent c75f75a3e1
commit 50ed76bc0c
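Note: the hunks below unify the docstring examples so that classes and primitives are referenced through their public namespaces (nn., P., mindspore.) and tensors are built as Tensor(numpy_array, mindspore.dtype). The snippets implicitly assume a common preamble roughly like the sketch below (module paths as of the MindSpore release this commit targets; the preamble itself is not part of the diff):

    import numpy as np
    import mindspore
    from mindspore import Tensor, nn
    from mindspore.ops import operations as P
    from mindspore.common.initializer import initializer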

@@ -256,7 +256,7 @@ class LayerNorm(Cell):
 Tensor, the normalized and scaled offset tensor, has the same shape and data type as the `input_x`.
 Examples:
->>> x = Tensor(np.ones([20, 5, 10, 10], np.float32))
+>>> x = Tensor(np.ones([20, 5, 10, 10]), mindspore.float32)
 >>> shape1 = x.shape()[1:]
 >>> m = nn.LayerNorm(shape1, begin_norm_axis=1, begin_params_axis=1)
 >>> m(x)
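For reference, a minimal standalone sketch of the unified LayerNorm example, with the assumed imports spelled out. Tensor.shape() is called as a method here, matching the docstring of this release; later releases expose shape as a property.

    import numpy as np
    import mindspore
    from mindspore import Tensor, nn

    # Unified style: numpy data plus an explicit mindspore dtype.
    x = Tensor(np.ones([20, 5, 10, 10]), mindspore.float32)
    # Normalize over every axis after the batch axis, as in the example.
    shape1 = x.shape()[1:]
    m = nn.LayerNorm(shape1, begin_norm_axis=1, begin_params_axis=1)
    output = m(x)  # same shape and dtype as x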

@@ -104,7 +104,7 @@ class MaxPool2d(_PoolNd):
 Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
 Examples:
->>> pool = MaxPool2d(kernel_size=3, stride=1)
+>>> pool = nn.MaxPool2d(kernel_size=3, stride=1)
 >>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32)
 [[[[1. 5. 5. 1.]
 [0. 3. 4. 8.]
@@ -186,7 +186,7 @@ class AvgPool2d(_PoolNd):
 Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
 Examples:
->>> pool = AvgPool2d(kernel_size=3, strides=1)
+>>> pool = nn.AvgPool2d(kernel_size=3, strides=1)
 >>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32)
 [[[[5. 5. 9. 9.]
 [8. 4. 3. 0.]
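A hedged sketch of the pooling examples as a runnable snippet. Note the AvgPool2d hunk keeps the keyword strides= while MaxPool2d uses stride=; check the signature of your release before copying either spelling.

    import numpy as np
    import mindspore
    from mindspore import Tensor, nn

    # NCHW input with random integers, cast to float32 in the unified style.
    x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32)
    pool = nn.MaxPool2d(kernel_size=3, stride=1)  # nn.AvgPool2d is used the same way
    out = pool(x)  # a 3x3 window with stride 1 on a 4x4 map yields shape (1, 2, 2, 2)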

@@ -284,7 +284,7 @@ class SoftmaxCrossEntropyExpand(Cell):
 Tensor, a scalar tensor including the mean loss.
 Examples:
->>> loss = SoftmaxCrossEntropyExpand(sparse=True)
+>>> loss = nn.SoftmaxCrossEntropyExpand(sparse=True)
 >>> input_data = Tensor(np.ones([64, 512]), dtype=mindspore.float32)
 >>> label = Tensor(np.ones([64]), dtype=mindspore.int32)
 >>> loss(input_data, label)
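A minimal sketch of the unified loss example, assuming SoftmaxCrossEntropyExpand is exported under nn as the hunk implies; the variable names here are illustrative.

    import numpy as np
    import mindspore
    from mindspore import Tensor, nn

    loss = nn.SoftmaxCrossEntropyExpand(sparse=True)
    logits = Tensor(np.ones([64, 512]), dtype=mindspore.float32)  # 64 samples, 512 classes
    labels = Tensor(np.ones([64]), dtype=mindspore.int32)         # sparse class indices
    mean_loss = loss(logits, labels)  # scalar tensor holding the mean loss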

@@ -83,7 +83,7 @@ def get_metric_fn(name, *args, **kwargs):
 Metric object, class instance of the metric method.
 Examples:
->>> metric = get_metric_fn('precision', eval_type='classification')
+>>> metric = nn.get_metric_fn('precision', eval_type='classification')
 """
 if name not in __factory__:
 raise KeyError("Unknown Metric:", name)

@@ -97,7 +97,7 @@ class MSE(Metric):
 Examples:
 >>> x = Tensor(np.array([0.1, 0.2, 0.6, 0.9]), mindspore.float32)
 >>> y = Tensor(np.array([0.1, 0.25, 0.5, 0.9]), mindspore.float32)
->>> error = MSE()
+>>> error = nn.MSE()
 >>> error.clear()
 >>> error.update(x, y)
 >>> result = error.eval()
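The two metric hunks route the factory function and the MSE metric through nn as well. A hedged sketch of the stateful clear/update/eval cycle, assuming both names are exported under nn as the diff shows:

    import numpy as np
    import mindspore
    from mindspore import Tensor, nn

    # Factory lookup by name, as in the get_metric_fn example.
    precision = nn.get_metric_fn('precision', eval_type='classification')

    # Stateful metric usage: clear, accumulate one or more updates, then evaluate.
    x = Tensor(np.array([0.1, 0.2, 0.6, 0.9]), mindspore.float32)
    y = Tensor(np.array([0.1, 0.25, 0.5, 0.9]), mindspore.float32)
    error = nn.MSE()
    error.clear()
    error.update(x, y)
    result = error.eval()  # mean squared error over the accumulated updates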

@@ -51,9 +51,9 @@ class ControlDepend(Primitive):
 >>> class Net(nn.Cell):
 >>>     def __init__(self):
 >>>         super(Net, self).__init__()
->>>         self.global_step = Parameter(initializer(0, [1]), name="global_step")
+>>>         self.global_step = mindspore.Parameter(initializer(0, [1]), name="global_step")
 >>>         self.rate = 0.2
->>>         self.control_depend = ControlDepend()
+>>>         self.control_depend = P.ControlDepend()
 >>>
 >>>     def construct(self, x):
 >>>         data = self.rate * self.global_step + x
@@ -92,7 +92,7 @@ class GeSwitch(PrimitiveWithInfer):
 >>>         super(Net, self).__init__()
 >>>         self.square = P.Square()
 >>>         self.add = P.TensorAdd()
->>>         self.value = Tensor(np.full((1), 3, dtype=np.float32))
+>>>         self.value = Tensor(np.full((1), 3), mindspore.float32)
 >>>         self.switch = P.GeSwitch()
 >>>         self.merge = P.Merge()
 >>>         self.less = P.Less()

@@ -133,7 +133,7 @@ class AssignAdd(PrimitiveWithInfer):
 >>>     def __init__(self):
 >>>         super(Net, self).__init__()
 >>>         self.AssignAdd = P.AssignAdd()
->>>         self.variable = Parameter(initializer(1, [1], mindspore.int64), name="global_step")
+>>>         self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int64), name="global_step")
 >>>
 >>>     def construct(self, x):
 >>>         self.AssignAdd(self.variable, x)
@@ -176,7 +176,7 @@ class AssignSub(PrimitiveWithInfer):
 >>>     def __init__(self):
 >>>         super(Net, self).__init__()
 >>>         self.AssignSub = P.AssignSub()
->>>         self.variable = Parameter(initializer(1, [1], mindspore.int64), name="global_step")
+>>>         self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int64), name="global_step")
 >>>
 >>>     def construct(self, x):
 >>>         self.AssignSub(self.variable, x)
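The Parameter hunks switch to the fully qualified mindspore.Parameter. A hedged sketch of the AssignAdd cell follows; the return statement is an assumption, since the diff excerpt cuts off before the end of construct:

    import numpy as np
    import mindspore
    from mindspore import nn
    from mindspore.ops import operations as P
    from mindspore.common.initializer import initializer

    class Net(nn.Cell):
        def __init__(self):
            super(Net, self).__init__()
            self.AssignAdd = P.AssignAdd()
            # Fully qualified Parameter, as in the unified examples.
            self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int64),
                                                name="global_step")

        def construct(self, x):
            self.AssignAdd(self.variable, x)
            return self.variable  # assumed; not shown in the diff excerpt

    net = Net()
    out = net(mindspore.Tensor(np.ones([1]), mindspore.int64))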

@@ -154,7 +154,7 @@ class ReLU(PrimitiveWithInfer):
 Tensor, with the same type and shape as the `input_x`.
 Examples:
->>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]], np.float32))
+>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
 >>> relu = P.ReLU()
 >>> result = relu(input_x)
 [[0, 4.0, 0.0], [2.0, 0.0, 9.0]]
@@ -187,7 +187,7 @@ class ReLU6(PrimitiveWithInfer):
 Tensor, with the same type and shape as the `input_x`.
 Examples:
->>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]], np.float32))
+>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
 >>> relu6 = P.ReLU6()
 >>> result = relu6(input_x)
 """
@@ -221,7 +221,7 @@ class Elu(PrimitiveWithInfer):
 Tensor, has the same shape and data type as `input_x`.
 Examples:
->>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]], np.float32))
+>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
 >>> elu = P.Elu()
 >>> result = elu(input_x)
 Tensor([[-0.632 4.0 -0.999]
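The three activation hunks share the same pattern: build the tensor from a plain numpy array plus an explicit mindspore dtype, and instantiate the primitive through P. A minimal sketch:

    import numpy as np
    import mindspore
    from mindspore import Tensor
    from mindspore.ops import operations as P

    input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
    relu = P.ReLU()          # P.ReLU6() and P.Elu() follow the same pattern
    result = relu(input_x)   # negatives clamp to 0: [[0.0, 4.0, 0.0], [2.0, 0.0, 9.0]]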

@@ -76,7 +76,7 @@ class BoundingBoxEncode(PrimitiveWithInfer):
 Tensor, encoded bounding boxes.
 Examples:
->>> boundingbox_encode = BoundingBoxEncode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0))
+>>> boundingbox_encode = P.BoundingBoxEncode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0))
 >>> delta_box = boundingbox_encode(anchor_box, groundtruth_box)
 """
@@ -119,7 +119,7 @@ class BoundingBoxDecode(PrimitiveWithInfer):
 Tensor, decoded boxes.
 Examples:
->>> boundingbox_decode = BoundingBoxDecode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0),
+>>> boundingbox_decode = P.BoundingBoxDecode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0),
 max_shape=(768, 1280), wh_ratio_clip=0.016)
 >>> bbox = boundingbox_decode(anchor_box, deltas)
 """
@@ -208,7 +208,7 @@ class IOU(PrimitiveWithInfer):
 KeyError: When `mode` is not 'iou' or 'iof'.
 Examples:
->>> iou = IOU()
+>>> iou = P.IOU()
 >>> anchor_boxes = Tensor(np.random.randint(1,5, [10, 4]))
 >>> gt_boxes = Tensor(np.random.randint(1,5, [3, 4]))
 >>> iou(anchor_boxes, gt_boxes)
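A hedged sketch of the IOU example. The docstring builds integer boxes without an explicit dtype; a float16 dtype is added here as an assumption, since the primitive is documented for floating point boxes:

    import numpy as np
    import mindspore
    from mindspore import Tensor
    from mindspore.ops import operations as P

    iou = P.IOU()
    anchor_boxes = Tensor(np.random.randint(1, 5, [10, 4]), mindspore.float16)
    gt_boxes = Tensor(np.random.randint(1, 5, [3, 4]), mindspore.float16)
    overlaps = iou(anchor_boxes, gt_boxes)  # pairwise IoU values between gt and anchor boxes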
@@ -255,15 +255,15 @@ class MakeRefKey(Primitive):
 >>> class Net(nn.Cell):
 >>>     def __init__(self):
 >>>         super(Net, self).__init__()
->>>         self.y = Parameter(Tensor(np.ones([6, 8, 10], np.int32)), name="y")
->>>         self.make_ref_key = MakeRefKey("y")
+>>>         self.y = mindspore.Parameter(Tensor(np.ones([6, 8, 10]), mindspore.int32), name="y")
+>>>         self.make_ref_key = P.MakeRefKey("y")
 >>>
 >>>     def construct(self, x):
 >>>         key = self.make_ref_key()
 >>>         ref = F.make_ref(key, x, self.y)
 >>>         return ref * x
 >>>
->>> x = Tensor(np.ones([3, 4, 5], np.int32))
+>>> x = Tensor(np.ones([3, 4, 5]), mindspore.int32)
 >>> net = Net()
 >>> net(x)
 """

@@ -44,7 +44,7 @@ class RandomChoiceWithMask(PrimitiveWithInfer):
 - **mask** (Tensor) - The output has shape 1-D.
 Examples:
->>> rnd_choice_mask = RandomChoiceWithMask()
+>>> rnd_choice_mask = P.RandomChoiceWithMask()
 >>> input_x = Tensor(np.ones(shape=[240000, 4]), mindspore.bool_)
 >>> output_y, output_mask = rnd_choice_mask(input_x)
 """
