diff --git a/mindspore/common/api.py b/mindspore/common/api.py
index 8443ef6a46..265361c3de 100644
--- a/mindspore/common/api.py
+++ b/mindspore/common/api.py
@@ -233,25 +233,25 @@ def ms_function(fn=None, obj=None, input_signature=None):
     Examples:
         >>> from mindspore.ops import functional as F
-        >>>
+        ...
         >>> def tensor_add(x, y):
-        >>>     z = F.tensor_add(x, y)
-        >>>     return z
-        >>>
+        ...     z = F.tensor_add(x, y)
+        ...     return z
+        ...
         >>> @ms_function
-        >>> def tensor_add_with_dec(x, y):
-        >>>     z = F.tensor_add(x, y)
-        >>>     return z
-        >>>
+        ... def tensor_add_with_dec(x, y):
+        ...     z = F.tensor_add(x, y)
+        ...     return z
+        ...
         >>> @ms_function(input_signature=(MetaTensor(mindspore.float32, (1, 1, 3, 3)),
-        >>>                               MetaTensor(mindspore.float32, (1, 1, 3, 3))))
-        >>> def tensor_add_with_sig(x, y):
-        >>>     z = F.tensor_add(x, y)
-        >>>     return z
-        >>>
+        ...                               MetaTensor(mindspore.float32, (1, 1, 3, 3))))
+        ... def tensor_add_with_sig(x, y):
+        ...     z = F.tensor_add(x, y)
+        ...     return z
+        ...
         >>> x = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32))
         >>> y = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32))
-        >>>
+        ...
         >>> tensor_add_graph = ms_function(fn=tensor_add)
         >>> out = tensor_add_graph(x, y)
         >>> out = tensor_add_with_dec(x, y)
diff --git a/mindspore/common/tensor.py b/mindspore/common/tensor.py
index 86e7a3852f..ffc84f04f2 100644
--- a/mindspore/common/tensor.py
+++ b/mindspore/common/tensor.py
@@ -51,7 +51,7 @@ class Tensor(Tensor_):
         >>> assert isinstance(t1, Tensor)
         >>> assert t1.shape == (1, 2, 3)
         >>> assert t1.dtype == mindspore.float32
-        >>>
+        ...
         >>> # initialize a tensor with a float scalar
         >>> t2 = Tensor(0.1)
         >>> assert isinstance(t2, Tensor)
diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py
index 1265f6656b..4a8ffcbf4e 100644
--- a/mindspore/ops/operations/array_ops.py
+++ b/mindspore/ops/operations/array_ops.py
@@ -4541,6 +4541,9 @@ class GatherD(PrimitiveWithInfer):
     Outputs:
         Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`.
 
+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32)
         >>> index = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)
diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py
index 82a409196d..3181aef222 100644
--- a/mindspore/ops/operations/nn_ops.py
+++ b/mindspore/ops/operations/nn_ops.py
@@ -3599,6 +3599,9 @@ class AdamNoUpdateParam(PrimitiveWithInfer):
         Tensor, whose shape and data type are the same with `gradient`, is a value that should be added to
         the parameter to be updated.
 
+    Supported Platforms:
+        ``CPU``
+
     Examples:
         >>> import numpy as np
         >>> import mindspore as ms
@@ -4064,6 +4067,9 @@ class FusedSparseProximalAdagrad(PrimitiveWithInfer):
         - **var** (Tensor) - A Tensor with shape (1,).
         - **accum** (Tensor) - A Tensor with shape (1,).
 
+    Supported Platforms:
+        ``CPU``
+
     Examples:
         >>> import numpy as np
         >>> import mindspore.nn as nn
@@ -4958,6 +4964,9 @@ class ApplyProximalAdagrad(PrimitiveWithInfer):
         - **var** (Tensor) - The same shape and data type as `var`.
         - **accum** (Tensor) - The same shape and data type as `accum`.
 
+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> import numpy as np
         >>> import mindspore.nn as nn