diff --git a/mindspore/nn/layer/activation.py b/mindspore/nn/layer/activation.py index 230bd3398c..e9025ea256 100644 --- a/mindspore/nn/layer/activation.py +++ b/mindspore/nn/layer/activation.py @@ -72,7 +72,7 @@ class Softmax(Cell): >>> softmax = nn.Softmax() >>> output = softmax(input_x) >>> print(output) - [0.03168 0.01166 0.0861 0.636 0.2341] + [0.03168 0.01166 0.0861 0.636 0.2341 ] """ def __init__(self, axis=-1): @@ -179,7 +179,7 @@ class ReLU(Cell): >>> relu = nn.ReLU() >>> output = relu(input_x) >>> print(output) - [0. 2. 0. 2. 0.] + [0. 2. 0. 2. 0.] """ def __init__(self): @@ -209,7 +209,7 @@ class ReLU6(Cell): >>> relu6 = nn.ReLU6() >>> output = relu6(input_x) >>> print(output) - [0. 0. 0. 2. 1.] + [0. 0. 0. 2. 1.] """ def __init__(self): @@ -248,7 +248,7 @@ class LeakyReLU(Cell): >>> output = leaky_relu(input_x) >>> print(output) [[-0.2 4. -1.6] - [ 2 -1. 9.]] + [ 2 -1. 9. ]] """ def __init__(self, alpha=0.2): @@ -292,7 +292,7 @@ class Tanh(Cell): >>> tanh = nn.Tanh() >>> output = tanh(input_x) >>> print(output) - [0.7617 0.964 0.995 0.964 0.7617] + [0.7617 0.964 0.995 0.964 0.7617] """ def __init__(self): @@ -356,7 +356,7 @@ class Sigmoid(Cell): >>> sigmoid = nn.Sigmoid() >>> output = sigmoid(input_x) >>> print(output) - [0.2688 0.11914 0.5 0.881 0.7305] + [0.2688 0.11914 0.5 0.881 0.7305 ] """ def __init__(self): @@ -517,10 +517,9 @@ class LogSigmoid(Cell): Examples: >>> net = nn.LogSigmoid() >>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32) - >>> logsigmoid = net(input_x) - >>> print(logsigmoid) - [-3.1326166e-01, -1.2692806e-01, -4.8587345e-02] - + >>> output = net(input_x) + >>> print(output) + [-0.31326166 -0.12692806 -0.04858734] """ def __init__(self): diff --git a/mindspore/nn/layer/basic.py b/mindspore/nn/layer/basic.py index 0aabe41d88..a6c55bc429 100644 --- a/mindspore/nn/layer/basic.py +++ b/mindspore/nn/layer/basic.py @@ -78,10 +78,10 @@ class Dropout(Cell): >>> net.set_train() >>> output = net(x) >>> print(output) - [[[0., 1.25, 0.], - [1.25, 1.25, 1.25]], - [[1.25, 1.25, 1.25], - [1.25, 1.25, 1.25]]] + [[[0. 1.25 0. ] + [1.25 1.25 1.25]] + [[1.25 1.25 1.25] + [1.25 1.25 1.25]]] """ def __init__(self, keep_prob=0.5, dtype=mstype.float32): @@ -320,8 +320,8 @@ class ClipByNorm(Cell): >>> net = nn.ClipByNorm() >>> input = Tensor(np.random.randint(0, 10, [4, 16]), mindspore.float32) >>> clip_norm = Tensor(np.array([100]).astype(np.float32)) - >>> result = net(input, clip_norm).shape - >>> print(result) + >>> output = net(input, clip_norm) + >>> print(output.shape) (4, 16) """ @@ -392,7 +392,7 @@ class Norm(Cell): >>> input = Tensor(np.random.randint(0, 10, [2, 4]), mindspore.float32) >>> output = net(input) >>> print(output) - [2.236068 9.848858 4. 5.656854] + [7.81025 6.708204 0. 8.602325] """ def __init__(self, axis=(), keep_dims=False): @@ -514,7 +514,12 @@ class Pad(Cell): ... return self.pad(x) >>> x = np.random.random(size=(2, 3)).astype(np.float32) >>> pad = Net() - >>> ms_output = pad(Tensor(x)) + >>> output = pad(Tensor(x)) + >>> print(output) + [[0. 0. 0. 0. 0. 0. ] + [0. 0. 0.82691735 0.36147234 0.70918983 0. ] + [0. 0. 0.7842975 0.44726616 0.4353459 0. ] + [0. 0. 0. 0. 0. 0. 
]] """ def __init__(self, paddings, mode="CONSTANT"): @@ -574,9 +579,8 @@ class Unfold(Cell): >>> net = Unfold(ksizes=[1, 2, 2, 1], strides=[1, 2, 2, 1], rates=[1, 2, 2, 1]) >>> image = Tensor(np.ones([2, 3, 6, 6]), dtype=mstype.float16) >>> output = net(image) - >>> print(output) - [[[[1, 1] [1, 1]] [[1, 1], [1, 1]] [[1, 1] [1, 1]], [[1, 1] [1, 1]], [[1, 1] [1, 1]], - [[1, 1], [1, 1]]]] + >>> print(output.shape) + (2, 12, 2, 2) """ def __init__(self, ksizes, strides, rates, padding="valid"): @@ -627,8 +631,8 @@ class MatrixDiag(Cell): Examples: >>> x = Tensor(np.array([1, -1]), mstype.float32) >>> matrix_diag = nn.MatrixDiag() - >>> result = matrix_diag(x) - >>> print(result) + >>> output = matrix_diag(x) + >>> print(output) [[1. 0.] [0. -1.]] """ @@ -659,9 +663,11 @@ class MatrixDiagPart(Cell): Examples: >>> x = Tensor([[[-1, 0], [0, 1]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32) >>> matrix_diag_part = nn.MatrixDiagPart() - >>> result = matrix_diag_part(x) - >>> print(result) - [[-1., 1.], [-1., 1.], [-1., 1.]] + >>> output = matrix_diag_part(x) + >>> print(output) + [[-1. 1.] + [-1. 1.] + [-1. 1.]] """ def __init__(self): super(MatrixDiagPart, self).__init__() @@ -692,9 +698,14 @@ class MatrixSetDiag(Cell): >>> x = Tensor([[[-1, 0], [0, 1]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32) >>> diagonal = Tensor([[-1., 2.], [-1., 1.], [-1., 1.]], mindspore.float32) >>> matrix_set_diag = nn.MatrixSetDiag() - >>> result = matrix_set_diag(x, diagonal) - >>> print(result) - [[[-1, 0], [0, 2]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]] + >>> output = matrix_set_diag(x, diagonal) + >>> print(output) + [[[-1. 0.] + [ 0. 2.]] + [[-1. 0.] + [ 0. 1.]] + [[-1. 0.] + [ 0. 1.]]] """ def __init__(self): super(MatrixSetDiag, self).__init__() diff --git a/mindspore/nn/layer/container.py b/mindspore/nn/layer/container.py index dccbbd94e6..1e602143f3 100644 --- a/mindspore/nn/layer/container.py +++ b/mindspore/nn/layer/container.py @@ -85,7 +85,6 @@ class SequentialCell(Cell): >>> bn = nn.BatchNorm2d(2) >>> relu = nn.ReLU() >>> seq = nn.SequentialCell([conv, bn, relu]) - >>> >>> x = Tensor(np.random.random((1, 3, 4, 4)), dtype=mindspore.float32) >>> output = seq(x) >>> print(output) @@ -158,10 +157,10 @@ class SequentialCell(Cell): >>> x = Tensor(np.ones([1, 3, 4, 4]), dtype=mindspore.float32) >>> output = seq(x) >>> print(output) - [[[[0.12445523 0.12445523] - [0.12445523 0.12445523]] - [[0. 0. ] - [0. 0. 
]]]] + [[[[0.08789019 0.08789019] + [0.08789019 0.08789019]] + [[0.07690391 0.07690391] + [0.07690391 0.07690391]]]] """ if _valid_cell(cell): self._cells[str(len(self))] = cell @@ -195,9 +194,11 @@ class CellList(_CellListBase, Cell): >>> x = Tensor(np.random.random((1, 3, 4, 4)), dtype=mindspore.float32) >>> # not same as nn.SequentialCell, `cell_ls(x)` is not correct >>> cell_ls - CellList< (0): Conv2d - (1): BatchNorm2d - (2): ReLU<> > + CellList< + (0): Conv2d + (1): BatchNorm2d + (2): ReLU<> + > """ def __init__(self, *args): _CellListBase.__init__(self) diff --git a/mindspore/nn/layer/image.py b/mindspore/nn/layer/image.py index 4e90124f4e..7eae40d25f 100644 --- a/mindspore/nn/layer/image.py +++ b/mindspore/nn/layer/image.py @@ -52,13 +52,14 @@ class ImageGradients(Cell): Examples: >>> net = nn.ImageGradients() - >>> image = Tensor(np.array([[[[1,2],[3,4]]]]), dtype=mstype.int32) + >>> image = Tensor(np.array([[[[1,2],[3,4]]]]), dtype=mindspore.int32) >>> output = net(image) >>> print(output) - [[[[2,2] - [0,0]]]] - [[[[1,0] - [1,0]]]] + (Tensor(shape=[1, 1, 2, 2], dtype=Int32, value= + [[[[2, 2], + [0, 0]]]]), Tensor(shape=[1, 1, 2, 2], dtype=Int32, value= + [[[[1, 0], + [1, 0]]]])) """ def __init__(self): super(ImageGradients, self).__init__() @@ -214,8 +215,8 @@ class SSIM(Cell): >>> net = nn.SSIM() >>> img1 = Tensor(np.random.random((1,3,16,16)), mindspore.float32) >>> img2 = Tensor(np.random.random((1,3,16,16)), mindspore.float32) - >>> ssim = net(img1, img2) - >>> print(ssim) + >>> output = net(img1, img2) + >>> print(output) [0.12174469] """ def __init__(self, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03): @@ -290,11 +291,11 @@ class MSSSIM(Cell): Examples: >>> net = nn.MSSSIM(power_factors=(0.033, 0.033, 0.033)) - >>> img1 = Tensor(np.random.random((1, 3, 128, 128))) - >>> img2 = Tensor(np.random.random((1, 3, 128, 128))) - >>> result = net(img1, img2) - >>> print(result) - [0.20930639] + >>> img1 = Tensor(np.random.random((1,3,128,128))) + >>> img2 = Tensor(np.random.random((1,3,128,128))) + >>> output = net(img1, img2) + >>> print(output) + [0.22965115] """ def __init__(self, max_val=1.0, power_factors=(0.0448, 0.2856, 0.3001, 0.2363, 0.1333), filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03): @@ -382,9 +383,9 @@ class PSNR(Cell): >>> net = nn.PSNR() >>> img1 = Tensor(np.random.random((1,3,16,16))) >>> img2 = Tensor(np.random.random((1,3,16,16))) - >>> psnr = net(img1, img2) - >>> print(psnr) - [7.8297315] + >>> output = net(img1, img2) + >>> print(output) + [7.7229595] """ def __init__(self, max_val=1.0): super(PSNR, self).__init__() @@ -452,8 +453,7 @@ class CentralCrop(Cell): >>> net = nn.CentralCrop(central_fraction=0.5) >>> image = Tensor(np.random.random((4, 3, 4, 4)), mindspore.float32) >>> output = net(image) - >>> result = output.shape - >>> print(result) + >>> print(output.shape) (4, 3, 2, 2) """ diff --git a/mindspore/nn/layer/math.py b/mindspore/nn/layer/math.py index 3543fa5bab..06054de395 100644 --- a/mindspore/nn/layer/math.py +++ b/mindspore/nn/layer/math.py @@ -64,8 +64,7 @@ class ReduceLogSumExp(Cell): >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32)) >>> op = nn.ReduceLogSumExp(1, keep_dims=True) >>> output = op(input_x) - >>> result = output.shape - >>> print(reuslt) + >>> print(output.shape) (3, 1, 5, 6) """ @@ -101,9 +100,9 @@ class Range(Cell): Examples: >>> net = nn.Range(1, 8, 2) - >>> out = net() - >>> print(out) - [1, 3, 5, 7] + >>> output = net() + >>> print(output) + [1 3 5 7] """ def __init__(self, 
start, limit=None, delta=1): @@ -157,7 +156,7 @@ class LinSpace(Cell): >>> linspace = nn.LinSpace(1, 10, 5) >>> output = linspace() >>> print(output) - [1, 3.25, 5.5, 7.75, 10] + [ 1. 3.25 5.5 7.75 10. ] """ def __init__(self, start, stop, num): @@ -230,6 +229,7 @@ class LGamma(Cell): >>> input_x = Tensor(np.array([2, 3, 4]).astype(np.float32)) >>> op = nn.LGamma() >>> output = op(input_x) + >>> print(output) [3.5762787e-07 6.9314754e-01 1.7917603e+00] """ @@ -830,9 +830,13 @@ class Moments(Cell): Examples: >>> net = nn.Moments(axis=3, keep_dims=True) >>> input_x = Tensor(np.array([[[[1, 2, 3, 4], [3, 4, 5, 6]]]]), mindspore.float32) - >>> mean, var = net(input_x) - mean: [[[[2.5], [4.5]]]] - var: [[[[1.25], [1.25]]]] + >>> output = net(input_x) + >>> print(output) + (Tensor(shape=[1, 1, 2, 1], dtype=Float32, value= + [[[[ 2.50000000e+00], + [ 4.50000000e+00]]]]), Tensor(shape=[1, 1, 2, 1], dtype=Float32, value= + [[[[ 1.25000000e+00], + [ 1.25000000e+00]]]])) """ def __init__(self, axis=None, keep_dims=None): diff --git a/mindspore/nn/layer/normalization.py b/mindspore/nn/layer/normalization.py index 548a88e26e..e67a3383a7 100644 --- a/mindspore/nn/layer/normalization.py +++ b/mindspore/nn/layer/normalization.py @@ -285,12 +285,11 @@ class BatchNorm1d(_BatchNorm): Examples: >>> net = nn.BatchNorm1d(num_features=4) - >>> input = Tensor(np.random.randint(0, 255, [3, 4]), mindspore.float32) - >>> result = net(input) - >>> print(result) - [[ 57.99971 50.99974 220.99889 222.99889 ] - [106.99947 193.99902 77.99961 101.99949 ] - [ 85.99957 188.99905 46.99976 226.99887 ]] + >>> input = Tensor(np.random.randint(0, 255, [2, 4]), mindspore.float32) + >>> output = net(input) + >>> print(output) + [[210.99895 136.99931 89.99955 240.9988 ] + [ 87.99956 157.9992 89.99955 42.999786]] """ def __init__(self, @@ -371,23 +370,15 @@ class BatchNorm2d(_BatchNorm): Examples: >>> net = nn.BatchNorm2d(num_features=3) - >>> input = Tensor(np.random.randint(0, 255, [1, 3, 4, 4]), mindspore.float32) - >>> result = net(input) - >>> print(result) - [[[[148.99925 148.99925 178.9991 77.99961 ] - [ 41.99979 97.99951 157.9992 94.99953 ] - [ 87.99956 158.9992 50.99974 179.9991 ] - [146.99927 27.99986 119.9994 253.99873 ]] - - [[178.9991 187.99905 190.99904 88.99956 ] - [213.99893 158.9992 13.99993 200.999 ] - [224.99887 56.99971 246.99876 239.9988 ] - [ 97.99951 34.99983 28.99986 57.99971 ]] - - [[ 14.99993 31.99984 136.99931 207.99896 ] - [180.9991 28.99986 23.99988 71.99964 ] - [112.99944 36.99981 213.99893 71.99964 ] - [ 8.99996 162.99919 157.9992 41.99979 ]]]] + >>> input = Tensor(np.random.randint(0, 255, [1, 3, 2, 2]), mindspore.float32) + >>> output = net(input) + >>> print(output) + [[[[128.99936 53.99973] + [191.99904 183.99908]] + [[146.99927 182.99908] + [184.99907 120.9994 ]] + [[ 33.99983 234.99883] + [188.99905 11.99994]]]] """ def __init__(self, @@ -618,7 +609,7 @@ class GroupNorm(Cell): [[[[0. 0. 0. 0.] [0. 0. 0. 0.] [0. 0. 0. 0.] - [0. 0. 0. 0.]], + [0. 0. 0. 0.]] [[0. 0. 0. 0.] [0. 0. 0. 0.] [0. 0. 0. 0.] diff --git a/mindspore/nn/layer/pooling.py b/mindspore/nn/layer/pooling.py index daafe8a8a9..08828e34df 100644 --- a/mindspore/nn/layer/pooling.py +++ b/mindspore/nn/layer/pooling.py @@ -107,19 +107,7 @@ class MaxPool2d(_PoolNd): Examples: >>> pool = nn.MaxPool2d(kernel_size=3, stride=1) >>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32) - >>> print(x) - [[[[1. 5. 5. 1.] - [0. 3. 4. 8.] - [4. 2. 7. 6.] - [4. 9. 0. 1.]] - [[3. 6. 2. 6.] - [4. 4. 7. 8.] - [0. 0. 4. 0.] - [1. 8. 7. 
0.]]]] >>> output = pool(x) - >>> reuslt = output.shape - >>> print(result) - (1, 2, 2, 2) >>> print(output) [[[[7. 8.] [9. 9.]] @@ -272,19 +260,7 @@ class AvgPool2d(_PoolNd): Examples: >>> pool = nn.AvgPool2d(kernel_size=3, stride=1) >>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32) - >>> print(x) - [[[[5. 5. 9. 9.] - [8. 4. 3. 0.] - [2. 7. 1. 2.] - [1. 8. 3. 3.]] - [[6. 8. 2. 4.] - [3. 0. 2. 1.] - [0. 8. 9. 7.] - [2. 1. 4. 9.]]]] >>> output = pool(x) - >>> result = output.shape - >>> print(result) - (1, 2, 2, 2) >>> print(output) [[[[4.888889 4.4444447] [4.111111 3.4444444]] diff --git a/mindspore/nn/layer/quant.py b/mindspore/nn/layer/quant.py index fd6b64e368..fead4d6ed9 100644 --- a/mindspore/nn/layer/quant.py +++ b/mindspore/nn/layer/quant.py @@ -234,9 +234,10 @@ class FakeQuantWithMinMaxObserver(UniformQuantObserver): Examples: >>> fake_quant = nn.FakeQuantWithMinMaxObserver() >>> input = Tensor(np.array([[1, 2, 1], [-2, 0, -1]]), mindspore.float32) - >>> result = fake_quant(input) - >>> print(result) - [[0.9882355, 1.9764705, 0.9882355], [-1.9764705, 0. , -0.9882355]] + >>> output = fake_quant(input) + >>> print(output) + [[ 0.9882355 1.9764705 0.9882355] + [-1.9764705 0. -0.9882355]] """ def __init__(self, @@ -589,11 +590,10 @@ class Conv2dBnFoldQuant(Cell): Examples: >>> qconfig = compression.quant.create_quant_config() >>> conv2d_bnfold = nn.Conv2dBnFoldQuant(1, 6, kernel_size=(2, 2), stride=(1, 1), pad_mode="valid", - >>> quant_config=qconfig) + ... quant_config=qconfig) >>> input = Tensor(np.random.randint(-2, 2, (2, 1, 3, 3)), mindspore.float32) - >>> result = conv2d_bnfold(input) - >>> output = result.shape - >>> print(output) + >>> output = conv2d_bnfold(input) + >>> print(output.shape) (2, 6, 2, 2) """ @@ -775,11 +775,10 @@ class Conv2dBnWithoutFoldQuant(Cell): Examples: >>> qconfig = compression.quant.create_quant_config() >>> conv2d_no_bnfold = nn.Conv2dBnWithoutFoldQuant(1, 6, kernel_size=(2, 2), stride=(1, 1), pad_mode="valid", - >>> quant_config=qconfig) + ... quant_config=qconfig) >>> input = Tensor(np.random.randint(-2, 2, (2, 1, 3, 3)), mstype.float32) - >>> result = conv2d_no_bnfold(input) - >>> output = result.shape - >>> print(output) + >>> output = conv2d_no_bnfold(input) + >>> print(output.shape) (2, 6, 2, 2) """ @@ -897,11 +896,10 @@ class Conv2dQuant(Cell): Examples: >>> qconfig = compression.quant.create_quant_config() >>> conv2d_quant = nn.Conv2dQuant(1, 6, kernel_size= (2, 2), stride=(1, 1), pad_mode="valid", - >>> quant_config=qconfig) + ... quant_config=qconfig) >>> input = Tensor(np.random.randint(-2, 2, (2, 1, 3, 3)), mindspore.float32) - >>> result = conv2d_quant(input) - >>> output = result.shape - >>> print(output) + >>> output = conv2d_quant(input) + >>> print(output.shape) (2, 6, 2, 2) """ @@ -1106,9 +1104,10 @@ class ActQuant(_QuantActivation): >>> qconfig = compression.quant.create_quant_config() >>> act_quant = nn.ActQuant(nn.ReLU(), quant_config=qconfig) >>> input = Tensor(np.array([[1, 2, -1], [-2, 0, -1]]), mindspore.float32) - >>> result = act_quant(input) - >>> print(result) - [[0.9882355, 1.9764705, 0.], [0., 0., 0.]] + >>> output = act_quant(input) + >>> print(output) + [[0.9882355 1.9764705 0. ] + [0. 0. 0. 
]] """ def __init__(self, @@ -1168,9 +1167,10 @@ class TensorAddQuant(Cell): >>> add_quant = nn.TensorAddQuant(quant_config=qconfig) >>> input_x1 = Tensor(np.array([[1, 2, 1], [-2, 0, -1]]), mindspore.float32) >>> input_x2 = Tensor(np.ones((2, 3)), mindspore.float32) - >>> result = add_quant(input_x1, input_x2) - >>> print(result) - [[1.9764705, 3.011765, 1.9764705], [-0.9882355, 0.9882355, 0.]] + >>> output = add_quant(input_x1, input_x2) + >>> print(output) + [[ 1.9764705 3.011765 1.9764705] + [-0.9882355 0.9882355 0. ]] """ def __init__(self, @@ -1215,9 +1215,10 @@ class MulQuant(Cell): >>> mul_quant = nn.MulQuant(quant_config=qconfig) >>> input_x1 = Tensor(np.array([[1, 2, 1], [-2, 0, -1]]), mindspore.float32) >>> input_x2 = Tensor(np.ones((2, 3)) * 2, mindspore.float32) - >>> result = mul_quant(input_x1, input_x2) - >>> print(result) - [[1.9764705, 4.0000005, 1.9764705], [-4., 0., -1.9764705]] + >>> output = mul_quant(input_x1, input_x2) + >>> print(output) + [[ 1.9764705 4.0000005 1.9764705] + [-4. 0. -1.9764705]] """ def __init__(self, diff --git a/mindspore/nn/loss/loss.py b/mindspore/nn/loss/loss.py index 3af96d2246..8e2e4f5b50 100644 --- a/mindspore/nn/loss/loss.py +++ b/mindspore/nn/loss/loss.py @@ -95,7 +95,8 @@ class L1Loss(_Loss): >>> loss = nn.L1Loss() >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32) >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32) - >>> loss(input_data, target_data) + >>> output = loss(input_data, target_data) + >>> print(output) 0.33333334 """ def __init__(self, reduction='mean'): @@ -183,7 +184,9 @@ class SmoothL1Loss(_Loss): >>> loss = nn.SmoothL1Loss() >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32) >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32) - >>> loss(input_data, target_data) + >>> output = loss(input_data, target_data) + >>> print(output) + [0. 0. 
0.5] """ def __init__(self, beta=1.0): super(SmoothL1Loss, self).__init__() @@ -236,7 +239,9 @@ class SoftmaxCrossEntropyWithLogits(_Loss): >>> logits = Tensor(np.random.randint(0, 9, [1, 10]), mindspore.float32) >>> labels_np = np.ones([1,]).astype(np.int32) >>> labels = Tensor(labels_np) - >>> loss(logits, labels) + >>> output = loss(logits, labels) + >>> print(output) + [5.6924148] """ def __init__(self, sparse=False, @@ -299,7 +304,7 @@ class SampledSoftmaxLoss(_Loss): >>> labels = Tensor([0, 1, 2]) >>> inputs = Tensor(np.random.randint(0, 9, [3, 10]), mindspore.float32) >>> output = loss(weights, biases, labels, inputs) - >>> print(output) # output is ranndom + >>> print(output) [ 4.0181947 46.050743 7.0009117] """ @@ -557,7 +562,7 @@ class CosineEmbeddingLoss(_Loss): >>> cosine_embedding_loss = nn.CosineEmbeddingLoss() >>> output = cosine_embedding_loss(x1, x2, y) >>> print(output) - [0.0003426671] + [0.0003426075] """ def __init__(self, margin=0.0, reduction="mean"): super(CosineEmbeddingLoss, self).__init__(reduction) diff --git a/mindspore/nn/metrics/topk.py b/mindspore/nn/metrics/topk.py index bf1ea05939..9767f87a8b 100644 --- a/mindspore/nn/metrics/topk.py +++ b/mindspore/nn/metrics/topk.py @@ -39,7 +39,9 @@ class TopKCategoricalAccuracy(Metric): >>> topk = nn.TopKCategoricalAccuracy(3) >>> topk.clear() >>> topk.update(x, y) - >>> result = topk.eval() + >>> output = topk.eval() + >>> print(output) + 0.6666666666666666 """ def __init__(self, k): super(TopKCategoricalAccuracy, self).__init__() @@ -103,7 +105,9 @@ class Top1CategoricalAccuracy(TopKCategoricalAccuracy): >>> topk = nn.Top1CategoricalAccuracy() >>> topk.clear() >>> topk.update(x, y) - >>> result = topk.eval() + >>> output = topk.eval() + >>> print(output) + 0.0 """ def __init__(self): super(Top1CategoricalAccuracy, self).__init__(1) @@ -121,7 +125,9 @@ class Top5CategoricalAccuracy(TopKCategoricalAccuracy): >>> topk = nn.Top5CategoricalAccuracy() >>> topk.clear() >>> topk.update(x, y) - >>> result = topk.eval() + >>> output = topk.eval() + >>> print(output) + 1.0 """ def __init__(self): super(Top5CategoricalAccuracy, self).__init__(5) diff --git a/mindspore/nn/probability/bijector/exp.py b/mindspore/nn/probability/bijector/exp.py index 9cb2159590..62b782911b 100644 --- a/mindspore/nn/probability/bijector/exp.py +++ b/mindspore/nn/probability/bijector/exp.py @@ -45,6 +45,7 @@ class Exp(PowerTransform): ... ans2 = self.s1.inverse(value) ... ans3 = self.s1.forward_log_jacobian(value) ... ans4 = self.s1.inverse_log_jacobian(value) + ... """ def __init__(self, diff --git a/mindspore/nn/probability/bijector/gumbel_cdf.py b/mindspore/nn/probability/bijector/gumbel_cdf.py index 1579111251..3b027d6ee0 100644 --- a/mindspore/nn/probability/bijector/gumbel_cdf.py +++ b/mindspore/nn/probability/bijector/gumbel_cdf.py @@ -53,6 +53,7 @@ class GumbelCDF(Bijector): ... ans2 = self.gum.inverse(value) ... ans3 = self.gum.forward_log_jacobian(value) ... ans4 = self.gum.inverse_log_jacobian(value) + ... """ def __init__(self, diff --git a/mindspore/nn/probability/bijector/power_transform.py b/mindspore/nn/probability/bijector/power_transform.py index edd5901011..5603213419 100644 --- a/mindspore/nn/probability/bijector/power_transform.py +++ b/mindspore/nn/probability/bijector/power_transform.py @@ -57,6 +57,7 @@ class PowerTransform(Bijector): ... ans2 = self.s1.inverse(value) ... ans3 = self.s1.forward_log_jacobian(value) ... ans4 = self.s1.inverse_log_jacobian(value) + ... 
""" def __init__(self, diff --git a/mindspore/nn/probability/bijector/scalar_affine.py b/mindspore/nn/probability/bijector/scalar_affine.py index eeb1721529..d4c306996c 100644 --- a/mindspore/nn/probability/bijector/scalar_affine.py +++ b/mindspore/nn/probability/bijector/scalar_affine.py @@ -53,6 +53,7 @@ class ScalarAffine(Bijector): ... ans2 = self.s1.inverse(value) ... ans3 = self.s1.forward_log_jacobian(value) ... ans4 = self.s1.inverse_log_jacobian(value) + ... """ def __init__(self, diff --git a/mindspore/nn/probability/distribution/bernoulli.py b/mindspore/nn/probability/distribution/bernoulli.py index d048cdadb9..c97ea84d83 100644 --- a/mindspore/nn/probability/distribution/bernoulli.py +++ b/mindspore/nn/probability/distribution/bernoulli.py @@ -50,62 +50,63 @@ class Bernoulli(Distribution): >>> >>> # To use the Bernoulli distribution in a network. >>> class net(Cell): - >>> def __init__(self): - >>> super(net, self).__init__(): - >>> self.b1 = msd.Bernoulli(0.5, dtype=mstype.int32) - >>> self.b2 = msd.Bernoulli(dtype=mstype.int32) - >>> - >>> # All the following calls in construct are valid. - >>> def construct(self, value, probs_b, probs_a): - >>> - >>> # Private interfaces of probability functions corresponding to public interfaces, including - >>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows. - >>> # Args: - >>> # value (Tensor): the value to be evaluated. - >>> # probs1 (Tensor): the probability of success. Default: self.probs. - >>> - >>> # Examples of `prob`. - >>> # Similar calls can be made to other probability functions - >>> # by replacing `prob` by the name of the function. - >>> ans = self.b1.prob(value) - >>> # Evaluate `prob` with respect to distribution b. - >>> ans = self.b1.prob(value, probs_b) - >>> # `probs` must be passed in during function calls. - >>> ans = self.b2.prob(value, probs_a) - >>> - >>> - >>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments. - >>> # Args: - >>> # probs1 (Tensor): the probability of success. Default: self.probs. - >>> - >>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar. - >>> ans = self.b1.mean() # return 0.5 - >>> ans = self.b1.mean(probs_b) # return probs_b - >>> # `probs` must be passed in during function calls. - >>> ans = self.b2.mean(probs_a) - >>> - >>> - >>> # Interfaces of `kl_loss` and `cross_entropy` are the same as follows: - >>> # Args: - >>> # dist (str): the name of the distribution. Only 'Bernoulli' is supported. - >>> # probs1_b (Tensor): the probability of success of distribution b. - >>> # probs1_a (Tensor): the probability of success of distribution a. Default: self.probs. - >>> - >>> # Examples of kl_loss. `cross_entropy` is similar. - >>> ans = self.b1.kl_loss('Bernoulli', probs_b) - >>> ans = self.b1.kl_loss('Bernoulli', probs_b, probs_a) - >>> # An additional `probs_a` must be passed in. - >>> ans = self.b2.kl_loss('Bernoulli', probs_b, probs_a) - >>> - >>> - >>> # Examples of `sample`. - >>> # Args: - >>> # shape (tuple): the shape of the sample. Default: (). - >>> # probs1 (Tensor): the probability of success. Default: self.probs. - >>> ans = self.b1.sample() - >>> ans = self.b1.sample((2,3)) - >>> ans = self.b1.sample((2,3), probs_b) - >>> ans = self.b2.sample((2,3), probs_a) + ... def __init__(self): + ... super(net, self).__init__(): + ... self.b1 = msd.Bernoulli(0.5, dtype=mstype.int32) + ... self.b2 = msd.Bernoulli(dtype=mstype.int32) + ... + ... # All the following calls in construct are valid. + ... 
def construct(self, value, probs_b, probs_a): + ... + ... # Private interfaces of probability functions corresponding to public interfaces, including + ... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows. + ... # Args: + ... # value (Tensor): the value to be evaluated. + ... # probs1 (Tensor): the probability of success. Default: self.probs. + ... + ... # Examples of `prob`. + ... # Similar calls can be made to other probability functions + ... # by replacing `prob` by the name of the function. + ... ans = self.b1.prob(value) + ... # Evaluate `prob` with respect to distribution b. + ... ans = self.b1.prob(value, probs_b) + ... # `probs` must be passed in during function calls. + ... ans = self.b2.prob(value, probs_a) + ... + ... + ... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments. + ... # Args: + ... # probs1 (Tensor): the probability of success. Default: self.probs. + ... + ... # Examples of `mean`. `sd`, `var`, and `entropy` are similar. + ... ans = self.b1.mean() # return 0.5 + ... ans = self.b1.mean(probs_b) # return probs_b + ... # `probs` must be passed in during function calls. + ... ans = self.b2.mean(probs_a) + ... + ... + ... # Interfaces of `kl_loss` and `cross_entropy` are the same as follows: + ... # Args: + ... # dist (str): the name of the distribution. Only 'Bernoulli' is supported. + ... # probs1_b (Tensor): the probability of success of distribution b. + ... # probs1_a (Tensor): the probability of success of distribution a. Default: self.probs. + ... + ... # Examples of kl_loss. `cross_entropy` is similar. + ... ans = self.b1.kl_loss('Bernoulli', probs_b) + ... ans = self.b1.kl_loss('Bernoulli', probs_b, probs_a) + ... # An additional `probs_a` must be passed in. + ... ans = self.b2.kl_loss('Bernoulli', probs_b, probs_a) + ... + ... + ... # Examples of `sample`. + ... # Args: + ... # shape (tuple): the shape of the sample. Default: (). + ... # probs1 (Tensor): the probability of success. Default: self.probs. + ... ans = self.b1.sample() + ... ans = self.b1.sample((2,3)) + ... ans = self.b1.sample((2,3), probs_b) + ... ans = self.b2.sample((2,3), probs_a) + ... """ def __init__(self, diff --git a/mindspore/nn/probability/distribution/categorical.py b/mindspore/nn/probability/distribution/categorical.py index ea98bbaaa0..ce451f16ce 100644 --- a/mindspore/nn/probability/distribution/categorical.py +++ b/mindspore/nn/probability/distribution/categorical.py @@ -46,59 +46,60 @@ class Categorical(Distribution): >>> >>> # To use a Categorical distribution in a network >>> class net(Cell): - >>> def __init__(self, probs): - >>> super(net, self).__init__(): - >>> self.ca = msd.Categorical(probs=[0.2, 0.8], dtype=mstype.int32) - >>> self.ca1 = msd.Categorical(dtype=mstype.int32) - >>> - >>> # All the following calls in construct are valid - >>> def construct(self, value): - >>> - >>> # Private interfaces of probability functions corresponding to public interfaces, including - >>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows. - >>> # Args: - >>> # value (Tensor): the value to be evaluated. - >>> # probs (Tensor): event probabilities. Default: self.probs. - >>> - >>> # Examples of `prob`. - >>> # Similar calls can be made to other probability functions - >>> # by replacing `prob` by the name of the function. - >>> ans = self.ca.prob(value) - >>> # Evaluate `prob` with respect to distribution b. 
- >>> ans = self.ca.prob(value, probs_b) - >>> # `probs` must be passed in during function calls. - >>> ans = self.ca1.prob(value, probs_a) - >>> - >>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments. - >>> # Args: - >>> # probs (Tensor): event probabilities. Default: self.probs. - >>> - >>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar. - >>> ans = self.ca.mean() # return 0.8 - >>> ans = self.ca.mean(probs_b) - >>> # `probs` must be passed in during function calls. - >>> ans = self.ca1.mean(probs_a) - >>> - >>> # Interfaces of `kl_loss` and `cross_entropy` are the same as follows: - >>> # Args: - >>> # dist (str): the name of the distribution. Only 'Categorical' is supported. - >>> # probs_b (Tensor): event probabilities of distribution b. - >>> # probs (Tensor): event probabilities of distribution a. Default: self.probs. - >>> - >>> # Examples of kl_loss. `cross_entropy` is similar. - >>> ans = self.ca.kl_loss('Categorical', probs_b) - >>> ans = self.ca.kl_loss('Categorical', probs_b, probs_a) - >>> # An additional `probs` must be passed in. - >>> ans = self.ca1.kl_loss('Categorical', probs_b, probs_a) - >>> - >>> # Examples of `sample`. - >>> # Args: - >>> # shape (tuple): the shape of the sample. Default: (). - >>> # probs (Tensor): event probabilities. Default: self.probs. - >>> ans = self.ca.sample() - >>> ans = self.ca.sample((2,3)) - >>> ans = self.ca.sample((2,3), probs_b) - >>> ans = self.ca1.sample((2,3), probs_a) + ... def __init__(self, probs): + ... super(net, self).__init__(): + ... self.ca = msd.Categorical(probs=[0.2, 0.8], dtype=mstype.int32) + ... self.ca1 = msd.Categorical(dtype=mstype.int32) + ... + ... # All the following calls in construct are valid + ... def construct(self, value): + ... + ... # Private interfaces of probability functions corresponding to public interfaces, including + ... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows. + ... # Args: + ... # value (Tensor): the value to be evaluated. + ... # probs (Tensor): event probabilities. Default: self.probs. + ... + ... # Examples of `prob`. + ... # Similar calls can be made to other probability functions + ... # by replacing `prob` by the name of the function. + ... ans = self.ca.prob(value) + ... # Evaluate `prob` with respect to distribution b. + ... ans = self.ca.prob(value, probs_b) + ... # `probs` must be passed in during function calls. + ... ans = self.ca1.prob(value, probs_a) + ... + ... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments. + ... # Args: + ... # probs (Tensor): event probabilities. Default: self.probs. + ... + ... # Examples of `mean`. `sd`, `var`, and `entropy` are similar. + ... ans = self.ca.mean() # return 0.8 + ... ans = self.ca.mean(probs_b) + ... # `probs` must be passed in during function calls. + ... ans = self.ca1.mean(probs_a) + ... + ... # Interfaces of `kl_loss` and `cross_entropy` are the same as follows: + ... # Args: + ... # dist (str): the name of the distribution. Only 'Categorical' is supported. + ... # probs_b (Tensor): event probabilities of distribution b. + ... # probs (Tensor): event probabilities of distribution a. Default: self.probs. + ... + ... # Examples of kl_loss. `cross_entropy` is similar. + ... ans = self.ca.kl_loss('Categorical', probs_b) + ... ans = self.ca.kl_loss('Categorical', probs_b, probs_a) + ... # An additional `probs` must be passed in. + ... ans = self.ca1.kl_loss('Categorical', probs_b, probs_a) + ... + ... 
# Examples of `sample`. + ... # Args: + ... # shape (tuple): the shape of the sample. Default: (). + ... # probs (Tensor): event probabilities. Default: self.probs. + ... ans = self.ca.sample() + ... ans = self.ca.sample((2,3)) + ... ans = self.ca.sample((2,3), probs_b) + ... ans = self.ca1.sample((2,3), probs_a) + ... """ def __init__(self, diff --git a/mindspore/nn/probability/distribution/exponential.py b/mindspore/nn/probability/distribution/exponential.py index 907888f191..0c349ba228 100644 --- a/mindspore/nn/probability/distribution/exponential.py +++ b/mindspore/nn/probability/distribution/exponential.py @@ -52,62 +52,63 @@ class Exponential(Distribution): >>> >>> # To use an Exponential distribution in a network. >>> class net(Cell): - >>> def __init__(self): - >>> super(net, self).__init__(): - >>> self.e1 = msd.Exponential(0.5, dtype=mstype.float32) - >>> self.e2 = msd.Exponential(dtype=mstype.float32) - >>> - >>> # All the following calls in construct are valid. - >>> def construct(self, value, rate_b, rate_a): - >>> - >>> # Private interfaces of probability functions corresponding to public interfaces, including - >>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows. - >>> # Args: - >>> # value (Tensor): the value to be evaluated. - >>> # rate (Tensor): the rate of the distribution. Default: self.rate. - >>> - >>> # Examples of `prob`. - >>> # Similar calls can be made to other probability functions - >>> # by replacing `prob` by the name of the function. - >>> ans = self.e1.prob(value) - >>> # Evaluate with respect to distribution b. - >>> ans = self.e1.prob(value, rate_b) - >>> # `rate` must be passed in during function calls. - >>> ans = self.e2.prob(value, rate_a) - >>> - >>> - >>> # Functions `mean`, `sd`, 'var', and 'entropy' have the same arguments as follows. - >>> # Args: - >>> # rate (Tensor): the rate of the distribution. Default: self.rate. - >>> - >>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar. - >>> ans = self.e1.mean() # return 2 - >>> ans = self.e1.mean(rate_b) # return 1 / rate_b - >>> # `rate` must be passed in during function calls. - >>> ans = self.e2.mean(rate_a) - >>> - >>> - >>> # Interfaces of `kl_loss` and `cross_entropy` are the same. - >>> # Args: - >>> # dist (str): The name of the distribution. Only 'Exponential' is supported. - >>> # rate_b (Tensor): the rate of distribution b. - >>> # rate_a (Tensor): the rate of distribution a. Default: self.rate. - >>> - >>> # Examples of `kl_loss`. `cross_entropy` is similar. - >>> ans = self.e1.kl_loss('Exponential', rate_b) - >>> ans = self.e1.kl_loss('Exponential', rate_b, rate_a) - >>> # An additional `rate` must be passed in. - >>> ans = self.e2.kl_loss('Exponential', rate_b, rate_a) - >>> - >>> - >>> # Examples of `sample`. - >>> # Args: - >>> # shape (tuple): the shape of the sample. Default: () - >>> # probs1 (Tensor): the rate of the distribution. Default: self.rate. - >>> ans = self.e1.sample() - >>> ans = self.e1.sample((2,3)) - >>> ans = self.e1.sample((2,3), rate_b) - >>> ans = self.e2.sample((2,3), rate_a) + ... def __init__(self): + ... super(net, self).__init__(): + ... self.e1 = msd.Exponential(0.5, dtype=mstype.float32) + ... self.e2 = msd.Exponential(dtype=mstype.float32) + ... + ... # All the following calls in construct are valid. + ... def construct(self, value, rate_b, rate_a): + ... + ... # Private interfaces of probability functions corresponding to public interfaces, including + ... 
# `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows. + ... # Args: + ... # value (Tensor): the value to be evaluated. + ... # rate (Tensor): the rate of the distribution. Default: self.rate. + ... + ... # Examples of `prob`. + ... # Similar calls can be made to other probability functions + ... # by replacing `prob` by the name of the function. + ... ans = self.e1.prob(value) + ... # Evaluate with respect to distribution b. + ... ans = self.e1.prob(value, rate_b) + ... # `rate` must be passed in during function calls. + ... ans = self.e2.prob(value, rate_a) + ... + ... + ... # Functions `mean`, `sd`, 'var', and 'entropy' have the same arguments as follows. + ... # Args: + ... # rate (Tensor): the rate of the distribution. Default: self.rate. + ... + ... # Examples of `mean`. `sd`, `var`, and `entropy` are similar. + ... ans = self.e1.mean() # return 2 + ... ans = self.e1.mean(rate_b) # return 1 / rate_b + ... # `rate` must be passed in during function calls. + ... ans = self.e2.mean(rate_a) + ... + ... + ... # Interfaces of `kl_loss` and `cross_entropy` are the same. + ... # Args: + ... # dist (str): The name of the distribution. Only 'Exponential' is supported. + ... # rate_b (Tensor): the rate of distribution b. + ... # rate_a (Tensor): the rate of distribution a. Default: self.rate. + ... + ... # Examples of `kl_loss`. `cross_entropy` is similar. + ... ans = self.e1.kl_loss('Exponential', rate_b) + ... ans = self.e1.kl_loss('Exponential', rate_b, rate_a) + ... # An additional `rate` must be passed in. + ... ans = self.e2.kl_loss('Exponential', rate_b, rate_a) + ... + ... + ... # Examples of `sample`. + ... # Args: + ... # shape (tuple): the shape of the sample. Default: () + ... # probs1 (Tensor): the rate of the distribution. Default: self.rate. + ... ans = self.e1.sample() + ... ans = self.e1.sample((2,3)) + ... ans = self.e1.sample((2,3), rate_b) + ... ans = self.e2.sample((2,3), rate_a) + ... """ def __init__(self, diff --git a/mindspore/nn/probability/distribution/geometric.py b/mindspore/nn/probability/distribution/geometric.py index ec1e9c94e3..1f1677a0ae 100644 --- a/mindspore/nn/probability/distribution/geometric.py +++ b/mindspore/nn/probability/distribution/geometric.py @@ -53,62 +53,63 @@ class Geometric(Distribution): >>> >>> # To use a Geometric distribution in a network. >>> class net(Cell): - >>> def __init__(self): - >>> super(net, self).__init__(): - >>> self.g1 = msd.Geometric(0.5, dtype=mstype.int32) - >>> self.g2 = msd.Geometric(dtype=mstype.int32) - >>> - >>> # The following calls are valid in construct. - >>> def construct(self, value, probs_b, probs_a): - >>> - >>> # Private interfaces of probability functions corresponding to public interfaces, including - >>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows. - >>> # Args: - >>> # value (Tensor): the value to be evaluated. - >>> # probs1 (Tensor): the probability of success of a Bernoulli trail. Default: self.probs. - >>> - >>> # Examples of `prob`. - >>> # Similar calls can be made to other probability functions - >>> # by replacing `prob` by the name of the function. - >>> ans = self.g1.prob(value) - >>> # Evaluate with respect to distribution b. - >>> ans = self.g1.prob(value, probs_b) - >>> # `probs` must be passed in during function calls. - >>> ans = self.g2.prob(value, probs_a) - >>> - >>> - >>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments. 
- >>> # Args: - >>> # probs1 (Tensor): the probability of success of a Bernoulli trail. Default: self.probs. - >>> - >>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar. - >>> ans = self.g1.mean() # return 1.0 - >>> ans = self.g1.mean(probs_b) - >>> # Probs must be passed in during function calls - >>> ans = self.g2.mean(probs_a) - >>> - >>> - >>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same. - >>> # Args: - >>> # dist (str): the name of the distribution. Only 'Geometric' is supported. - >>> # probs1_b (Tensor): the probability of success of a Bernoulli trail of distribution b. - >>> # probs1_a (Tensor): the probability of success of a Bernoulli trail of distribution a. Default: self.probs. - >>> - >>> # Examples of `kl_loss`. `cross_entropy` is similar. - >>> ans = self.g1.kl_loss('Geometric', probs_b) - >>> ans = self.g1.kl_loss('Geometric', probs_b, probs_a) - >>> # An additional `probs` must be passed in. - >>> ans = self.g2.kl_loss('Geometric', probs_b, probs_a) - >>> - >>> - >>> # Examples of `sample`. - >>> # Args: - >>> # shape (tuple): the shape of the sample. Default: () - >>> # probs1 (Tensor): the probability of success of a Bernoulli trail. Default: self.probs. - >>> ans = self.g1.sample() - >>> ans = self.g1.sample((2,3)) - >>> ans = self.g1.sample((2,3), probs_b) - >>> ans = self.g2.sample((2,3), probs_a) + ... def __init__(self): + ... super(net, self).__init__(): + ... self.g1 = msd.Geometric(0.5, dtype=mstype.int32) + ... self.g2 = msd.Geometric(dtype=mstype.int32) + ... + ... # The following calls are valid in construct. + ... def construct(self, value, probs_b, probs_a): + ... + ... # Private interfaces of probability functions corresponding to public interfaces, including + ... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows. + ... # Args: + ... # value (Tensor): the value to be evaluated. + ... # probs1 (Tensor): the probability of success of a Bernoulli trail. Default: self.probs. + ... + ... # Examples of `prob`. + ... # Similar calls can be made to other probability functions + ... # by replacing `prob` by the name of the function. + ... ans = self.g1.prob(value) + ... # Evaluate with respect to distribution b. + ... ans = self.g1.prob(value, probs_b) + ... # `probs` must be passed in during function calls. + ... ans = self.g2.prob(value, probs_a) + ... + ... + ... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments. + ... # Args: + ... # probs1 (Tensor): the probability of success of a Bernoulli trail. Default: self.probs. + ... + ... # Examples of `mean`. `sd`, `var`, and `entropy` are similar. + ... ans = self.g1.mean() # return 1.0 + ... ans = self.g1.mean(probs_b) + ... # Probs must be passed in during function calls + ... ans = self.g2.mean(probs_a) + ... + ... + ... # Interfaces of 'kl_loss' and 'cross_entropy' are the same. + ... # Args: + ... # dist (str): the name of the distribution. Only 'Geometric' is supported. + ... # probs1_b (Tensor): the probability of success of a Bernoulli trail of distribution b. + ... # probs1_a (Tensor): the probability of success of a Bernoulli trail of distribution a. Default: self.probs. + ... + ... # Examples of `kl_loss`. `cross_entropy` is similar. + ... ans = self.g1.kl_loss('Geometric', probs_b) + ... ans = self.g1.kl_loss('Geometric', probs_b, probs_a) + ... # An additional `probs` must be passed in. + ... ans = self.g2.kl_loss('Geometric', probs_b, probs_a) + ... + ... + ... # Examples of `sample`. + ... 
# Args: + ... # shape (tuple): the shape of the sample. Default: () + ... # probs1 (Tensor): the probability of success of a Bernoulli trail. Default: self.probs. + ... ans = self.g1.sample() + ... ans = self.g1.sample((2,3)) + ... ans = self.g1.sample((2,3), probs_b) + ... ans = self.g2.sample((2,3), probs_a) + ... """ def __init__(self, diff --git a/mindspore/nn/probability/distribution/gumbel.py b/mindspore/nn/probability/distribution/gumbel.py index 97f2b46f20..a8135f185c 100644 --- a/mindspore/nn/probability/distribution/gumbel.py +++ b/mindspore/nn/probability/distribution/gumbel.py @@ -50,47 +50,48 @@ class Gumbel(TransformedDistribution): >>> >>> # To use a Gumbel distribution in a network. >>> class net(Cell): - >>> def __init__(self): - >>> super(net, self).__init__(): - >>> self.g1 = msd.Gumbel(0.0, 1.0, dtype=mstype.float32) - >>> - >>> # The following calls are valid in construct. - >>> def construct(self, value, loc_b, scale_b): - >>> - >>> # Private interfaces of probability functions corresponding to public interfaces, including - >>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same - >>> # arguments as follows. - >>> # Args: - >>> # value (Tensor): the value to be evaluated. - >>> - >>> # Examples of `prob`. - >>> # Similar calls can be made to other probability functions - >>> # by replacing 'prob' by the name of the function. - >>> ans = self.g1.prob(value) - >>> - >>> # Functions `mean`, `mode`, sd`, `var`, and `entropy` do not take in any argument. - >>> ans = self.g1.mean() - >>> ans = self.g1.mode() - >>> ans = self.g1.sd() - >>> ans = self.g1.entropy() - >>> ans = self.g1.var() - >>> - >>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same: - >>> # Args: - >>> # dist (str): the type of the distributions. Only "Gumbel" is supported. - >>> # loc_b (Tensor): the loc of distribution b. - >>> # scale_b (Tensor): the scale distribution b. - >>> - >>> # Examples of `kl_loss`. `cross_entropy` is similar. - >>> ans = self.g1.kl_loss('Gumbel', loc_b, scale_b) - >>> ans = self.g1.cross_entropy('Gumbel', loc_b, scale_b) - >>> - >>> # Examples of `sample`. - >>> # Args: - >>> # shape (tuple): the shape of the sample. Default: () - >>> - >>> ans = self.g1.sample() - >>> ans = self.g1.sample((2,3)) + ... def __init__(self): + ... super(net, self).__init__(): + ... self.g1 = msd.Gumbel(0.0, 1.0, dtype=mstype.float32) + ... + ... # The following calls are valid in construct. + ... def construct(self, value, loc_b, scale_b): + ... + ... # Private interfaces of probability functions corresponding to public interfaces, including + ... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same + ... # arguments as follows. + ... # Args: + ... # value (Tensor): the value to be evaluated. + ... + ... # Examples of `prob`. + ... # Similar calls can be made to other probability functions + ... # by replacing 'prob' by the name of the function. + ... ans = self.g1.prob(value) + ... + ... # Functions `mean`, `mode`, sd`, `var`, and `entropy` do not take in any argument. + ... ans = self.g1.mean() + ... ans = self.g1.mode() + ... ans = self.g1.sd() + ... ans = self.g1.entropy() + ... ans = self.g1.var() + ... + ... # Interfaces of 'kl_loss' and 'cross_entropy' are the same: + ... # Args: + ... # dist (str): the type of the distributions. Only "Gumbel" is supported. + ... # loc_b (Tensor): the loc of distribution b. + ... # scale_b (Tensor): the scale distribution b. + ... + ... # Examples of `kl_loss`. 
`cross_entropy` is similar. + ... ans = self.g1.kl_loss('Gumbel', loc_b, scale_b) + ... ans = self.g1.cross_entropy('Gumbel', loc_b, scale_b) + ... + ... # Examples of `sample`. + ... # Args: + ... # shape (tuple): the shape of the sample. Default: () + ... + ... ans = self.g1.sample() + ... ans = self.g1.sample((2,3)) + ... """ def __init__(self, diff --git a/mindspore/nn/probability/distribution/log_normal.py b/mindspore/nn/probability/distribution/log_normal.py index c82e79f75c..c6c9f518b9 100644 --- a/mindspore/nn/probability/distribution/log_normal.py +++ b/mindspore/nn/probability/distribution/log_normal.py @@ -53,75 +53,76 @@ class LogNormal(msd.TransformedDistribution): >>> >>> # To use a LogNormal distribution in a network. >>> class net(Cell): - >>> def __init__(self): - >>> super(net, self).__init__(): - >>> self.n1 = msd.LogNormal(0.0, 1.0, dtype=mstype.float32) - >>> self.n2 = msd.LogNormal(dtype=mstype.float32) - >>> - >>> # The following calls are valid in construct. - >>> def construct(self, value, loc_b, scale_b, loc_a, scale_a): - >>> - >>> # Private interfaces of probability functions corresponding to public interfaces, including - >>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same - >>> # arguments as follows. - >>> # Args: - >>> # value (Tensor): the value to be evaluated. - >>> # loc (Tensor): the loc of distribution. Default: None. If `loc` is passed in as None, - >>> # the mean of the underlying Normal distribution will be used. - >>> # scale (Tensor): the scale of distribution. Default: None. If `scale` is passed in as None, - >>> # the standard deviation of the underlying Normal distribution will be used. - >>> - >>> # Examples of `prob`. - >>> # Similar calls can be made to other probability functions - >>> # by replacing 'prob' by the name of the function. - >>> ans = self.n1.prob(value) - >>> # Evaluate with respect to distribution b. - >>> ans = self.n1.prob(value, loc_b, scale_b) - >>> # `loc` and `scale` must be passed in during function calls since they were not passed in construct. - >>> ans = self.n2.prob(value, loc_a, scale_a) - >>> - >>> - >>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments. - >>> # Args: - >>> # loc (Tensor): the loc of distribution. Default: None. If `loc` is passed in as None, - >>> # the mean of the underlying Normal distribution will be used. - >>> # scale (Tensor): the scale of distribution. Default: None. If `scale` is passed in as None, - >>> # the standard deviation of the underlying Normal distribution will be used. - >>> - >>> # Example of `mean`. `sd`, `var`, and `entropy` are similar. - >>> ans = self.n1.mean() # return 0.0 - >>> ans = self.n1.mean(loc_b, scale_b) # return mean_b - >>> # `loc` and `scale` must be passed in during function calls since they were not passed in construct. - >>> ans = self.n2.mean(loc_a, scale_a) - >>> - >>> - >>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same: - >>> # Args: - >>> # dist (str): the type of the distributions. Only "Normal" is supported. - >>> # loc_b (Tensor): the loc of distribution b. - >>> # scale_b (Tensor): the scale distribution b. - >>> # loc_a (Tensor): the loc of distribution a. Default: None. If `loc` is passed in as None, - >>> # the mean of the underlying Normal distribution will be used. - >>> # scale_a (Tensor): the scale distribution a. Default: None. If `scale` is passed in as None, - >>> # the standard deviation of the underlying Normal distribution will be used. 
- >>> - >>> # Examples of `kl_loss`. `cross_entropy` is similar. - >>> ans = self.n1.kl_loss('Normal', loc_b, scale_b) - >>> ans = self.n1.kl_loss('Normal', loc_b, scale_b, loc_a, scale_a) - >>> # Additional `loc` and `scale` must be passed in since they were not passed in construct. - >>> ans = self.n2.kl_loss('Normal', loc_b, scale_b, loc_a, scale_a) - >>> - >>> # Examples of `sample`. - >>> # Args: - >>> # shape (tuple): the shape of the sample. Default: () - >>> # loc (Tensor): the loc of the distribution. Default: None. If `loc` is passed in as None, - >>> # the mean of the underlying Normal distribution will be used. - >>> # scale (Tensor): the scale of the distribution. Default: None. If `scale` is passed in as None, - >>> # the standard deviation of the underlying Normal distribution will be used. - >>> ans = self.n1.sample() - >>> ans = self.n1.sample((2,3)) - >>> ans = self.n1.sample((2,3), loc_b, scale_b) - >>> ans = self.n2.sample((2,3), loc_a, scale_a) + ... def __init__(self): + ... super(net, self).__init__(): + ... self.n1 = msd.LogNormal(0.0, 1.0, dtype=mstype.float32) + ... self.n2 = msd.LogNormal(dtype=mstype.float32) + ... + ... # The following calls are valid in construct. + ... def construct(self, value, loc_b, scale_b, loc_a, scale_a): + ... + ... # Private interfaces of probability functions corresponding to public interfaces, including + ... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same + ... # arguments as follows. + ... # Args: + ... # value (Tensor): the value to be evaluated. + ... # loc (Tensor): the loc of distribution. Default: None. If `loc` is passed in as None, + ... # the mean of the underlying Normal distribution will be used. + ... # scale (Tensor): the scale of distribution. Default: None. If `scale` is passed in as None, + ... # the standard deviation of the underlying Normal distribution will be used. + ... + ... # Examples of `prob`. + ... # Similar calls can be made to other probability functions + ... # by replacing 'prob' by the name of the function. + ... ans = self.n1.prob(value) + ... # Evaluate with respect to distribution b. + ... ans = self.n1.prob(value, loc_b, scale_b) + ... # `loc` and `scale` must be passed in during function calls since they were not passed in construct. + ... ans = self.n2.prob(value, loc_a, scale_a) + ... + ... + ... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments. + ... # Args: + ... # loc (Tensor): the loc of distribution. Default: None. If `loc` is passed in as None, + ... # the mean of the underlying Normal distribution will be used. + ... # scale (Tensor): the scale of distribution. Default: None. If `scale` is passed in as None, + ... # the standard deviation of the underlying Normal distribution will be used. + ... + ... # Example of `mean`. `sd`, `var`, and `entropy` are similar. + ... ans = self.n1.mean() # return 0.0 + ... ans = self.n1.mean(loc_b, scale_b) # return mean_b + ... # `loc` and `scale` must be passed in during function calls since they were not passed in construct. + ... ans = self.n2.mean(loc_a, scale_a) + ... + ... + ... # Interfaces of 'kl_loss' and 'cross_entropy' are the same: + ... # Args: + ... # dist (str): the type of the distributions. Only "Normal" is supported. + ... # loc_b (Tensor): the loc of distribution b. + ... # scale_b (Tensor): the scale distribution b. + ... # loc_a (Tensor): the loc of distribution a. Default: None. If `loc` is passed in as None, + ... 
# the mean of the underlying Normal distribution will be used. + ... # scale_a (Tensor): the scale distribution a. Default: None. If `scale` is passed in as None, + ... # the standard deviation of the underlying Normal distribution will be used. + ... + ... # Examples of `kl_loss`. `cross_entropy` is similar. + ... ans = self.n1.kl_loss('Normal', loc_b, scale_b) + ... ans = self.n1.kl_loss('Normal', loc_b, scale_b, loc_a, scale_a) + ... # Additional `loc` and `scale` must be passed in since they were not passed in construct. + ... ans = self.n2.kl_loss('Normal', loc_b, scale_b, loc_a, scale_a) + ... + ... # Examples of `sample`. + ... # Args: + ... # shape (tuple): the shape of the sample. Default: () + ... # loc (Tensor): the loc of the distribution. Default: None. If `loc` is passed in as None, + ... # the mean of the underlying Normal distribution will be used. + ... # scale (Tensor): the scale of the distribution. Default: None. If `scale` is passed in as None, + ... # the standard deviation of the underlying Normal distribution will be used. + ... ans = self.n1.sample() + ... ans = self.n1.sample((2,3)) + ... ans = self.n1.sample((2,3), loc_b, scale_b) + ... ans = self.n2.sample((2,3), loc_a, scale_a) + ... """ def __init__(self, diff --git a/mindspore/nn/probability/distribution/logistic.py b/mindspore/nn/probability/distribution/logistic.py index 1033f4de95..3fb4232e60 100644 --- a/mindspore/nn/probability/distribution/logistic.py +++ b/mindspore/nn/probability/distribution/logistic.py @@ -53,50 +53,51 @@ class Logistic(Distribution): >>> >>> # To use a Normal distribution in a network. >>> class net(Cell): - >>> def __init__(self): - >>> super(net, self).__init__(): - >>> self.l1 = msd.Logistic(0.0, 1.0, dtype=mstype.float32) - >>> self.l2 = msd.Logistic(dtype=mstype.float32) - >>> - >>> # The following calls are valid in construct. - >>> def construct(self, value, loc_b, scale_b, loc_a, scale_a): - >>> - >>> # Private interfaces of probability functions corresponding to public interfaces, including - >>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows. - >>> # Args: - >>> # value (Tensor): the value to be evaluated. - >>> # loc (Tensor): the location of the distribution. Default: self.loc. - >>> # scale (Tensor): the scale of the distribution. Default: self.scale. - >>> - >>> # Examples of `prob`. - >>> # Similar calls can be made to other probability functions - >>> # by replacing 'prob' by the name of the function - >>> ans = self.l1.prob(value) - >>> # Evaluate with respect to distribution b. - >>> ans = self.l1.prob(value, loc_b, scale_b) - >>> # `loc` and `scale` must be passed in during function calls - >>> ans = self.l2.prob(value, loc_a, scale_a) - >>> - >>> # Functions `mean`, `mode`, `sd`, `var`, and `entropy` have the same arguments. - >>> # Args: - >>> # loc (Tensor): the location of the distribution. Default: self.loc. - >>> # scale (Tensor): the scale of the distribution. Default: self.scale. - >>> - >>> # Example of `mean`. `mode`, `sd`, `var`, and `entropy` are similar. - >>> ans = self.l1.mean() # return 0.0 - >>> ans = self.l1.mean(loc_b, scale_b) # return loc_b - >>> # `loc` and `scale` must be passed in during function calls. - >>> ans = self.l2.mean(loc_a, scale_a) - >>> - >>> # Examples of `sample`. - >>> # Args: - >>> # shape (tuple): the shape of the sample. Default: () - >>> # loc (Tensor): the location of the distribution. Default: self.loc. 
- >>> # scale (Tensor): the scale of the distribution. Default: self.scale. - >>> ans = self.l1.sample() - >>> ans = self.l1.sample((2,3)) - >>> ans = self.l1.sample((2,3), loc_b, scale_b) - >>> ans = self.l2.sample((2,3), loc_a, scale_a) + ... def __init__(self): + ... super(net, self).__init__(): + ... self.l1 = msd.Logistic(0.0, 1.0, dtype=mstype.float32) + ... self.l2 = msd.Logistic(dtype=mstype.float32) + ... + ... # The following calls are valid in construct. + ... def construct(self, value, loc_b, scale_b, loc_a, scale_a): + ... + ... # Private interfaces of probability functions corresponding to public interfaces, including + ... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows. + ... # Args: + ... # value (Tensor): the value to be evaluated. + ... # loc (Tensor): the location of the distribution. Default: self.loc. + ... # scale (Tensor): the scale of the distribution. Default: self.scale. + ... + ... # Examples of `prob`. + ... # Similar calls can be made to other probability functions + ... # by replacing 'prob' by the name of the function + ... ans = self.l1.prob(value) + ... # Evaluate with respect to distribution b. + ... ans = self.l1.prob(value, loc_b, scale_b) + ... # `loc` and `scale` must be passed in during function calls + ... ans = self.l2.prob(value, loc_a, scale_a) + ... + ... # Functions `mean`, `mode`, `sd`, `var`, and `entropy` have the same arguments. + ... # Args: + ... # loc (Tensor): the location of the distribution. Default: self.loc. + ... # scale (Tensor): the scale of the distribution. Default: self.scale. + ... + ... # Example of `mean`. `mode`, `sd`, `var`, and `entropy` are similar. + ... ans = self.l1.mean() # return 0.0 + ... ans = self.l1.mean(loc_b, scale_b) # return loc_b + ... # `loc` and `scale` must be passed in during function calls. + ... ans = self.l2.mean(loc_a, scale_a) + ... + ... # Examples of `sample`. + ... # Args: + ... # shape (tuple): the shape of the sample. Default: () + ... # loc (Tensor): the location of the distribution. Default: self.loc. + ... # scale (Tensor): the scale of the distribution. Default: self.scale. + ... ans = self.l1.sample() + ... ans = self.l1.sample((2,3)) + ... ans = self.l1.sample((2,3), loc_b, scale_b) + ... ans = self.l2.sample((2,3), loc_a, scale_a) + ... """ def __init__(self, diff --git a/mindspore/nn/probability/distribution/normal.py b/mindspore/nn/probability/distribution/normal.py index 6a4949084e..f37aeacde4 100644 --- a/mindspore/nn/probability/distribution/normal.py +++ b/mindspore/nn/probability/distribution/normal.py @@ -53,66 +53,67 @@ class Normal(Distribution): >>> >>> # To use a Normal distribution in a network. >>> class net(Cell): - >>> def __init__(self): - >>> super(net, self).__init__(): - >>> self.n1 = msd.Nomral(0.0, 1.0, dtype=mstype.float32) - >>> self.n2 = msd.Normal(dtype=mstype.float32) - >>> - >>> # The following calls are valid in construct. - >>> def construct(self, value, mean_b, sd_b, mean_a, sd_a): - >>> - >>> # Private interfaces of probability functions corresponding to public interfaces, including - >>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows. - >>> # Args: - >>> # value (Tensor): the value to be evaluated. - >>> # mean (Tensor): the mean of distribution. Default: self._mean_value. - >>> # sd (Tensor): the standard deviation of distribution. Default: self._sd_value. - >>> - >>> # Examples of `prob`. 
- >>> # Similar calls can be made to other probability functions - >>> # by replacing 'prob' by the name of the function - >>> ans = self.n1.prob(value) - >>> # Evaluate with respect to distribution b. - >>> ans = self.n1.prob(value, mean_b, sd_b) - >>> # `mean` and `sd` must be passed in during function calls - >>> ans = self.n2.prob(value, mean_a, sd_a) - >>> - >>> - >>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments. - >>> # Args: - >>> # mean (Tensor): the mean of distribution. Default: self._mean_value. - >>> # sd (Tensor): the standard deviation of distribution. Default: self._sd_value. - >>> - >>> # Example of `mean`. `sd`, `var`, and `entropy` are similar. - >>> ans = self.n1.mean() # return 0.0 - >>> ans = self.n1.mean(mean_b, sd_b) # return mean_b - >>> # `mean` and `sd` must be passed in during function calls. - >>> ans = self.n2.mean(mean_a, sd_a) - >>> - >>> - >>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same: - >>> # Args: - >>> # dist (str): the type of the distributions. Only "Normal" is supported. - >>> # mean_b (Tensor): the mean of distribution b. - >>> # sd_b (Tensor): the standard deviation distribution b. - >>> # mean_a (Tensor): the mean of distribution a. Default: self._mean_value. - >>> # sd_a (Tensor): the standard deviation distribution a. Default: self._sd_value. - >>> - >>> # Examples of `kl_loss`. `cross_entropy` is similar. - >>> ans = self.n1.kl_loss('Normal', mean_b, sd_b) - >>> ans = self.n1.kl_loss('Normal', mean_b, sd_b, mean_a, sd_a) - >>> # Additional `mean` and `sd` must be passed in. - >>> ans = self.n2.kl_loss('Normal', mean_b, sd_b, mean_a, sd_a) - >>> - >>> # Examples of `sample`. - >>> # Args: - >>> # shape (tuple): the shape of the sample. Default: () - >>> # mean (Tensor): the mean of the distribution. Default: self._mean_value. - >>> # sd (Tensor): the standard deviation of the distribution. Default: self._sd_value. - >>> ans = self.n1.sample() - >>> ans = self.n1.sample((2,3)) - >>> ans = self.n1.sample((2,3), mean_b, sd_b) - >>> ans = self.n2.sample((2,3), mean_a, sd_a) + ... def __init__(self): + ... super(net, self).__init__(): + ... self.n1 = msd.Nomral(0.0, 1.0, dtype=mstype.float32) + ... self.n2 = msd.Normal(dtype=mstype.float32) + ... + ... # The following calls are valid in construct. + ... def construct(self, value, mean_b, sd_b, mean_a, sd_a): + ... + ... # Private interfaces of probability functions corresponding to public interfaces, including + ... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows. + ... # Args: + ... # value (Tensor): the value to be evaluated. + ... # mean (Tensor): the mean of distribution. Default: self._mean_value. + ... # sd (Tensor): the standard deviation of distribution. Default: self._sd_value. + ... + ... # Examples of `prob`. + ... # Similar calls can be made to other probability functions + ... # by replacing 'prob' by the name of the function + ... ans = self.n1.prob(value) + ... # Evaluate with respect to distribution b. + ... ans = self.n1.prob(value, mean_b, sd_b) + ... # `mean` and `sd` must be passed in during function calls + ... ans = self.n2.prob(value, mean_a, sd_a) + ... + ... + ... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments. + ... # Args: + ... # mean (Tensor): the mean of distribution. Default: self._mean_value. + ... # sd (Tensor): the standard deviation of distribution. Default: self._sd_value. + ... + ... # Example of `mean`. 
`sd`, `var`, and `entropy` are similar. + ... ans = self.n1.mean() # return 0.0 + ... ans = self.n1.mean(mean_b, sd_b) # return mean_b + ... # `mean` and `sd` must be passed in during function calls. + ... ans = self.n2.mean(mean_a, sd_a) + ... + ... + ... # Interfaces of 'kl_loss' and 'cross_entropy' are the same: + ... # Args: + ... # dist (str): the type of the distributions. Only "Normal" is supported. + ... # mean_b (Tensor): the mean of distribution b. + ... # sd_b (Tensor): the standard deviation distribution b. + ... # mean_a (Tensor): the mean of distribution a. Default: self._mean_value. + ... # sd_a (Tensor): the standard deviation distribution a. Default: self._sd_value. + ... + ... # Examples of `kl_loss`. `cross_entropy` is similar. + ... ans = self.n1.kl_loss('Normal', mean_b, sd_b) + ... ans = self.n1.kl_loss('Normal', mean_b, sd_b, mean_a, sd_a) + ... # Additional `mean` and `sd` must be passed in. + ... ans = self.n2.kl_loss('Normal', mean_b, sd_b, mean_a, sd_a) + ... + ... # Examples of `sample`. + ... # Args: + ... # shape (tuple): the shape of the sample. Default: () + ... # mean (Tensor): the mean of the distribution. Default: self._mean_value. + ... # sd (Tensor): the standard deviation of the distribution. Default: self._sd_value. + ... ans = self.n1.sample() + ... ans = self.n1.sample((2,3)) + ... ans = self.n1.sample((2,3), mean_b, sd_b) + ... ans = self.n2.sample((2,3), mean_a, sd_a) + ... """ def __init__(self, diff --git a/mindspore/nn/probability/distribution/transformed_distribution.py b/mindspore/nn/probability/distribution/transformed_distribution.py index 927420291c..6602f12758 100644 --- a/mindspore/nn/probability/distribution/transformed_distribution.py +++ b/mindspore/nn/probability/distribution/transformed_distribution.py @@ -54,19 +54,20 @@ class TransformedDistribution(Distribution): >>> import mindspore.nn.probability.distribution as msd >>> import mindspore.nn.probability.bijector as msb >>> ln = msd.TransformedDistribution(msb.Exp(), - >>> msd.Normal(0.0, 1.0, dtype=mstype.float32)) - >>> + ... msd.Normal(0.0, 1.0, dtype=mstype.float32)) + ... >>> # To use a transformed distribution in a network. >>> class net(Cell): - >>> def __init__(self): - >>> super(net, self).__init__(): - >>> self.ln = msd.TransformedDistribution(msb.Exp(), - >>> msd.Normal(0.0, 1.0, dtype=mstype.float32)) - >>> - >>> def construct(self, value): - >>> # Similar calls can be made to other functions - >>> # by replacing 'sample' by the name of the function. - >>> ans = self.ln.sample(shape=(2, 3)) + ... def __init__(self): + ... super(net, self).__init__(): + ... self.ln = msd.TransformedDistribution(msb.Exp(), + ... msd.Normal(0.0, 1.0, dtype=mstype.float32)) + ... + ... def construct(self, value): + ... # Similar calls can be made to other functions + ... # by replacing 'sample' by the name of the function. + ... ans = self.ln.sample(shape=(2, 3)) + ... """ def __init__(self, diff --git a/mindspore/nn/probability/distribution/uniform.py b/mindspore/nn/probability/distribution/uniform.py index 3759349ec1..e94deeac3e 100644 --- a/mindspore/nn/probability/distribution/uniform.py +++ b/mindspore/nn/probability/distribution/uniform.py @@ -52,66 +52,67 @@ class Uniform(Distribution): >>> >>> # To use a Uniform distribution in a network. 
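The TransformedDistribution hunk above composes an Exp bijector with a standard Normal. The sketch below (illustrative class name, same imports as the docstring) draws samples from the resulting log-normal-like distribution.

import mindspore.nn as nn
import mindspore.nn.probability.distribution as msd
import mindspore.nn.probability.bijector as msb
from mindspore import dtype as mstype

class LogNormalSample(nn.Cell):
    def __init__(self):
        super(LogNormalSample, self).__init__()
        # exp(X) where X ~ Normal(0, 1): samples are always positive
        self.ln = msd.TransformedDistribution(msb.Exp(),
                                              msd.Normal(0.0, 1.0, dtype=mstype.float32))

    def construct(self):
        # draw a 2x3 batch of samples
        return self.ln.sample(shape=(2, 3))

net = LogNormalSample()
samples = net()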
>>> class net(Cell): - >>> def __init__(self) - >>> super(net, self).__init__(): - >>> self.u1 = msd.Uniform(0.0, 1.0, dtype=mstype.float32) - >>> self.u2 = msd.Uniform(dtype=mstype.float32) - >>> - >>> # All the following calls in construct are valid. - >>> def construct(self, value, low_b, high_b, low_a, high_a): - >>> - >>> # Private interfaces of probability functions corresponding to public interfaces, including - >>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments. - >>> # Args: - >>> # value (Tensor): the value to be evaluated. - >>> # low (Tensor): the lower bound of distribution. Default: self.low. - >>> # high (Tensor): the higher bound of distribution. Default: self.high. - >>> - >>> # Examples of `prob`. - >>> # Similar calls can be made to other probability functions - >>> # by replacing 'prob' by the name of the function. - >>> ans = self.u1.prob(value) - >>> # Evaluate with respect to distribution b. - >>> ans = self.u1.prob(value, low_b, high_b) - >>> # `high` and `low` must be passed in during function calls. - >>> ans = self.u2.prob(value, low_a, high_a) - >>> - >>> - >>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments. - >>> # Args: - >>> # low (Tensor): the lower bound of distribution. Default: self.low. - >>> # high (Tensor): the higher bound of distribution. Default: self.high. - >>> - >>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar. - >>> ans = self.u1.mean() # return 0.5 - >>> ans = self.u1.mean(low_b, high_b) # return (low_b + high_b) / 2 - >>> # `high` and `low` must be passed in during function calls. - >>> ans = self.u2.mean(low_a, high_a) - >>> - >>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same. - >>> # Args: - >>> # dist (str): the type of the distributions. Should be "Uniform" in this case. - >>> # low_b (Tensor): the lower bound of distribution b. - >>> # high_b (Tensor): the upper bound of distribution b. - >>> # low_a (Tensor): the lower bound of distribution a. Default: self.low. - >>> # high_a (Tensor): the upper bound of distribution a. Default: self.high. - >>> - >>> # Examples of `kl_loss`. `cross_entropy` is similar. - >>> ans = self.u1.kl_loss('Uniform', low_b, high_b) - >>> ans = self.u1.kl_loss('Uniform', low_b, high_b, low_a, high_a) - >>> # Additional `high` and `low` must be passed in. - >>> ans = self.u2.kl_loss('Uniform', low_b, high_b, low_a, high_a) - >>> - >>> - >>> # Examples of `sample`. - >>> # Args: - >>> # shape (tuple): the shape of the sample. Default: () - >>> # low (Tensor): the lower bound of the distribution. Default: self.low. - >>> # high (Tensor): the upper bound of the distribution. Default: self.high. - >>> ans = self.u1.sample() - >>> ans = self.u1.sample((2,3)) - >>> ans = self.u1.sample((2,3), low_b, high_b) - >>> ans = self.u2.sample((2,3), low_a, high_a) + ... def __init__(self) + ... super(net, self).__init__(): + ... self.u1 = msd.Uniform(0.0, 1.0, dtype=mstype.float32) + ... self.u2 = msd.Uniform(dtype=mstype.float32) + ... + ... # All the following calls in construct are valid. + ... def construct(self, value, low_b, high_b, low_a, high_a): + ... + ... # Private interfaces of probability functions corresponding to public interfaces, including + ... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments. + ... # Args: + ... # value (Tensor): the value to be evaluated. + ... # low (Tensor): the lower bound of distribution. Default: self.low. + ... 
# high (Tensor): the higher bound of distribution. Default: self.high. + ... + ... # Examples of `prob`. + ... # Similar calls can be made to other probability functions + ... # by replacing 'prob' by the name of the function. + ... ans = self.u1.prob(value) + ... # Evaluate with respect to distribution b. + ... ans = self.u1.prob(value, low_b, high_b) + ... # `high` and `low` must be passed in during function calls. + ... ans = self.u2.prob(value, low_a, high_a) + ... + ... + ... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments. + ... # Args: + ... # low (Tensor): the lower bound of distribution. Default: self.low. + ... # high (Tensor): the higher bound of distribution. Default: self.high. + ... + ... # Examples of `mean`. `sd`, `var`, and `entropy` are similar. + ... ans = self.u1.mean() # return 0.5 + ... ans = self.u1.mean(low_b, high_b) # return (low_b + high_b) / 2 + ... # `high` and `low` must be passed in during function calls. + ... ans = self.u2.mean(low_a, high_a) + ... + ... # Interfaces of 'kl_loss' and 'cross_entropy' are the same. + ... # Args: + ... # dist (str): the type of the distributions. Should be "Uniform" in this case. + ... # low_b (Tensor): the lower bound of distribution b. + ... # high_b (Tensor): the upper bound of distribution b. + ... # low_a (Tensor): the lower bound of distribution a. Default: self.low. + ... # high_a (Tensor): the upper bound of distribution a. Default: self.high. + ... + ... # Examples of `kl_loss`. `cross_entropy` is similar. + ... ans = self.u1.kl_loss('Uniform', low_b, high_b) + ... ans = self.u1.kl_loss('Uniform', low_b, high_b, low_a, high_a) + ... # Additional `high` and `low` must be passed in. + ... ans = self.u2.kl_loss('Uniform', low_b, high_b, low_a, high_a) + ... + ... + ... # Examples of `sample`. + ... # Args: + ... # shape (tuple): the shape of the sample. Default: () + ... # low (Tensor): the lower bound of the distribution. Default: self.low. + ... # high (Tensor): the upper bound of the distribution. Default: self.high. + ... ans = self.u1.sample() + ... ans = self.u1.sample((2,3)) + ... ans = self.u1.sample((2,3), low_b, high_b) + ... ans = self.u2.sample((2,3), low_a, high_a) + ... """ def __init__(self, diff --git a/mindspore/nn/sparse/sparse.py b/mindspore/nn/sparse/sparse.py index 2b9b5fa686..ce50905cf8 100644 --- a/mindspore/nn/sparse/sparse.py +++ b/mindspore/nn/sparse/sparse.py @@ -31,14 +31,14 @@ class SparseToDense(Cell): Examples: >>> class SparseToDenseCell(nn.Cell): - >>> def __init__(self, dense_shape): - >>> super(SparseToDenseCell, self).__init__() - >>> self.dense_shape = dense_shape - >>> self.sparse_to_dense = nn.SparseToDense() - >>> def construct(self, indices, values): - >>> sparse = SparseTensor(indices, values, self.dense_shape) - >>> return self.sparse_to_dense(sparse) - >>> + ... def __init__(self, dense_shape): + ... super(SparseToDenseCell, self).__init__() + ... self.dense_shape = dense_shape + ... self.sparse_to_dense = nn.SparseToDense() + ... def construct(self, indices, values): + ... sparse = SparseTensor(indices, values, self.dense_shape) + ... return self.sparse_to_dense(sparse) + ... 
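The Uniform docstring notes that mean() evaluates to (low + high) / 2. A small hypothetical check of that identity, using one instance with fixed bounds and one that takes bounds at call time:

import numpy as np
import mindspore
import mindspore.nn as nn
import mindspore.nn.probability.distribution as msd
from mindspore import Tensor, dtype as mstype

class UniformMean(nn.Cell):
    def __init__(self):
        super(UniformMean, self).__init__()
        self.u1 = msd.Uniform(0.0, 1.0, dtype=mstype.float32)  # bounds fixed at init
        self.u2 = msd.Uniform(dtype=mstype.float32)             # bounds supplied per call

    def construct(self, low_b, high_b):
        # self.u1.mean() -> 0.5; self.u2.mean(low_b, high_b) -> (low_b + high_b) / 2
        return self.u1.mean(), self.u2.mean(low_b, high_b)

net = UniformMean()
low_b = Tensor(np.array([1.0]), mindspore.float32)
high_b = Tensor(np.array([3.0]), mindspore.float32)
mean1, mean2 = net(low_b, high_b)  # expected values: 0.5 and 2.0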
>>> indices = Tensor([[0, 1], [1, 2]]) >>> values = Tensor([1, 2], dtype=ms.float32) >>> dense_shape = (3, 4) diff --git a/mindspore/ops/operations/_quant_ops.py b/mindspore/ops/operations/_quant_ops.py index 5e2b870837..70b591d77c 100644 --- a/mindspore/ops/operations/_quant_ops.py +++ b/mindspore/ops/operations/_quant_ops.py @@ -1417,13 +1417,15 @@ class IFMR(PrimitiveWithInfer): Examples: >>> data = Tensor(np.random.rand(1, 3, 6, 4).astype(np.float32)) - >>> data_min = Tensor([0.1], mstype.float32) - >>> data_max = Tensor([0.5], mstype.float32) + >>> data_min = Tensor([0.1], mindspore.float32) + >>> data_max = Tensor([0.5], mindspore.float32) >>> cumsum = Tensor(np.random.rand(4).astype(np.int32)) >>> ifmr = Q.IFMR(min_percentile=0.2, max_percentile=0.9, search_range=(1.0, 2.0), - >>> search_step=1.0, with_offset=False) + ... search_step=1.0, with_offset=False) >>> output = ifmr(data, data_min, data_max, cumsum) - ([7.87401572e-03], [0.00000000e+00]) + >>> print(output) + (Tensor(shape=[1], dtype=Float32, value= [7.87401572e-03]), + Tensor(shape=[1], dtype=Float32, value= [0.00000000e+00])) """ @prim_attr_register diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py index 386cf855c5..4a871f6fc7 100644 --- a/mindspore/ops/operations/array_ops.py +++ b/mindspore/ops/operations/array_ops.py @@ -148,8 +148,8 @@ class ExpandDims(PrimitiveWithInfer): >>> expand_dims = P.ExpandDims() >>> output = expand_dims(input_tensor, 0) >>> print(output) - [[[2.0, 2.0], - [2.0, 2.0]]] + [[[2. 2.] + [2. 2.]]] """ @prim_attr_register @@ -230,8 +230,8 @@ class SameTypeShape(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32) >>> input_y = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32) - >>> out = P.SameTypeShape()(input_x, input_y) - >>> print(out) + >>> output = P.SameTypeShape()(input_x, input_y) + >>> print(output) [[2. 2.] [2. 2.]] """ @@ -342,8 +342,8 @@ class IsSubClass(PrimitiveWithInfer): bool, the check result. Examples: - >>> result = P.IsSubClass()(mindspore.int32, mindspore.intc) - >>> print(result) + >>> output = P.IsSubClass()(mindspore.int32, mindspore.intc) + >>> print(output) True """ @@ -379,9 +379,9 @@ class IsInstance(PrimitiveWithInfer): Examples: >>> a = 1 - >>> result = P.IsInstance()(a, mindspore.int64) - >>> print(result) - True + >>> output = P.IsInstance()(a, mindspore.int32) + >>> print(output) + False """ @prim_attr_register @@ -429,9 +429,9 @@ class Reshape(PrimitiveWithInfer): >>> reshape = P.Reshape() >>> output = reshape(input_tensor, (3, 2)) >>> print(output) - [[-0.1 0.3] - [3.6 0.4 ] - [0.5 -3.2]] + [[-0.1 0.3] + [ 3.6 0.4] + [ 0.5 -3.2]] """ @prim_attr_register @@ -632,12 +632,12 @@ class Transpose(PrimitiveWithCheck): >>> transpose = P.Transpose() >>> output = transpose(input_tensor, perm) >>> print(output) - [[[1. 4.] - [2. 5.] - [3. 6.]] - [[7. 10.] - [8. 11.] - [9. 12.]]] + [[[ 1. 4.] + [ 2. 5.] + [ 3. 6.]] + [[ 7. 10.] + [ 8. 11.] + [ 9. 
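The array_ops hunks above touch several shape-manipulation examples; the sketch below contrasts three of them on one small tensor. Input values are illustrative and not taken from the patch.

import numpy as np
import mindspore
from mindspore import Tensor
import mindspore.ops.operations as P

x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)  # shape (2, 3)

expanded = P.ExpandDims()(x, 0)        # shape (1, 2, 3): a new leading axis of length 1
reshaped = P.Reshape()(x, (3, 2))      # shape (3, 2): same six elements, new layout
transposed = P.Transpose()(x, (1, 0))  # shape (3, 2): axes swapped, data permuted

print(expanded.shape, reshaped.shape, transposed.shape)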
12.]]] """ @prim_attr_register @@ -668,8 +668,9 @@ class Unique(Primitive): Examples: >>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32) - >>> out = P.Unique()(x) - (Tensor([1, 2, 5], mindspore.int32), Tensor([0, 1, 2, 1], mindspore.int32)) + >>> output = P.Unique()(x) + >>> print(output) + (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 1])) """ @prim_attr_register @@ -696,11 +697,11 @@ class GatherV2(PrimitiveWithCheck): >>> input_params = Tensor(np.array([[1, 2, 7, 42], [3, 4, 54, 22], [2, 2, 55, 3]]), mindspore.float32) >>> input_indices = Tensor(np.array([1, 2]), mindspore.int32) >>> axis = 1 - >>> out = P.GatherV2()(input_params, input_indices, axis) - >>> print(out) - [[2.0, 7.0], - [4.0, 54.0], - [2.0, 55.0]] + >>> output = P.GatherV2()(input_params, input_indices, axis) + >>> print(output) + [[ 2. 7.] + [ 4. 54.] + [ 2. 55.]] """ @prim_attr_register @@ -770,9 +771,10 @@ class Padding(PrimitiveWithInfer): Examples: >>> x = Tensor(np.array([[8], [10]]), mindspore.float32) >>> pad_dim_size = 4 - >>> out = P.Padding(pad_dim_size)(x) - >>> print(out) - [[8, 0, 0, 0], [10, 0, 0, 0]] + >>> output = P.Padding(pad_dim_size)(x) + >>> print(output) + [[ 8. 0. 0. 0.] + [10. 0. 0. 0.]] """ @prim_attr_register @@ -811,9 +813,10 @@ class UniqueWithPad(PrimitiveWithInfer): Examples: >>> x = Tensor(np.array([1, 1, 5, 5, 4, 4, 3, 3, 2, 2,]), mindspore.int32) >>> pad_num = 8 - >>> out = P.UniqueWithPad()(x, pad_num) - >>> print(out) - ([1, 5, 4, 3, 2, 8, 8, 8, 8, 8], [0, 0, 1, 1, 2, 2, 3, 3, 4, 4]) + >>> output = P.UniqueWithPad()(x, pad_num) + >>> print(output) + (Tensor(shape=[10], dtype=Int32, value= [1, 5, 4, 3, 2, 8, 8, 8, 8, 8]), + Tensor(shape=[10], dtype=Int32, value= [0, 0, 1, 1, 2, 2, 3, 3, 4, 4])) """ @prim_attr_register @@ -854,13 +857,14 @@ class Split(PrimitiveWithInfer): Examples: >>> split = P.Split(1, 2) - >>> x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]])) + >>> x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]), mindspore.int32) >>> output = split(x) >>> print(output) - ([[1, 1], - [2, 2]], - [[1, 1], - [2, 2]]) + (Tensor(shape=[2, 2], dtype=Int32, value= + [[1, 1], + [2, 2]]), Tensor(shape=[2, 2], dtype=Int32, value= + [[1, 1], + [2, 2]])) """ @prim_attr_register @@ -1025,8 +1029,8 @@ class Fill(PrimitiveWithInfer): >>> fill = P.Fill() >>> output = fill(mindspore.float32, (2, 2), 1) >>> print(output) - [[1.0, 1.0], - [1.0, 1.0]] + [[1. 1.] + [1. 1.]] """ @prim_attr_register @@ -1156,8 +1160,8 @@ class OnesLike(PrimitiveWithInfer): >>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32)) >>> output = oneslike(x) >>> print(output) - [[1, 1], - [1, 1]] + [[1 1] + [1 1]] """ @prim_attr_register @@ -1189,8 +1193,8 @@ class ZerosLike(PrimitiveWithCheck): >>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32)) >>> output = zeroslike(x) >>> print(output) - [[0.0, 0.0], - [0.0, 0.0]] + [[0. 0.] + [0. 
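Several operators in these hunks return tuples of Tensors rather than a single array, which is why their corrected outputs print as (Tensor(...), Tensor(...)). A short sketch of unpacking them, reusing the docstring inputs:

import numpy as np
import mindspore
from mindspore import Tensor
import mindspore.ops.operations as P

x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
values, idx = P.Unique()(x)     # values: [1 2 5]; idx: [0 1 2 1], position of each input in values

y = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]), mindspore.int32)
left, right = P.Split(1, 2)(y)  # two (2, 2) tensors, split along axis 1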
0.]] """ @prim_attr_register @@ -1338,7 +1342,8 @@ class InvertPermutation(PrimitiveWithInfer): >>> invert = P.InvertPermutation() >>> input_data = (3, 4, 0, 2, 1) >>> output = invert(input_data) - >>> output == (2, 4, 3, 0, 1) + >>> print(output) + (2, 4, 3, 0, 1) """ @prim_attr_register @@ -1400,8 +1405,8 @@ class Argmax(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([2.0, 3.1, 1.2]), mindspore.float32) - >>> index = P.Argmax(output_type=mindspore.int32)(input_x) - >>> print(index) + >>> output = P.Argmax(output_type=mindspore.int32)(input_x) + >>> print(output) 1 """ @@ -1559,9 +1564,9 @@ class ArgMinWithValue(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.random.rand(5), mindspore.float32) - >>> index, output = P.ArgMinWithValue()(input_x) - >>> print((index, output)) - 0 0.0496291 + >>> output = P.ArgMinWithValue()(input_x) + >>> print(output) + (Tensor(shape=[], dtype=Int32, value= 2), Tensor(shape=[], dtype=Float32, value= 0.0595638)) """ @prim_attr_register @@ -1616,8 +1621,8 @@ class Tile(PrimitiveWithInfer): >>> tile = P.Tile() >>> input_x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.float32) >>> multiples = (2, 3) - >>> result = tile(input_x, multiples) - >>> print(result) + >>> output = tile(input_x, multiples) + >>> print(output) [[1. 2. 1. 2. 1. 2.] [3. 4. 3. 4. 3. 4.] [1. 2. 1. 2. 1. 2.] @@ -1693,7 +1698,7 @@ class UnsortedSegmentSum(PrimitiveWithInfer): >>> num_segments = 4 >>> output = P.UnsortedSegmentSum()(input_x, segment_ids, num_segments) >>> print(output) - [3, 3, 4, 0] + [3. 3. 4. 0.] """ @prim_attr_register @@ -1767,8 +1772,10 @@ class UnsortedSegmentMin(PrimitiveWithInfer): >>> segment_ids = Tensor(np.array([0, 1, 1]).astype(np.int32)) >>> num_segments = 2 >>> unsorted_segment_min = P.UnsortedSegmentMin() - >>> unsorted_segment_min(input_x, segment_ids, num_segments) - [[1., 2., 3.], [4., 2., 1.]] + >>> output = unsorted_segment_min(input_x, segment_ids, num_segments) + >>> print(output) + [[1. 2. 3.] + [4. 2. 1.]] """ @prim_attr_register @@ -1821,8 +1828,10 @@ class UnsortedSegmentMax(PrimitiveWithInfer): >>> segment_ids = Tensor(np.array([0, 1, 1]).astype(np.int32)) >>> num_segments = 2 >>> unsorted_segment_max = P.UnsortedSegmentMax() - >>> unsorted_segment_max(input_x, segment_ids, num_segments) - [[1., 2., 3.], [4., 5., 6.]] + >>> output = unsorted_segment_max(input_x, segment_ids, num_segments) + >>> print(output) + [[1. 2. 3.] + [4. 5. 6.]] """ @prim_attr_register @@ -1872,8 +1881,10 @@ class UnsortedSegmentProd(PrimitiveWithInfer): >>> segment_ids = Tensor(np.array([0, 1, 0]).astype(np.int32)) >>> num_segments = 2 >>> unsorted_segment_prod = P.UnsortedSegmentProd() - >>> unsorted_segment_prod(input_x, segment_ids, num_segments) - [[4., 4., 3.], [4., 5., 6.]] + >>> output = unsorted_segment_prod(input_x, segment_ids, num_segments) + >>> print(output) + [[4. 4. 3.] + [4. 5. 6.]] """ @prim_attr_register @@ -1935,10 +1946,10 @@ class Concat(PrimitiveWithInfer): >>> op = P.Concat() >>> output = op((data1, data2)) >>> print(output) - [[0, 1], - [2, 1], - [0, 1], - [2, 1]] + [[0 1] + [2 1] + [0 1] + [2 1]] """ @prim_attr_register @@ -1983,7 +1994,8 @@ class ParallelConcat(PrimitiveWithInfer): >>> op = P.ParallelConcat() >>> output = op((data1, data2)) >>> print(output) - [[0, 1], [2, 1]] + [[0 1] + [2 1]] """ @prim_attr_register @@ -2066,7 +2078,8 @@ class Pack(PrimitiveWithInfer): >>> pack = P.Pack() >>> output = pack([data1, data2]) >>> print(output) - [[0, 1], [2, 3]] + [[0. 1.] + [2. 
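Concat joins its inputs along an existing axis while Pack introduces a new one; a minimal sketch with two 2x2 tensors (values illustrative):

import numpy as np
import mindspore
from mindspore import Tensor
import mindspore.ops.operations as P

a = Tensor(np.array([[0, 1], [2, 1]]), mindspore.float32)
b = Tensor(np.array([[0, 1], [2, 1]]), mindspore.float32)

concat = P.Concat()((a, b))  # shape (4, 2): rows appended along the existing axis 0
packed = P.Pack()([a, b])    # shape (2, 2, 2): a new leading axis is created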
3.]] """ @prim_attr_register @@ -2116,7 +2129,8 @@ class Unpack(PrimitiveWithInfer): >>> input_x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]])) >>> output = unpack(input_x) >>> print(output) - ([1, 1, 1, 1], [2, 2, 2, 2]) + (Tensor(shape=[4], dtype=Int32, value= [1, 1, 1, 1]), + Tensor(shape=[4], dtype=Int32, value= [2, 2, 2, 2])) """ @prim_attr_register @@ -2169,8 +2183,9 @@ class Slice(PrimitiveWithInfer): >>> data = Tensor(np.array([[[1, 1, 1], [2, 2, 2]], ... [[3, 3, 3], [4, 4, 4]], ... [[5, 5, 5], [6, 6, 6]]]).astype(np.int32)) - >>> type = P.Slice()(data, (1, 0, 0), (1, 1, 3)) - >>> print(type) + >>> slice = P.Slice() + >>> output = slice(data, (1, 0, 0), (1, 1, 3)) + >>> print(output) [[[3 3 3]]] """ @@ -2223,7 +2238,8 @@ class ReverseV2(PrimitiveWithInfer): >>> op = P.ReverseV2(axis=[1]) >>> output = op(input_x) >>> print(output) - [[4, 3, 2, 1], [8, 7, 6, 5]] + [[4 3 2 1] + [8 7 6 5]] """ @prim_attr_register @@ -2261,7 +2277,7 @@ class Rint(PrimitiveWithInfer): >>> op = P.Rint() >>> output = op(input_x) >>> print(output) - [-2., 0., 2., 2.] + [-2. 0. 2. 2.] """ @prim_attr_register @@ -2321,7 +2337,8 @@ class Select(PrimitiveWithInfer): >>> input_cond = Tensor([True, False]) >>> input_x = Tensor([2,3], mindspore.float32) >>> input_y = Tensor([1,2], mindspore.float32) - >>> select(input_cond, input_x, input_y) + >>> output = select(input_cond, input_x, input_y) + >>> print(output) [2. 2.] """ @@ -2454,10 +2471,8 @@ class StridedSlice(PrimitiveWithInfer): ... [[5, 5, 5], [6, 6, 6]]], mindspore.float32) >>> slice = P.StridedSlice() >>> output = slice(input_x, (1, 0, 0), (2, 1, 3), (1, 1, 1)) - >>> output.shape - (1, 1, 3) - >>> output - [[[3, 3, 3]]] + >>> print(output) + [[[3. 3. 3.]]] """ @prim_attr_register @@ -2648,13 +2663,13 @@ class DiagPart(PrimitiveWithInfer): Examples >>> input_x = Tensor([[1, 0, 0, 0], - >>> [0, 2, 0, 0], - >>> [0, 0, 3, 0], - >>> [0, 0, 0, 4]]) + ... [0, 2, 0, 0], + ... [0, 0, 3, 0], + ... [0, 0, 0, 4]]) >>> diag_part = P.DiagPart() >>> output = diag_part(input_x) >>> print(output) - [1, 2, 3, 4] + [1 2 3 4] """ @prim_attr_register @@ -2702,10 +2717,10 @@ class Eye(PrimitiveWithInfer): Examples: >>> eye = P.Eye() - >>> out_tensor = eye(2, 2, mindspore.int32) - >>> print(out_tensor) - [[1, 0], - [0, 1]] + >>> output = eye(2, 2, mindspore.int32) + >>> print(output) + [[1 0] + [0 1]] """ @prim_attr_register @@ -2743,9 +2758,9 @@ class ScatterNd(PrimitiveWithInfer): >>> shape = (3, 3) >>> output = op(indices, update, shape) >>> print(output) - [[0. 3.2 0.] - [0. 1.1 0.] - [0. 0. 0. ]] + [[0. 3.2 0. ] + [0. 1.1 0. ] + [0. 0. 0. ]] """ @prim_attr_register @@ -2794,8 +2809,8 @@ class ResizeNearestNeighbor(PrimitiveWithInfer): >>> resize = P.ResizeNearestNeighbor((2, 2)) >>> output = resize(input_tensor) >>> print(output) - [[[[-0.1 0.3] - [0.4 0.5 ]]]] + [[[[-0.1 0.3] + [ 0.4 0.5]]]] """ @prim_attr_register @@ -2836,7 +2851,7 @@ class GatherNd(PrimitiveWithInfer): >>> op = P.GatherNd() >>> output = op(input_x, indices) >>> print(output) - [-0.1, 0.5] + [-0.1 0.5] """ @prim_attr_register @@ -2873,8 +2888,9 @@ class TensorScatterUpdate(PrimitiveWithInfer): >>> update = Tensor(np.array([1.0, 2.2]), mindspore.float32) >>> op = P.TensorScatterUpdate() >>> output = op(input_x, indices, update) - [[1.0, 0.3, 3.6], - [0.4, 2.2, -3.2]] + >>> print(output) + [[ 1. 
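Select performs an element-wise choice between two tensors under a boolean mask, as in the corrected example above; restated as a plain script:

import mindspore
from mindspore import Tensor
import mindspore.ops.operations as P

cond = Tensor([True, False])
x = Tensor([2, 3], mindspore.float32)
y = Tensor([1, 2], mindspore.float32)

# picks x where cond is True and y elsewhere -> [2. 2.]
output = P.Select()(cond, x, y)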
0.3 3.6] + [ 0.4 2.2 -3.2]] """ @prim_attr_register @@ -2928,8 +2944,8 @@ class ScatterUpdate(_ScatterOp_Dynamic): >>> op = P.ScatterUpdate() >>> output = op(input_x, indices, updates) >>> print(output) - [[2.0, 1.2, 1.0], - [3.0, 1.2, 1.0]] + [[2. 1.2 1. ] + [3. 1.2 1. ]] """ @prim_attr_register @@ -2969,8 +2985,8 @@ class ScatterNdUpdate(_ScatterNdOp): >>> op = P.ScatterNdUpdate() >>> output = op(input_x, indices, update) >>> print(output) - [[1. 0.3 3.6] - [0.4 2.2 -3.2]] + [[ 1. 0.3 3.6] + [ 0.4 2.2 -3.2]] """ @prim_attr_register @@ -3017,7 +3033,8 @@ class ScatterMax(_ScatterOp): >>> scatter_max = P.ScatterMax() >>> output = scatter_max(input_x, indices, update) >>> print(output) - [[88.0, 88.0, 88.0], [88.0, 88.0, 88.0]] + [[88. 88. 88.] + [88. 88. 88.]] """ @prim_attr_register @@ -3058,7 +3075,8 @@ class ScatterMin(_ScatterOp): >>> scatter_min = P.ScatterMin() >>> output = scatter_min(input_x, indices, update) >>> print(output) - [[0.0, 1.0, 1.0], [0.0, 0.0, 0.0]] + [[0. 1. 1.] + [0. 0. 0.]] """ @@ -3093,7 +3111,8 @@ class ScatterAdd(_ScatterOp_Dynamic): >>> scatter_add = P.ScatterAdd() >>> output = scatter_add(input_x, indices, updates) >>> print(output) - [[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]] + [[1. 1. 1.] + [3. 3. 3.]] """ @prim_attr_register @@ -3170,7 +3189,8 @@ class ScatterMul(_ScatterOp): >>> scatter_mul = P.ScatterMul() >>> output = scatter_mul(input_x, indices, updates) >>> print(output) - [[2.0, 2.0, 2.0], [4.0, 4.0, 4.0]] + [[2. 2. 2.] + [4. 4. 4.]] """ @@ -3205,7 +3225,8 @@ class ScatterDiv(_ScatterOp): >>> scatter_div = P.ScatterDiv() >>> output = scatter_div(input_x, indices, updates) >>> print(output) - [[3.0, 3.0, 3.0], [1.0, 1.0, 1.0]] + [[3. 3. 3.] + [1. 1. 1.]] """ @@ -3240,7 +3261,7 @@ class ScatterNdAdd(_ScatterNdOp): >>> scatter_nd_add = P.ScatterNdAdd() >>> output = scatter_nd_add(input_x, indices, updates) >>> print(output) - [1, 10, 9, 4, 12, 6, 7, 17] + [ 1. 10. 9. 4. 12. 6. 7. 17.] """ @@ -3275,7 +3296,7 @@ class ScatterNdSub(_ScatterNdOp): >>> scatter_nd_sub = P.ScatterNdSub() >>> output = scatter_nd_sub(input_x, indices, updates) >>> print(output) - [1, -6, -3, 4, -2, 6, 7, -1] + [ 1. -6. -3. 4. -2. 6. 7. -1.] """ @@ -3307,7 +3328,7 @@ class ScatterNonAliasingAdd(_ScatterNdOp): >>> scatter_non_aliasing_add = P.ScatterNonAliasingAdd() >>> output = scatter_non_aliasing_add(input_x, indices, updates) >>> print(output) - [1, 10, 9, 4, 12, 6, 7, 17] + [ 1. 10. 9. 4. 12. 6. 7. 17.] 
""" @prim_attr_register @@ -3347,9 +3368,10 @@ class SpaceToDepth(PrimitiveWithInfer): Examples: >>> x = Tensor(np.random.rand(1,3,2,2), mindspore.float32) >>> block_size = 2 - >>> op = P.SpaceToDepth(block_size) - >>> output = op(x) - >>> output.asnumpy().shape == (1,12,1,1) + >>> space_to_depth = P.SpaceToDepth(block_size) + >>> output = space_to_depth(x) + >>> print(output) + (1, 12, 1, 1) """ @prim_attr_register @@ -3404,8 +3426,8 @@ class DepthToSpace(PrimitiveWithInfer): Examples: >>> x = Tensor(np.random.rand(1,12,1,1), mindspore.float32) >>> block_size = 2 - >>> op = P.DepthToSpace(block_size) - >>> output = op(x) + >>> depth_to_space = P.DepthToSpace(block_size) + >>> output = depth_to_space(x) >>> print(output.shape) (1, 3, 2, 2) """ @@ -3472,9 +3494,12 @@ class SpaceToBatch(PrimitiveWithInfer): >>> paddings = [[0, 0], [0, 0]] >>> space_to_batch = P.SpaceToBatch(block_size, paddings) >>> input_x = Tensor(np.array([[[[1, 2], [3, 4]]]]), mindspore.float32) - >>> space_to_batch(input_x) - [[[[1.]]], [[[2.]]], [[[3.]]], [[[4.]]]] - + >>> output = space_to_batch(input_x) + >>> print(output) + [[[[1.]]] + [[[2.]]] + [[[3.]]] + [[[4.]]]] """ @prim_attr_register @@ -3541,11 +3566,12 @@ class BatchToSpace(PrimitiveWithInfer): Examples: >>> block_size = 2 >>> crops = [[0, 0], [0, 0]] - >>> op = P.BatchToSpace(block_size, crops) + >>> batch_to_space = P.BatchToSpace(block_size, crops) >>> input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32) - >>> output = op(input_x) + >>> output = batch_to_space(input_x) >>> print(output) - [[[[1., 2.], [3., 4.]]]] + [[[[1. 2.] + [3. 4.]]]] """ @@ -3620,9 +3646,12 @@ class SpaceToBatchND(PrimitiveWithInfer): >>> paddings = [[0, 0], [0, 0]] >>> space_to_batch_nd = P.SpaceToBatchND(block_shape, paddings) >>> input_x = Tensor(np.array([[[[1, 2], [3, 4]]]]), mindspore.float32) - >>> space_to_batch_nd(input_x) - [[[[1.]]], [[[2.]]], [[[3.]]], [[[4.]]]] - + >>> output = space_to_batch_nd(input_x) + >>> print(output) + [[[[1.]]] + [[[2.]]] + [[[3.]]] + [[[4.]]]] """ @prim_attr_register @@ -3715,7 +3744,8 @@ class BatchToSpaceND(PrimitiveWithInfer): >>> input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32) >>> output = batch_to_space_nd(input_x) >>> print(output) - [[[[1., 2.], [3., 4.]]]] + [[[[1. 2.] + [3. 4.]]]] """ @@ -3791,8 +3821,10 @@ class BroadcastTo(PrimitiveWithInfer): >>> shape = (2, 3) >>> input_x = Tensor(np.array([1, 2, 3]).astype(np.float32)) >>> broadcast_to = P.BroadcastTo(shape) - >>> broadcast_to(input_x) - [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]] + >>> output = broadcast_to(input_x) + >>> print(output) + [[1. 2. 3.] + [1. 2. 3.]] """ @prim_attr_register @@ -3939,11 +3971,11 @@ class InplaceUpdate(PrimitiveWithInfer): >>> x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32) >>> v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32) >>> inplace_update = P.InplaceUpdate(indices) - >>> result = inplace_update(x, v) - >>> print(result) - [[0.5, 1.0], - [1.0, 1.5], - [5.0, 6.0]] + >>> output = inplace_update(x, v) + >>> print(output) + [[0.5 1. ] + [1. 1.5] + [5. 6. ]] """ @prim_attr_register @@ -3997,9 +4029,9 @@ class ReverseSequence(PrimitiveWithInfer): >>> reverse_sequence = P.ReverseSequence(seq_dim=1) >>> output = reverse_sequence(x, seq_lengths) >>> print(output) - [[1 2 3] - [5 4 6] - [9 8 7]] + [[1. 2. 3.] + [5. 4. 6.] + [9. 8. 
7.]] """ @prim_attr_register @@ -4057,16 +4089,16 @@ class EditDistance(PrimitiveWithInfer): >>> import mindspore.ops.operations as P >>> context.set_context(mode=context.GRAPH_MODE) >>> class EditDistance(nn.Cell): - >>> def __init__(self, hypothesis_shape, truth_shape, normalize=True): - >>> super(EditDistance, self).__init__() - >>> self.edit_distance = P.EditDistance(normalize) - >>> self.hypothesis_shape = hypothesis_shape - >>> self.truth_shape = truth_shape - >>> - >>> def construct(self, hypothesis_indices, hypothesis_values, truth_indices, truth_values): - >>> return self.edit_distance(hypothesis_indices, hypothesis_values, self.hypothesis_shape, - >>> truth_indices, truth_values, self.truth_shape) - >>> + ... def __init__(self, hypothesis_shape, truth_shape, normalize=True): + ... super(EditDistance, self).__init__() + ... self.edit_distance = P.EditDistance(normalize) + ... self.hypothesis_shape = hypothesis_shape + ... self.truth_shape = truth_shape + ... + ... def construct(self, hypothesis_indices, hypothesis_values, truth_indices, truth_values): + ... return self.edit_distance(hypothesis_indices, hypothesis_values, self.hypothesis_shape, + ... truth_indices, truth_values, self.truth_shape) + ... >>> hypothesis_indices = Tensor(np.array([[0, 0, 0], [1, 0, 1], [1, 1, 1]]).astype(np.int64)) >>> hypothesis_values = Tensor(np.array([1, 2, 3]).astype(np.float32)) >>> hypothesis_shape = Tensor(np.array([1, 1, 2]).astype(np.int64)) @@ -4074,9 +4106,10 @@ class EditDistance(PrimitiveWithInfer): >>> truth_values = Tensor(np.array([1, 3, 2, 1]).astype(np.float32)) >>> truth_shape = Tensor(np.array([2, 2, 2]).astype(np.int64)) >>> edit_distance = EditDistance(hypothesis_shape, truth_shape) - >>> out = edit_distance(hypothesis_indices, hypothesis_values, truth_indices, truth_values) - >>> print(out) - >>> [[1.0, 1.0], [1.0, 1.0]] + >>> output = edit_distance(hypothesis_indices, hypothesis_values, truth_indices, truth_values) + >>> print(output) + [[1. 1.] + [1. 1.]] """ @prim_attr_register @@ -4166,9 +4199,15 @@ class Sort(PrimitiveWithInfer): Examples: >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16) >>> sort = P.Sort() - >>> sort(x) - ([[1.0, 2.0, 8.0], [3.0, 5.0, 9.0], [4.0, 6.0 ,7.0]], - [[2, 1, 0], [2, 0, 1], [0, 1, 2]]) + >>> output = sort(x) + >>> print(output) + (Tensor(shape=[3, 3], dtype=Float16, value= + [[ 1.0000e+00, 2.0000e+00, 8.0000e+00], + [ 3.0000e+00, 5.0000e+00, 9.0000e+00], + [ 4.0000e+00, 6.0000e+00, 7.0000e+00]]), Tensor(shape=[3, 3], dtype=Int32, value= + [[2, 1, 0], + [2, 0, 1], + [0, 1, 2]])) """ @prim_attr_register @@ -4208,9 +4247,12 @@ class EmbeddingLookup(PrimitiveWithInfer): >>> input_params = Tensor(np.array([[8, 9], [10, 11], [12, 13], [14, 15]]), mindspore.float32) >>> input_indices = Tensor(np.array([[5, 2], [8, 5]]), mindspore.int32) >>> offset = 4 - >>> out = P.EmbeddingLookup()(input_params, input_indices, offset) - >>> print(out) - [[[10, 11], [0 ,0]], [[0, 0], [10, 11]]] + >>> output = P.EmbeddingLookup()(input_params, input_indices, offset) + >>> print(output) + [[[10. 11.] + [ 0. 0.]] + [[ 0. 0.] + [10. 
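Sort returns both the sorted values and the indices that produced them, which is why its corrected output is a pair of Tensors; a sketch using the same float16 input:

import numpy as np
import mindspore
from mindspore import Tensor
import mindspore.ops.operations as P

x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
values, indices = P.Sort()(x)
# values are sorted along the last axis and indices give the original positions,
# e.g. the first row becomes [1. 2. 8.] with indices [2 1 0]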
11.]]] """ @prim_attr_register @@ -4259,9 +4301,10 @@ class GatherD(PrimitiveWithInfer): >>> x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32) >>> index = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32) >>> dim = 1 - >>> out = P.GatherD()(x, dim, index) - >>> print(out) - [[1, 1], [4, 3]] + >>> output = P.GatherD()(x, dim, index) + >>> print(output) + [[1 1] + [4 3]] """ @prim_attr_register @@ -4304,9 +4347,9 @@ class Identity(PrimitiveWithInfer): Examples: >>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64) - >>> y = P.Identity()(x) - >>> print(y) - [1, 2, 3, 4] + >>> output = P.Identity()(x) + >>> print(output) + [1 2 3 4] """ @prim_attr_register @@ -4341,10 +4384,10 @@ class RepeatElements(PrimitiveWithInfer): >>> repeat_elements = P.RepeatElements(rep = 2, axis = 0) >>> output = repeat_elements(x) >>> print(output) - [[0, 1, 2], - [0, 1, 2], - [3, 4, 5], - [3, 4, 5]], + [[0 1 2] + [0 1 2] + [3 4 5] + [3 4 5]] """ @prim_attr_register diff --git a/mindspore/ops/operations/comm_ops.py b/mindspore/ops/operations/comm_ops.py index c1b28be8bf..875a9ef536 100644 --- a/mindspore/ops/operations/comm_ops.py +++ b/mindspore/ops/operations/comm_ops.py @@ -76,16 +76,19 @@ class AllReduce(PrimitiveWithInfer): >>> >>> init() >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.allreduce_sum = P.AllReduce(ReduceOp.SUM, group="nccl_world_group") - >>> - >>> def construct(self, x): - >>> return self.allreduce_sum(x) - >>> + ... def __init__(self): + ... super(Net, self).__init__() + ... self.allreduce_sum = P.AllReduce(ReduceOp.SUM, group="nccl_world_group") + ... + ... def construct(self, x): + ... return self.allreduce_sum(x) + ... >>> input_ = Tensor(np.ones([2, 8]).astype(np.float32)) >>> net = Net() >>> output = net(input_) + >>> print(output) + [[4. 5. 6. 0. 0. 0. 0. 0.] + [0. 0. 0. 0. 0. 0. 0. 0.]] """ @prim_attr_register @@ -249,17 +252,18 @@ class AllGather(PrimitiveWithInfer): >>> from mindspore import Tensor >>> >>> init() - >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.allgather = P.AllGather(group="nccl_world_group") - >>> - >>> def construct(self, x): - >>> return self.allgather(x) - >>> + ... class Net(nn.Cell): + ... def __init__(self): + ... super(Net, self).__init__() + ... self.allgather = P.AllGather(group="nccl_world_group") + ... + ... def construct(self, x): + ... return self.allgather(x) + ... >>> input_ = Tensor(np.ones([2, 8]).astype(np.float32)) >>> net = Net() >>> output = net(input_) + >>> print(output) """ @prim_attr_register @@ -364,16 +368,17 @@ class ReduceScatter(PrimitiveWithInfer): >>> >>> init() >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.reducescatter = P.ReduceScatter(ReduceOp.SUM) - >>> - >>> def construct(self, x): - >>> return self.reducescatter(x) - >>> + ... def __init__(self): + ... super(Net, self).__init__() + ... self.reducescatter = P.ReduceScatter(ReduceOp.SUM) + ... + ... def construct(self, x): + ... return self.reducescatter(x) + ... 
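GatherD gathers along a single dimension using an index tensor of the same rank; the corrected example above restated as a plain script, with the gather rule spelled out:

import numpy as np
import mindspore
from mindspore import Tensor
import mindspore.ops.operations as P

x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32)
index = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)
# for dim=1: output[i][j] = x[i][index[i][j]]  ->  [[1 1] [4 3]]
output = P.GatherD()(x, 1, index)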
>>> input_ = Tensor(np.ones([8, 8]).astype(np.float32)) >>> net = Net() >>> output = net(input_) + >>> print(output) """ @prim_attr_register @@ -480,16 +485,20 @@ class Broadcast(PrimitiveWithInfer): >>> >>> init() >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.broadcast = P.Broadcast(1) - >>> - >>> def construct(self, x): - >>> return self.broadcast((x,)) - >>> - >>> input_ = Tensor(np.ones([2, 8]).astype(np.float32)) + ... def __init__(self): + ... super(Net, self).__init__() + ... self.broadcast = P.Broadcast(1) + ... + ... def construct(self, x): + ... return self.broadcast((x,)) + ... + >>> input_ = Tensor(np.ones([2, 4]).astype(np.int32)) >>> net = Net() >>> output = net(input_) + >>> print(output) + (Tensor(shape[2,4], dtype=Int32, value= + [[1, 1, 1, 1], + [1, 1, 1, 1]]),) """ @prim_attr_register diff --git a/mindspore/ops/operations/control_ops.py b/mindspore/ops/operations/control_ops.py index ed948b9ce5..bd80dca71e 100644 --- a/mindspore/ops/operations/control_ops.py +++ b/mindspore/ops/operations/control_ops.py @@ -51,27 +51,26 @@ class ControlDepend(Primitive): Examples: >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.control_depend = P.ControlDepend() - >>> self.softmax = P.Softmax() - >>> - >>> def construct(self, x, y): - >>> mul = x * y - >>> softmax = self.softmax(x) - >>> ret = self.control_depend(mul, softmax) - >>> return ret + ... def __init__(self): + ... super(Net, self).__init__() + ... self.control_depend = P.ControlDepend() + ... self.softmax = P.Softmax() + ... + ... def construct(self, x, y): + ... mul = x * y + ... softmax = self.softmax(x) + ... ret = self.control_depend(mul, softmax) + ... return ret + ... >>> x = Tensor(np.ones([4, 5]), dtype=mindspore.float32) >>> y = Tensor(np.ones([4, 5]), dtype=mindspore.float32) >>> net = Net() >>> output = net(x, y) >>> print(output) - [[1. 1. 1. 1. 1.] - [1. 1. 1. 1. 1.] - [1. 1. 1. 1. 1.] - [1. 1. 1. 1. 1.]] - >>> print(output.dtype) - Float32 + [[1. 1. 1. 1. 1.] + [1. 1. 1. 1. 1.] + [1. 1. 1. 1. 1.] + [1. 1. 1. 1. 1.]] """ @prim_attr_register @@ -100,29 +99,30 @@ class GeSwitch(PrimitiveWithInfer): Examples: >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.square = P.Square() - >>> self.add = P.TensorAdd() - >>> self.value = Tensor(np.full((1), 3), mindspore.float32) - >>> self.switch = P.GeSwitch() - >>> self.merge = P.Merge() - >>> self.less = P.Less() - >>> - >>> def construct(self, x, y): - >>> cond = self.less(x, y) - >>> st1, sf1 = self.switch(x, cond) - >>> st2, sf2 = self.switch(y, cond) - >>> add_ret = self.add(st1, st2) - >>> st3, sf3 = self.switch(self.value, cond) - >>> sq_ret = self.square(sf3) - >>> ret = self.merge((add_ret, sq_ret)) - >>> return ret[0] - >>> + ... def __init__(self): + ... super(Net, self).__init__() + ... self.square = P.Square() + ... self.add = P.TensorAdd() + ... self.value = Tensor(np.full((1), 3), mindspore.float32) + ... self.switch = P.GeSwitch() + ... self.merge = P.Merge() + ... self.less = P.Less() + ... + ... def construct(self, x, y): + ... cond = self.less(x, y) + ... st1, sf1 = self.switch(x, cond) + ... st2, sf2 = self.switch(y, cond) + ... add_ret = self.add(st1, st2) + ... st3, sf3 = self.switch(self.value, cond) + ... sq_ret = self.square(sf3) + ... ret = self.merge((add_ret, sq_ret)) + ... return ret[0] + ... 
>>> x = Tensor(10.0, dtype=mindspore.float32) >>> y = Tensor(5.0, dtype=mindspore.float32) >>> net = Net() >>> output = net(x, y) + >>> print(output) """ @prim_attr_register diff --git a/mindspore/ops/operations/debug_ops.py b/mindspore/ops/operations/debug_ops.py index 0213a1669a..f1367a0809 100644 --- a/mindspore/ops/operations/debug_ops.py +++ b/mindspore/ops/operations/debug_ops.py @@ -50,16 +50,17 @@ class ScalarSummary(PrimitiveWithInfer): Examples: >>> class SummaryDemo(nn.Cell): - >>> def __init__(self,): - >>> super(SummaryDemo, self).__init__() - >>> self.summary = P.ScalarSummary() - >>> self.add = P.TensorAdd() - >>> - >>> def construct(self, x, y): - >>> name = "x" - >>> self.summary(name, x) - >>> x = self.add(x, y) - >>> return x + ... def __init__(self,): + ... super(SummaryDemo, self).__init__() + ... self.summary = P.ScalarSummary() + ... self.add = P.TensorAdd() + ... + ... def construct(self, x, y): + ... name = "x" + ... self.summary(name, x) + ... x = self.add(x, y) + ... return x + ... """ @prim_attr_register @@ -88,14 +89,15 @@ class ImageSummary(PrimitiveWithInfer): Examples: >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.summary = P.ImageSummary() - >>> - >>> def construct(self, x): - >>> name = "image" - >>> out = self.summary(name, x) - >>> return out + ... def __init__(self): + ... super(Net, self).__init__() + ... self.summary = P.ImageSummary() + ... + ... def construct(self, x): + ... name = "image" + ... out = self.summary(name, x) + ... return out + ... """ @prim_attr_register @@ -125,16 +127,17 @@ class TensorSummary(PrimitiveWithInfer): Examples: >>> class SummaryDemo(nn.Cell): - >>> def __init__(self,): - >>> super(SummaryDemo, self).__init__() - >>> self.summary = P.TensorSummary() - >>> self.add = P.TensorAdd() - >>> - >>> def construct(self, x, y): - >>> x = self.add(x, y) - >>> name = "x" - >>> self.summary(name, x) - >>> return x + ... def __init__(self,): + ... super(SummaryDemo, self).__init__() + ... self.summary = P.TensorSummary() + ... self.add = P.TensorAdd() + ... + ... def construct(self, x, y): + ... x = self.add(x, y) + ... name = "x" + ... self.summary(name, x) + ... return x + ... """ @prim_attr_register @@ -163,16 +166,17 @@ class HistogramSummary(PrimitiveWithInfer): Examples: >>> class SummaryDemo(nn.Cell): - >>> def __init__(self,): - >>> super(SummaryDemo, self).__init__() - >>> self.summary = P.HistogramSummary() - >>> self.add = P.TensorAdd() - >>> - >>> def construct(self, x, y): - >>> x = self.add(x, y) - >>> name = "x" - >>> self.summary(name, x) - >>> return x + ... def __init__(self,): + ... super(SummaryDemo, self).__init__() + ... self.summary = P.HistogramSummary() + ... self.add = P.TensorAdd() + ... + ... def construct(self, x, y): + ... x = self.add(x, y) + ... name = "x" + ... self.summary(name, x) + ... return x + ... """ @prim_attr_register @@ -206,33 +210,34 @@ class InsertGradientOf(PrimitiveWithInfer): Examples: >>> def clip_gradient(dx): - >>> ret = dx - >>> if ret > 1.0: - >>> ret = 1.0 - >>> - >>> if ret < 0.2: - >>> ret = 0.2 - >>> - >>> return ret - >>> + ... ret = dx + ... if ret > 1.0: + ... ret = 1.0 + ... + ... if ret < 0.2: + ... ret = 0.2 + ... + ... return ret + ... 
>>> clip = P.InsertGradientOf(clip_gradient) >>> grad_all = C.GradOperation(get_all=True) >>> def InsertGradientOfClipDemo(): - >>> def clip_test(x, y): - >>> x = clip(x) - >>> y = clip(y) - >>> c = x * y - >>> return c - >>> - >>> @ms_function - >>> def f(x, y): - >>> return clip_test(x, y) - >>> - >>> def fd(x, y): - >>> return grad_all(clip_test)(x, y) - >>> - >>> print("forward: ", f(1.1, 0.1)) - >>> print("clip_gradient:", fd(1.1, 0.1)) + ... def clip_test(x, y): + ... x = clip(x) + ... y = clip(y) + ... c = x * y + ... return c + ... + ... @ms_function + ... def f(x, y): + ... return clip_test(x, y) + ... + ... def fd(x, y): + ... return grad_all(clip_test)(x, y) + ... + ... print("forward: ", f(1.1, 0.1)) + ... print("clip_gradient:", fd(1.1, 0.1)) + ... """ @prim_attr_register @@ -266,21 +271,21 @@ class HookBackward(PrimitiveWithInfer): Examples: >>> def hook_fn(grad_out): - >>> print(grad_out) - >>> + ... print(grad_out) + ... >>> grad_all = GradOperation(get_all=True) >>> hook = P.HookBackward(hook_fn) - >>> >>> def hook_test(x, y): - >>> z = x * y - >>> z = hook(z) - >>> z = z * y - >>> return z - >>> + ... z = x * y + ... z = hook(z) + ... z = z * y + ... return z + ... >>> def backward(x, y): - >>> return grad_all(hook_test)(x, y) - >>> - >>> backward(1, 2) + ... return grad_all(hook_test)(x, y) + ... + >>> output = backward(1, 2) + >>> print(output) """ def __init__(self, hook_fn, cell_id=""): @@ -316,13 +321,14 @@ class Print(PrimitiveWithInfer): Examples: >>> class PrintDemo(nn.Cell): - >>> def __init__(self): - >>> super(PrintDemo, self).__init__() - >>> self.print = P.Print() - >>> - >>> def construct(self, x, y): - >>> self.print('Print Tensor x and Tensor y:', x, y) - >>> return x + ... def __init__(self): + ... super(PrintDemo, self).__init__() + ... self.print = P.Print() + ... + ... def construct(self, x, y): + ... self.print('Print Tensor x and Tensor y:', x, y) + ... return x + ... """ @prim_attr_register @@ -356,15 +362,16 @@ class Assert(PrimitiveWithInfer): Examples: >>> class AssertDemo(nn.Cell): - >>> def __init__(self): - >>> super(AssertDemo, self).__init__() - >>> self.assert1 = P.Assert(summarize=10) - >>> self.add = P.TensorAdd() - >>> - >>> def construct(self, x, y): - >>> data = self.add(x, y) - >>> self.assert1(True, [data]) - >>> return data + ... def __init__(self): + ... super(AssertDemo, self).__init__() + ... self.assert1 = P.Assert(summarize=10) + ... self.add = P.TensorAdd() + ... + ... def construct(self, x, y): + ... data = self.add(x, y) + ... self.assert1(True, [data]) + ... return data + ... """ @prim_attr_register diff --git a/mindspore/ops/operations/image_ops.py b/mindspore/ops/operations/image_ops.py index e57be1d6ac..18d4974fad 100644 --- a/mindspore/ops/operations/image_ops.py +++ b/mindspore/ops/operations/image_ops.py @@ -55,14 +55,14 @@ class CropAndResize(PrimitiveWithInfer): Examples: >>> class CropAndResizeNet(nn.Cell): - >>> def __init__(self, crop_size): - >>> super(CropAndResizeNet, self).__init__() - >>> self.crop_and_resize = P.CropAndResize() - >>> self.crop_size = crop_size - >>> - >>> def construct(self, x, boxes, box_index): - >>> return self.crop_and_resize(x, boxes, box_index, self.crop_size) - >>> + ... def __init__(self, crop_size): + ... super(CropAndResizeNet, self).__init__() + ... self.crop_and_resize = P.CropAndResize() + ... self.crop_size = crop_size + ... + ... def construct(self, x, boxes, box_index): + ... return self.crop_and_resize(x, boxes, box_index, self.crop_size) + ... 
>>> BATCH_SIZE = 1 >>> NUM_BOXES = 5 >>> IMAGE_HEIGHT = 256 @@ -74,7 +74,7 @@ class CropAndResize(PrimitiveWithInfer): >>> crop_size = (24, 24) >>> crop_and_resize = CropAndResizeNet(crop_size=crop_size) >>> output = crop_and_resize(Tensor(image), Tensor(boxes), Tensor(box_index)) - >>> output.shape + >>> print(output.shape) (5, 24, 24, 3) """ diff --git a/mindspore/ops/operations/inner_ops.py b/mindspore/ops/operations/inner_ops.py index d0b387b71b..95ce7637e5 100644 --- a/mindspore/ops/operations/inner_ops.py +++ b/mindspore/ops/operations/inner_ops.py @@ -35,6 +35,7 @@ class ScalarCast(PrimitiveWithInfer): Examples: >>> scalar_cast = P.ScalarCast() >>> output = scalar_cast(255.0, mindspore.int32) + >>> print(output) 255 """ diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py index 9eb09c2ef3..8b77c09964 100644 --- a/mindspore/ops/operations/math_ops.py +++ b/mindspore/ops/operations/math_ops.py @@ -139,8 +139,9 @@ class TensorAdd(_MathBinaryOp): >>> add = P.TensorAdd() >>> input_x = Tensor(np.array([1,2,3]).astype(np.float32)) >>> input_y = Tensor(np.array([4,5,6]).astype(np.float32)) - >>> add(input_x, input_y) - [5,7,9] + >>> output = add(input_x, input_y) + >>> print(output) + [5. 7. 9.] """ def infer_value(self, x, y): @@ -170,16 +171,16 @@ class AssignAdd(PrimitiveWithInfer): It must have the same shape as `variable` if it is a Tensor. Examples: - >>> class Net(Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.AssignAdd = P.AssignAdd() - >>> self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int64), name="global_step") - >>> - >>> def construct(self, x): - >>> self.AssignAdd(self.variable, x) - >>> return self.variable - >>> + >>> class Net(nn.Cell): + ... def __init__(self): + ... super(Net, self).__init__() + ... self.AssignAdd = P.AssignAdd() + ... self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int64), name="global_step") + ... + ... def construct(self, x): + ... self.AssignAdd(self.variable, x) + ... return self.variable + ... >>> net = Net() >>> value = Tensor(np.ones([1]).astype(np.int64)*100) >>> output = net(value) @@ -222,16 +223,16 @@ class AssignSub(PrimitiveWithInfer): It must have the same shape as `variable` if it is a Tensor. Examples: - >>> class Net(Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.AssignSub = P.AssignSub() - >>> self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int32), name="global_step") - >>> - >>> def construct(self, x): - >>> self.AssignSub(self.variable, x) - >>> return self.variable - >>> + >>> class Net(nn.Cell): + ... def __init__(self): + ... super(Net, self).__init__() + ... self.AssignSub = P.AssignSub() + ... self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int32), name="global_step") + ... + ... def construct(self, x): + ... self.AssignSub(self.variable, x) + ... return self.variable + ... 
>>> net = Net() >>> value = Tensor(np.ones([1]).astype(np.int32)*100) >>> output = net(value) @@ -422,8 +423,9 @@ class ReduceAll(_Reduce): >>> input_x = Tensor(np.array([[True, False], [True, True]])) >>> op = P.ReduceAll(keep_dims=True) >>> output = op(input_x, 1) + >>> print(output) [[False] - [True ]] + [ True]] """ def __infer__(self, input_x, axis): @@ -461,8 +463,8 @@ class ReduceAny(_Reduce): >>> op = P.ReduceAny(keep_dims=True) >>> output = op(input_x, 1) >>> print(output) - [[True], - [True]] + [[ True] + [ True]] """ def __infer__(self, input_x, axis): @@ -744,6 +746,7 @@ class BatchMatMul(MatMul): >>> input_y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32) >>> batmatmul = P.BatchMatMul() >>> output = batmatmul(input_x, input_y) + >>> print(output) [[[[3. 3. 3. 3.]] [[3. 3. 3. 3.]] [[3. 3. 3. 3.]] @@ -757,6 +760,7 @@ class BatchMatMul(MatMul): >>> input_y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32) >>> batmatmul = P.BatchMatMul(transpose_a=True) >>> output = batmatmul(input_x, input_y) + >>> print(output) [[[[3. 3. 3. 3.]] [[3. 3. 3. 3.]] [[3. 3. 3. 3.]] @@ -800,6 +804,7 @@ class CumSum(PrimitiveWithInfer): >>> input = Tensor(np.array([[3, 4, 6, 10],[1, 6, 7, 9],[4, 3, 8, 7],[1, 3, 7, 9]]).astype(np.float32)) >>> cumsum = P.CumSum() >>> output = cumsum(input, 1) + >>> print(output) [[ 3. 7. 13. 23.] [ 1. 7. 14. 23.] [ 4. 7. 15. 22.] @@ -842,18 +847,19 @@ class AddN(PrimitiveWithInfer): Examples: >>> class NetAddN(nn.Cell): - >>> def __init__(self): - >>> super(NetAddN, self).__init__() - >>> self.addN = P.AddN() - >>> - >>> def construct(self, *z): - >>> return self.addN(z) - >>> + ... def __init__(self): + ... super(NetAddN, self).__init__() + ... self.addN = P.AddN() + ... + ... def construct(self, *z): + ... return self.addN(z) + ... >>> net = NetAddN() >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32) >>> input_y = Tensor(np.array([4, 5, 6]), mindspore.float32) - >>> net(input_x, input_y, input_x, input_y) - [10.0, 14.0, 18.0] + >>> output = net(input_x, input_y, input_x, input_y) + >>> print(output) + [10. 14. 18.] """ @prim_attr_register @@ -924,18 +930,19 @@ class AccumulateNV2(PrimitiveWithInfer): Examples: >>> class NetAccumulateNV2(nn.Cell): - >>> def __init__(self): - >>> super(NetAccumulateNV2, self).__init__() - >>> self.accumulateNV2 = P.AccumulateNV2() - >>> - >>> def construct(self, *z): - >>> return self.accumulateNV2(z) - >>> + ... def __init__(self): + ... super(NetAccumulateNV2, self).__init__() + ... self.accumulateNV2 = P.AccumulateNV2() + ... + ... def construct(self, *z): + ... return self.accumulateNV2(z) + ... >>> net = NetAccumulateNV2() >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32) >>> input_y = Tensor(np.array([4, 5, 6]), mindspore.float32) - >>> net(input_x, input_y, input_x, input_y) - Tensor([10., 14., 18.], shape=(3,), dtype=mindspore.float32) + >>> output = net(input_x, input_y, input_x, input_y) + >>> print(output) + [10. 14. 18.] """ @prim_attr_register @@ -983,8 +990,8 @@ class Neg(PrimitiveWithInfer): Examples: >>> neg = P.Neg() >>> input_x = Tensor(np.array([1, 2, -1, 2, 0, -3.5]), mindspore.float32) - >>> result = neg(input_x) - >>> print(result) + >>> output = neg(input_x) + >>> print(output) [-1. -2. 1. -2. 0. 
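CumSum produces running sums along the chosen axis; a small worked sketch (the first two rows of the docstring input) showing how each output row accumulates:

import numpy as np
import mindspore
from mindspore import Tensor
import mindspore.ops.operations as P

x = Tensor(np.array([[3, 4, 6, 10],
                     [1, 6, 7, 9]]).astype(np.float32))
cumsum = P.CumSum()
# running sums along axis 1: first row 3, 3+4, 3+4+6, 3+4+6+10 -> [3. 7. 13. 23.]
output = cumsum(x, 1)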
3.5] """ @@ -1030,10 +1037,11 @@ class InplaceAdd(PrimitiveWithInfer): >>> input_x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32) >>> input_v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32) >>> inplaceAdd = P.InplaceAdd(indices) - >>> inplaceAdd(input_x, input_v) - [[1.5 3.] - [4. 5.5] - [5. 6.]] + >>> output = inplaceAdd(input_x, input_v) + >>> print(output) + [[1.5 3. ] + [4. 5.5] + [5. 6. ]] """ @prim_attr_register @@ -1088,10 +1096,11 @@ class InplaceSub(PrimitiveWithInfer): >>> input_x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32) >>> input_v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32) >>> inplaceSub = P.InplaceSub(indices) - >>> inplaceSub(input_x, input_v) - [[0.5 1.] - [2. 2.5] - [5. 6.]] + >>> output = inplaceSub(input_x, input_v) + >>> print(output) + [[0.5 1. ] + [2. 2.5] + [5. 6. ]] """ @prim_attr_register @@ -1150,8 +1159,9 @@ class Sub(_MathBinaryOp): >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32) >>> input_y = Tensor(np.array([4, 5, 6]), mindspore.int32) >>> sub = P.Sub() - >>> sub(input_x, input_y) - [-3, -3, -3] + >>> output = sub(input_x, input_y) + >>> print(output) + [-3 -3 -3] """ def infer_value(self, x, y): @@ -1189,8 +1199,9 @@ class Mul(_MathBinaryOp): >>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32) >>> input_y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32) >>> mul = P.Mul() - >>> mul(input_x, input_y) - [4, 10, 18] + >>> output = mul(input_x, input_y) + >>> print(output) + [ 4. 10. 18.] """ def infer_value(self, x, y): @@ -1228,8 +1239,9 @@ class SquaredDifference(_MathBinaryOp): >>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32) >>> input_y = Tensor(np.array([2.0, 4.0, 6.0]), mindspore.float32) >>> squared_difference = P.SquaredDifference() - >>> squared_difference(input_x, input_y) - [1.0, 4.0, 9.0] + >>> output = squared_difference(input_x, input_y) + >>> print(output) + [1. 4. 9.] """ def infer_dtype(self, x_dtype, y_dtype): @@ -1250,8 +1262,9 @@ class Square(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32) >>> square = P.Square() - >>> square(input_x) - [1.0, 4.0, 9.0] + >>> output = square(input_x) + >>> print(output) + [1. 4. 9.] """ @prim_attr_register @@ -1288,8 +1301,10 @@ class Rsqrt(PrimitiveWithInfer): Examples: >>> input_tensor = Tensor([[4, 4], [9, 9]], mindspore.float32) >>> rsqrt = P.Rsqrt() - >>> rsqrt(input_tensor) - [[0.5, 0.5], [0.333333, 0.333333]] + >>> output = rsqrt(input_tensor) + >>> print(output) + [[0.5 0.5 ] + [0.333334 0.333334]] """ @prim_attr_register @@ -1326,8 +1341,9 @@ class Sqrt(PrimitiveWithCheck): Examples: >>> input_x = Tensor(np.array([1.0, 4.0, 9.0]), mindspore.float32) >>> sqrt = P.Sqrt() - >>> sqrt(input_x) - [1.0, 2.0, 3.0] + >>> output = sqrt(input_x) + >>> print(output) + [1. 2. 3.] """ @prim_attr_register @@ -1360,8 +1376,9 @@ class Reciprocal(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32) >>> reciprocal = P.Reciprocal() - >>> reciprocal(input_x) - [1.0, 0.5, 0.25] + >>> output = reciprocal(input_x) + >>> print(output) + [1. 0.5 0.25] """ @prim_attr_register @@ -1414,14 +1431,16 @@ class Pow(_MathBinaryOp): >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32) >>> input_y = 3.0 >>> pow = P.Pow() - >>> pow(input_x, input_y) - [1.0, 8.0, 64.0] + >>> output = pow(input_x, input_y) + >>> print(output) + [ 1. 8. 64.] 
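The element-wise unary ops in these hunks each map a tensor through one function; a compact sketch of three of them on the same input (values illustrative):

import numpy as np
import mindspore
from mindspore import Tensor
import mindspore.ops.operations as P

x = Tensor(np.array([1.0, 4.0, 9.0]), mindspore.float32)

squared = P.Square()(x)       # [ 1. 16. 81.]
roots = P.Sqrt()(x)           # [1. 2. 3.]
inverses = P.Reciprocal()(x)  # [1. 0.25 0.111...]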
>>> >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32) >>> input_y = Tensor(np.array([2.0, 4.0, 3.0]), mindspore.float32) >>> pow = P.Pow() - >>> pow(input_x, input_y) - [1.0, 16.0, 64.0] + >>> output = pow(input_x, input_y) + >>> print(output) + [ 1. 16. 64.] """ def infer_value(self, x, power): @@ -1447,8 +1466,9 @@ class Exp(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32) >>> exp = P.Exp() - >>> exp(input_x) - [ 2.71828183, 7.3890561 , 54.59815003] + >>> output = exp(input_x) + >>> print(output) + [ 2.718282 7.389056 54.598152] """ @prim_attr_register @@ -1485,8 +1505,9 @@ class Expm1(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([0.0, 1.0, 2.0, 4.0]), mindspore.float32) >>> expm1 = P.Expm1() - >>> expm1(input_x) - [ 0., 1.71828183, 6.3890561 , 53.59815003] + >>> output = expm1(input_x) + >>> print(output) + [ 0. 1.718282 6.389056 53.598152] """ @prim_attr_register @@ -1523,7 +1544,8 @@ class HistogramFixedWidth(PrimitiveWithInfer): >>> x = Tensor([-1.0, 0.0, 1.5, 2.0, 5.0, 15], mindspore.float16) >>> range = Tensor([0.0, 5.0], mindspore.float16) >>> hist = P.HistogramFixedWidth(5) - >>> hist(x, range) + >>> output = hist(x, range) + >>> print(output) [2 1 1 0 2] """ @@ -1559,8 +1581,9 @@ class Log(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32) >>> log = P.Log() - >>> log(input_x) - [0.0, 0.69314718, 1.38629436] + >>> output = log(input_x) + >>> print(output) + [0. 0.6931472 1.38629444] """ @prim_attr_register @@ -1596,8 +1619,9 @@ class Log1p(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32) >>> log1p = P.Log1p() - >>> log1p(input_x) - [0.6931472, 1.0986123, 1.609438] + >>> output = log1p(input_x) + >>> print(output) + [0.6931472 1.0986123 1.609438 ] """ @prim_attr_register @@ -1626,8 +1650,9 @@ class Erf(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32) >>> erf = P.Erf() - >>> erf(input_x) - [-0.8427168, 0., 0.8427168, 0.99530876, 0.99997765] + >>> output = erf(input_x) + >>> print(output) + [-0.8427168 0. 0.8427168 0.99530876 0.99997765] """ @prim_attr_register @@ -1656,8 +1681,9 @@ class Erfc(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32) >>> erfc = P.Erfc() - >>> erfc(input_x) - [1.8427168, 1.0, 0.1572832, 0.00469124, 0.00002235] + >>> output = erfc(input_x) + >>> print(output) + [1.8427168e+00 1.0000000e+00 1.5728319e-01 4.6912432e-03 2.2351742e-05] """ @prim_attr_register @@ -1698,8 +1724,9 @@ class Minimum(_MathBinaryOp): >>> input_x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32) >>> input_y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32) >>> minimum = P.Minimum() - >>> minimum(input_x, input_y) - [1.0, 2.0, 3.0] + >>> output = minimum(input_x, input_y) + >>> print(output) + [1. 2. 3.] """ def infer_value(self, x, y): @@ -1737,8 +1764,9 @@ class Maximum(_MathBinaryOp): >>> input_x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32) >>> input_y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32) >>> maximum = P.Maximum() - >>> maximum(input_x, input_y) - [4.0, 5.0, 6.0] + >>> output = maximum(input_x, input_y) + >>> print(output) + [4. 5. 6.] 
""" def infer_value(self, x, y): @@ -1776,8 +1804,9 @@ class RealDiv(_MathBinaryOp): >>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32) >>> input_y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32) >>> realdiv = P.RealDiv() - >>> realdiv(input_x, input_y) - [0.25, 0.4, 0.5] + >>> output = realdiv(input_x, input_y) + >>> print(output) + [0.25 0.4 0.5 ] """ def infer_value(self, x, y): @@ -1816,8 +1845,9 @@ class Div(_MathBinaryOp): >>> input_x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32) >>> input_y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32) >>> div = P.Div() - >>> div(input_x, input_y) - [-1.3, 2.5, 2.0] + >>> output = div(input_x, input_y) + >>> print(output) + [-1.3333334 2.5 2. ] """ def infer_value(self, x, y): @@ -1854,8 +1884,9 @@ class DivNoNan(_MathBinaryOp): >>> input_x = Tensor(np.array([-1.0, 0., 1.0, 5.0, 6.0]), mindspore.float32) >>> input_y = Tensor(np.array([0., 0., 0., 2.0, 3.0]), mindspore.float32) >>> div_no_nan = P.DivNoNan() - >>> div_no_nan(input_x, input_y) - [0., 0., 0., 2.5, 2.0] + >>> output = div_no_nan(input_x, input_y) + >>> print(output) + [0. 0. 0. 2.5 2. ] """ @prim_attr_register @@ -1899,8 +1930,9 @@ class FloorDiv(_MathBinaryOp): >>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32) >>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32) >>> floor_div = P.FloorDiv() - >>> floor_div(input_x, input_y) - [0, 1, -1] + >>> output = floor_div(input_x, input_y) + >>> print(output) + [ 0 1 -1] """ @@ -1930,8 +1962,9 @@ class TruncateDiv(_MathBinaryOp): >>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32) >>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32) >>> truncate_div = P.TruncateDiv() - >>> truncate_div(input_x, input_y) - [0, 1, 0] + >>> output = truncate_div(input_x, input_y) + >>> print(output) + [0 1 0] """ @@ -1960,8 +1993,9 @@ class TruncateMod(_MathBinaryOp): >>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32) >>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32) >>> truncate_mod = P.TruncateMod() - >>> truncate_mod(input_x, input_y) - [2, 1, -1] + >>> output = truncate_mod(input_x, input_y) + >>> print(output) + [ 2 1 -1] """ @@ -1991,8 +2025,9 @@ class Mod(_MathBinaryOp): >>> input_x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32) >>> input_y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32) >>> mod = P.Mod() - >>> mod(input_x, input_y) - [-1. 1. 0.] + >>> output = mod(input_x, input_y) + >>> print(output) + [-1. 1. 0.] """ def infer_value(self, x, y): @@ -2016,8 +2051,9 @@ class Floor(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32) >>> floor = P.Floor() - >>> floor(input_x) - [1.0, 2.0, -2.0] + >>> output = floor(input_x) + >>> print(output) + [ 1. 2. -2.] """ @prim_attr_register @@ -2057,8 +2093,9 @@ class FloorMod(_MathBinaryOp): >>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32) >>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32) >>> floor_mod = P.FloorMod() - >>> floor_mod(input_x, input_y) - [2, 1, 2] + >>> output = floor_mod(input_x, input_y) + >>> print(output) + [2 1 2] """ @@ -2075,8 +2112,9 @@ class Ceil(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32) >>> ceil_op = P.Ceil() - >>> ceil_op(input_x) - [2.0, 3.0, -1.0] + >>> output = ceil_op(input_x) + >>> print(output) + [ 2. 3. -1.] 
""" @prim_attr_register @@ -2116,8 +2154,9 @@ class Xdivy(_MathBinaryOp): >>> input_x = Tensor(np.array([2, 4, -1]), mindspore.float32) >>> input_y = Tensor(np.array([2, 2, 2]), mindspore.float32) >>> xdivy = P.Xdivy() - >>> xdivy(input_x, input_y) - [1.0, 2.0, -0.5] + >>> output = xdivy(input_x, input_y) + >>> print(output) + [ 1. 2. -0.5] """ def infer_dtype(self, x_dtype, y_dtype): @@ -2151,8 +2190,9 @@ class Xlogy(_MathBinaryOp): >>> input_x = Tensor(np.array([-5, 0, 4]), mindspore.float32) >>> input_y = Tensor(np.array([2, 2, 2]), mindspore.float32) >>> xlogy = P.Xlogy() - >>> xlogy(input_x, input_y) - [-3.465736, 0.0, 2.7725887] + >>> output = xlogy(input_x, input_y) + >>> print(output) + [-3.465736 0. 2.7725887] """ def infer_dtype(self, x_dtype, y_dtype): @@ -2201,7 +2241,8 @@ class Cosh(PrimitiveWithInfer): >>> cosh = P.Cosh() >>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32) >>> output = cosh(input_x) - [1.0289385 1.364684 1.048436 1.4228927] + >>> print(output) + [1.0289385 1.364684 1.048436 1.0040528] """ @prim_attr_register @@ -2230,7 +2271,8 @@ class Asinh(PrimitiveWithInfer): >>> asinh = P.Asinh() >>> input_x = Tensor(np.array([-5.0, 1.5, 3.0, 100.0]), mindspore.float32) >>> output = asinh(input_x) - [-2.3212, 1.1976, 1.8184, 5.2983] + >>> print(output) + [-2.3124385 1.1947632 1.8184465 5.298342 ] """ @prim_attr_register @@ -2259,7 +2301,8 @@ class Sinh(PrimitiveWithInfer): >>> sinh = P.Sinh() >>> input_x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32) >>> output = sinh(input_x) - [0.6604918 0.28367308 0.44337422 0.6604918] + >>> print(output) + [0.6604918 0.28367308 0.44337422 0.6604918 ] """ @prim_attr_register @@ -2316,8 +2359,9 @@ class Equal(_LogicBinaryOp): >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32) >>> input_y = Tensor(np.array([1, 2, 4]), mindspore.int32) >>> equal = P.Equal() - >>> equal(input_x, input_y) - [True, True, False] + >>> output = equal(input_x, input_y) + >>> print(output) + [ True True False] """ def infer_dtype(self, x_dtype, y_dtype): @@ -2356,8 +2400,9 @@ class ApproximateEqual(_LogicBinaryOp): >>> x1 = Tensor(np.array([1, 2, 3]), mindspore.float32) >>> x2 = Tensor(np.array([2, 4, 6]), mindspore.float32) >>> approximate_equal = P.ApproximateEqual(2.) 
- >>> result = approximate_equal(x1, x2) - [True True False] + >>> output = approximate_equal(x1, x2) + >>> print(output) + [ True True False] """ @prim_attr_register @@ -2393,7 +2438,8 @@ class EqualCount(PrimitiveWithInfer): >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32) >>> input_y = Tensor(np.array([1, 2, 4]), mindspore.int32) >>> equal_count = P.EqualCount() - >>> equal_count(input_x, input_y) + >>> output = equal_count(input_x, input_y) + >>> print(output) [2] """ @@ -2434,14 +2480,16 @@ class NotEqual(_LogicBinaryOp): Examples: >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32) >>> not_equal = P.NotEqual() - >>> not_equal(input_x, 2.0) - [True, False, True] + >>> output = not_equal(input_x, 2.0) + >>> print(output) + [ True False True] >>> >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32) >>> input_y = Tensor(np.array([1, 2, 4]), mindspore.int32) >>> not_equal = P.NotEqual() - >>> not_equal(input_x, input_y) - [False, False, True] + >>> output = not_equal(input_x, input_y) + >>> print(output) + [False False True] """ def infer_dtype(self, x_dtype, y_dtype): @@ -2472,8 +2520,9 @@ class Greater(_LogicBinaryOp): >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32) >>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32) >>> greater = P.Greater() - >>> greater(input_x, input_y) - [False, True, False] + >>> output = greater(input_x, input_y) + >>> print(output) + [False True False] """ def infer_value(self, x, y): @@ -2509,8 +2558,9 @@ class GreaterEqual(_LogicBinaryOp): >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32) >>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32) >>> greater_equal = P.GreaterEqual() - >>> greater_equal(input_x, input_y) - [True, True, False] + >>> output = greater_equal(input_x, input_y) + >>> print(output) + [ True True False] """ def infer_value(self, x, y): @@ -2546,8 +2596,9 @@ class Less(_LogicBinaryOp): >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32) >>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32) >>> less = P.Less() - >>> less(input_x, input_y) - [False, False, True] + >>> output = less(input_x, input_y) + >>> print(output) + [False False True] """ def infer_value(self, x, y): @@ -2583,8 +2634,9 @@ class LessEqual(_LogicBinaryOp): >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32) >>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32) >>> less_equal = P.LessEqual() - >>> less_equal(input_x, input_y) - [True, False, True] + >>> output = less_equal(input_x, input_y) + >>> print(output) + [ True False True] """ def infer_value(self, x, y): @@ -2609,8 +2661,9 @@ class LogicalNot(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_) >>> logical_not = P.LogicalNot() - >>> logical_not(input_x) - [False, True, False] + >>> output = logical_not(input_x) + >>> print(output) + [False True False] """ @prim_attr_register @@ -2649,8 +2702,9 @@ class LogicalAnd(_LogicBinaryOp): >>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_) >>> input_y = Tensor(np.array([True, True, False]), mindspore.bool_) >>> logical_and = P.LogicalAnd() - >>> logical_and(input_x, input_y) - [True, False, False] + >>> output = logical_and(input_x, input_y) + >>> print(output) + [ True False False] """ def infer_dtype(self, x_dtype, y_dtype): @@ -2680,8 +2734,9 @@ class LogicalOr(_LogicBinaryOp): >>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_) >>> input_y = Tensor(np.array([True, True, False]), mindspore.bool_) >>> logical_or = 
P.LogicalOr()
- >>> logical_or(input_x, input_y)
- [True, True, True]
+ >>> output = logical_or(input_x, input_y)
+ >>> print(output)
+ [ True True True]
"""
def infer_dtype(self, x_dtype, y_dtype):
@@ -2757,8 +2812,9 @@ class IsFinite(PrimitiveWithInfer):
Examples:
>>> is_finite = P.IsFinite()
>>> input_x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
- >>> result = is_finite(input_x)
- [False True False]
+ >>> output = is_finite(input_x)
+ >>> print(output)
+ [False True False]
"""
@prim_attr_register
@@ -2820,8 +2876,9 @@ class NPUAllocFloatStatus(PrimitiveWithInfer):
Examples:
>>> alloc_status = P.NPUAllocFloatStatus()
- >>> init = alloc_status()
- Tensor([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=(8,), dtype=mindspore.float32)
+ >>> output = alloc_status()
+ >>> print(output)
+ [0. 0. 0. 0. 0. 0. 0. 0.]
"""
@prim_attr_register
@@ -2855,8 +2912,9 @@ class NPUGetFloatStatus(PrimitiveWithInfer):
>>> alloc_status = P.NPUAllocFloatStatus()
>>> get_status = P.NPUGetFloatStatus()
>>> init = alloc_status()
- >>> flag = get_status(init)
- Tensor([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=(8,), dtype=mindspore.float32)
+ >>> output = get_status(init)
+ >>> print(output)
+ [0. 0. 0. 0. 0. 0. 0. 0.]
"""
@prim_attr_register
@@ -2898,9 +2956,9 @@ class NPUClearFloatStatus(PrimitiveWithInfer):
>>> clear_status = P.NPUClearFloatStatus()
>>> init = alloc_status()
>>> flag = get_status(init)
- >>> clear = clear_status(init)
- >>> print(clear)
- [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+ >>> output = clear_status(init)
+ >>> print(output)
+ [0. 0. 0. 0. 0. 0. 0. 0.]
"""
@prim_attr_register
@@ -2991,7 +3049,8 @@ class Sin(PrimitiveWithInfer):
>>> sin = P.Sin()
>>> input_x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
>>> output = sin(input_x)
- [0.5810352 0.27635565 0.41687083 0.5810352]
+ >>> print(output)
+ [0.5810352 0.27635565 0.41687083 0.5810352 ]
"""
@prim_attr_register
@@ -3020,7 +3079,8 @@ class Asin(PrimitiveWithInfer):
>>> asin = P.Asin()
>>> input_x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
>>> output = asin(input_x)
- [0.8331, 0.0400, 0.3047, 0.5944]
+ >>> print(output)
+ [0.8330927 0.04001068 0.30469266 0.59438497]
"""
@prim_attr_register
@@ -3105,10 +3165,11 @@ class Abs(PrimitiveWithInfer):
Tensor, has the same shape as the `input_x`.
Examples:
- >>> input_x = Tensor(np.array([-1.0, 1.0, 0.0]), mindspore.float32)
- >>> abs = P.Abs()
- >>> abs(input_x)
- [1.0, 1.0, 0.0]
+ >>> input_x = Tensor(np.array([-1.0, 1.0, 0.0]), mindspore.float32)
+ >>> abs = P.Abs()
+ >>> output = abs(input_x)
+ >>> print(output)
+ [1. 1. 0.]
"""
@prim_attr_register
@@ -3152,7 +3213,7 @@ class Sign(PrimitiveWithInfer):
>>> sign = P.Sign()
>>> output = sign(input_x)
>>> print(output)
- [[1.0, 0.0, -1.0]]
+ [[ 1. 0. -1.]]
"""
@prim_attr_register
@@ -3180,8 +3241,9 @@ class Round(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]), mindspore.float32)
>>> round = P.Round()
- >>> round(input_x)
- [1.0, 2.0, 2.0, 2.0, -4.0]
+ >>> output = round(input_x)
+ >>> print(output)
+ [ 1. 2. 2. 2. -4.]
"""
@prim_attr_register
@@ -3245,7 +3307,7 @@ class Atan(PrimitiveWithInfer):
>>> atan = P.Atan()
>>> output = atan(output_y)
>>> print(output)
- [[1.047, 0.7850001]]
+ [1.047 0.7850001]
"""
@prim_attr_register
@@ -3273,8 +3335,9 @@ class Atanh(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.array([1.047, 0.785]), mindspore.float32)
>>> atanh = P.Atanh()
- >>> atanh(input_x)
- [[1.8869909 1.058268]]
+ >>> output = atanh(input_x)
+ >>> print(output)
+ [1.8869909 1.058268 ]
"""
@prim_attr_register
@@ -3309,11 +3372,12 @@ class Atan2(_MathBinaryOp):
Tensor, the shape is the same as the one after broadcasting,and the data type is same as `input_x`.
Examples:
- >>> input_x = Tensor(np.array([[0, 1]]), mindspore.float32)
- >>> input_y = Tensor(np.array([[1, 1]]), mindspore.float32)
- >>> atan2 = P.Atan2()
- >>> atan2(input_x, input_y)
- [[0. 0.7853982]]
+ >>> input_x = Tensor(np.array([0, 1]), mindspore.float32)
+ >>> input_y = Tensor(np.array([1, 1]), mindspore.float32)
+ >>> atan2 = P.Atan2()
+ >>> output = atan2(input_x, input_y)
+ >>> print(output)
+ [0. 0.7853982]
"""
@@ -3333,11 +3397,13 @@ class SquareSumAll(PrimitiveWithInfer):
- **output_y2** (Tensor) - The same type as the `input_x1`.
Examples:
- >>> input_x1 = Tensor(np.array([0, 0, 2, 0]), mindspore.float32)
- >>> input_x2 = Tensor(np.array([0, 0, 2, 4]), mindspore.float32)
- >>> square_sum_all = P.SquareSumAll()
- >>> square_sum_all(input_x1, input_x2)
- (4, 20)
+ >>> input_x1 = Tensor(np.array([0, 0, 2, 0]), mindspore.float32)
+ >>> input_x2 = Tensor(np.array([0, 0, 2, 4]), mindspore.float32)
+ >>> square_sum_all = P.SquareSumAll()
+ >>> output = square_sum_all(input_x1, input_x2)
+ >>> print(output)
+ (Tensor(shape=[], dtype=Float32, value= 4),
+ Tensor(shape=[], dtype=Float32, value= 20))
"""
@prim_attr_register
@@ -3373,11 +3439,12 @@ class BitwiseAnd(_BitwiseBinaryOp):
Tensor, has the same type as the `input_x1`.
Examples:
- >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16)
- >>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mstype.int16)
- >>> bitwise_and = P.BitwiseAnd()
- >>> bitwise_and(input_x1, input_x2)
- [0, 0, 1, -1, 1, 0, 1]
+ >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
+ >>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
+ >>> bitwise_and = P.BitwiseAnd()
+ >>> output = bitwise_and(input_x1, input_x2)
+ >>> print(output)
+ [ 0 0 1 -1 1 0 1]
"""
@@ -3399,11 +3466,12 @@ class BitwiseOr(_BitwiseBinaryOp):
Tensor, has the same type as the `input_x1`.
Examples:
- >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16)
- >>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mstype.int16)
- >>> bitwise_or = P.BitwiseOr()
- >>> bitwise_or(input_x1, input_x2)
- [0, 1, 1, -1, -1, 3, 3]
+ >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
+ >>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
+ >>> bitwise_or = P.BitwiseOr()
+ >>> output = bitwise_or(input_x1, input_x2)
+ >>> print(output)
+ [ 0 1 1 -1 -1 3 3]
"""
@@ -3425,11 +3493,12 @@ class BitwiseXor(_BitwiseBinaryOp):
Tensor, has the same type as the `input_x1`.
Examples: - >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16) - >>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mstype.int16) - >>> bitwise_xor = P.BitwiseXor() - >>> bitwise_xor(input_x1, input_x2) - [0, 1, 0, 0, -2, 3, 2] + >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16) + >>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16) + >>> bitwise_xor = P.BitwiseXor() + >>> output = bitwise_xor(input_x1, input_x2) + >>> print(output) + [ 0 1 0 0 -2 3 2] """ @@ -3449,7 +3518,7 @@ class BesselI0e(PrimitiveWithInfer): >>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32) >>> output = bessel_i0e(input_x) >>> print(output) - [0.7979961, 0.5144438, 0.75117415, 0.9157829] + [0.7979961 0.5144438 0.75117415 0.9157829 ] """ @prim_attr_register @@ -3480,7 +3549,7 @@ class BesselI1e(PrimitiveWithInfer): >>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32) >>> output = bessel_i1e(input_x) >>> print(output) - [0.09507662, 0.19699717, 0.11505538, 0.04116856] + [0.09507662 0.19699717 0.11505538 0.04116856] """ @prim_attr_register @@ -3511,7 +3580,7 @@ class Inv(PrimitiveWithInfer): >>> input_x = Tensor(np.array([0.25, 0.4, 0.31, 0.52]), mindspore.float32) >>> output = inv(input_x) >>> print(output) - [4., 2.5, 3.2258065, 1.923077] + [4. 2.5 3.2258065 1.923077 ] """ @prim_attr_register @@ -3542,7 +3611,7 @@ class Invert(PrimitiveWithInfer): >>> input_x = Tensor(np.array([25, 4, 13, 9]), mindspore.int16) >>> output = invert(input_x) >>> print(output) - [-26, -5, -14, -10] + [-26 -5 -14 -10] """ @prim_attr_register @@ -3569,9 +3638,9 @@ class Eps(PrimitiveWithInfer): Examples: >>> input_x = Tensor([4, 1, 2, 3], mindspore.float32) - >>> out = P.Eps()(input_x) - >>> print(out) - [1.52587891e-05, 1.52587891e-05, 1.52587891e-05, 1.52587891e-05] + >>> output = P.Eps()(input_x) + >>> print(output) + [1.5258789e-05 1.5258789e-05 1.5258789e-05 1.5258789e-05] """ @prim_attr_register diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py index 9bee3851f4..62c8ba7b61 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -133,8 +133,9 @@ class Softmax(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32) >>> softmax = P.Softmax() - >>> softmax(input_x) - [0.01165623, 0.03168492, 0.08612854, 0.23412167, 0.6364086] + >>> output = softmax(input_x) + >>> print(output) + [0.01165623 0.03168492 0.08612854 0.23412167 0.6364086 ] """ @prim_attr_register @@ -183,8 +184,9 @@ class LogSoftmax(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32) >>> log_softmax = P.LogSoftmax() - >>> log_softmax(input_x) - [-4.4519143, -3.4519143, -2.4519143, -1.4519144, -0.4519144] + >>> output = log_softmax(input_x) + >>> print(output) + [-4.4519143 -3.4519143 -2.4519143 -1.4519144 -0.4519144] """ @prim_attr_register @@ -220,8 +222,9 @@ class Softplus(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32) >>> softplus = P.Softplus() - >>> softplus(input_x) - [1.3132615, 2.126928, 3.0485873, 4.01815, 5.0067153] + >>> output = softplus(input_x) + >>> print(output) + [1.3132615 2.126928 3.0485873 4.01815 5.0067153] """ @prim_attr_register @@ -255,8 +258,9 @@ class Softsign(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([0, -1, 2, 30, -30]), mindspore.float32) >>> softsign = P.Softsign() - >>> softsign(input_x) - [0. 
-0.5 0.6666667 0.9677419 -0.9677419] + >>> output = softsign(input_x) + >>> print(output) + [ 0. -0.5 0.6666667 0.9677419 -0.9677419] """ @prim_attr_register @@ -287,9 +291,10 @@ class ReLU(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32) >>> relu = P.ReLU() - >>> result = relu(input_x) - >>> print(result) - [[0, 4.0, 0.0], [2.0, 0.0, 9.0]] + >>> output = relu(input_x) + >>> print(output) + [[0. 4. 0.] + [2. 0. 9.]] """ @prim_attr_register @@ -355,9 +360,17 @@ class ReLUV2(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([[[[1, -2], [-3, 4]], [[-5, 6], [7, -8]]]]), mindspore.float32) >>> relu_v2 = P.ReLUV2() - >>> output = relu_v2(input_x) - ([[[[1., 0.], [0., 4.]], [[0., 6.], [7., 0.]]]], - [[[[[1, 0], [2, 0]], [[2, 0], [1, 0]]]]]) + >>> output, mask= relu_v2(input_x) + >>> print(output) + [[[[1. 0.] + [0. 4.]] + [[0. 6.] + [7. 0.]]]] + >>> print(mask) + [[[[[1 0] + [2 0]] + [[2 0] + [1 0]]]]] """ @prim_attr_register @@ -414,10 +427,10 @@ class Elu(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32) >>> elu = P.Elu() - >>> result = elu(input_x) - >>> print(result) - [[-0.632 4.0 -0.999] - [2.0 -0.993 9.0 ]] + >>> output = elu(input_x) + >>> print(output) + [[-0.63212055 4. -0.99966455] + [ 2. -0.99326205 9. ]] """ @prim_attr_register @@ -493,8 +506,9 @@ class Sigmoid(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32) >>> sigmoid = P.Sigmoid() - >>> sigmoid(input_x) - [0.73105866, 0.880797, 0.9525742, 0.98201376, 0.9933071] + >>> output = sigmoid(input_x) + >>> print(output) + [0.7310586 0.880797 0.95257413 0.98201376 0.9933072 ] """ @prim_attr_register @@ -568,8 +582,9 @@ class Tanh(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32) >>> tanh = P.Tanh() - >>> tanh(input_x) - [0.7615941, 0.9640276, 0.9950548, 0.9993293, 0.99990916] + >>> output = tanh(input_x) + >>> print(output) + [0.7615941 0.9640276 0.9950547 0.9993293 0.9999092] """ @prim_attr_register @@ -1554,24 +1569,24 @@ class AvgPool(_Pool): >>> from mindspore import Tensor >>> from mindspore.ops import operations as P >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.avgpool_op = P.AvgPool(padding="VALID", ksize=2, strides=1) - >>> - >>> def construct(self, x): - >>> result = self.avgpool_op(x) - >>> return result - >>> + ... def __init__(self): + ... super(Net, self).__init__() + ... self.avgpool_op = P.AvgPool(padding="VALID", ksize=2, strides=1) + ... + ... def construct(self, x): + ... result = self.avgpool_op(x) + ... return result + ... 
>>> input_x = Tensor(np.arange(1 * 3 * 3 * 4).reshape(1, 3, 3, 4), mindspore.float32) >>> net = Net() - >>> result = net(input_x) - >>> print(result) + >>> output = net(input_x) + >>> print(output) [[[[ 2.5 3.5 4.5] [ 6.5 7.5 8.5]] - [[ 14.5 15.5 16.5] - [ 18.5 19.5 20.5]] - [[ 26.5 27.5 28.5] - [ 30.5 31.5 32.5]]]] + [[14.5 15.5 16.5] + [18.5 19.5 20.5]] + [[26.5 27.5 28.5] + [30.5 31.5 32.5]]]] """ @prim_attr_register @@ -1835,10 +1850,12 @@ class SoftmaxCrossEntropyWithLogits(PrimitiveWithInfer): >>> logits = Tensor([[2, 4, 1, 4, 5], [2, 1, 2, 4, 3]], mindspore.float32) >>> labels = Tensor([[0, 0, 0, 0, 1], [0, 0, 0, 1, 0]], mindspore.float32) >>> softmax_cross = P.SoftmaxCrossEntropyWithLogits() - >>> loss, backprop = softmax_cross(logits, labels) - >>> print((loss, backprop)) - ([0.5899297, 0.52374405], [[0.02760027, 0.20393994, 0.01015357, 0.20393994, -0.44563377], - [0.08015892, 0.02948882, 0.08015892, -0.4077012, 0.21789455]]) + >>> loss, dlogits = softmax_cross(logits, labels) + >>> print(loss) + [0.5899297 0.52374405] + >>> print(dlogits) + [[ 0.02760027 0.20393994 0.01015357 0.20393994 -0.44563377] + [ 0.08015892 0.02948882 0.08015892 -0.4077012 0.21789455]] """ @prim_attr_register @@ -2009,7 +2026,7 @@ class SmoothL1Loss(PrimitiveWithInfer): >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32) >>> output = loss(input_data, target_data) >>> print(output) - [0, 0, 0.5] + [0. 0. 0.5] """ @prim_attr_register @@ -2281,7 +2298,8 @@ class ApplyRMSProp(PrimitiveWithInfer): >>> epsilon = 0.001 >>> output = apply_rms(input_x, mean_square, moment, learning_rate, grad, decay, momentum, epsilon) >>> print(output) - (-2.9977674, 0.80999994, 1.9987665) + (Tensor(shape=[], dtype=Float32, value= 0.100112), Tensor(shape=[], dtype=Float32, value= 4), + Tensor(shape=[], dtype=Float32, value= 0.899888)) """ @prim_attr_register @@ -2369,24 +2387,27 @@ class ApplyCenteredRMSProp(PrimitiveWithInfer): Examples: >>> centered_rms_prop = P.ApplyCenteredRMSProp() - >>> input_x = Tensor(np.arange(-6, 6).astype(np.float32).reshape(2, 3, 2), mindspore.float32) - >>> mean_grad = Tensor(np.arange(12).astype(np.float32).reshape(2, 3, 2), mindspore.float32) - >>> mean_square = Tensor(np.arange(-8, 4).astype(np.float32).reshape(2, 3, 2), mindspore.float32) - >>> moment = Tensor(np.arange(12).astype(np.float32).reshape(2, 3, 2), mindspore.float32) - >>> grad = Tensor(np.arange(12).astype(np.float32).reshape(2, 3, 2), mindspore.float32) + >>> input_x = Tensor(np.arange(-2, 2).astype(np.float32).reshape(2, 2), mindspore.float32) + >>> mean_grad = Tensor(np.arange(4).astype(np.float32).reshape(2, 2), mindspore.float32) + >>> mean_square = Tensor(np.arange(-3, 1).astype(np.float32).reshape(2, 2), mindspore.float32) + >>> moment = Tensor(np.arange(4).astype(np.float32).reshape(2, 2), mindspore.float32) + >>> grad = Tensor(np.arange(4).astype(np.float32).reshape(2, 2), mindspore.float32) >>> learning_rate = Tensor(0.9, mindspore.float32) >>> decay = 0.0 >>> momentum = 1e-10 >>> epsilon = 0.05 >>> output = centered_rms_prop(input_x, mean_grad, mean_square, moment, grad, - >>> learning_rate, decay, momentum, epsilon) + ... learning_rate, decay, momentum, epsilon) >>> print(output) - [[[ -6. 
-9.024922] - [-12.049845 -15.074766] - [-18.09969 -21.124613]] - [[-24.149532 -27.174456] - [-30.199379 -33.2243 ] - [-36.249226 -39.274143]]] + (Tensor(shape=[2, 2], dtype=Float32, value= + [[-2.00000000e+00, -5.02492237e+00], + [-8.04984474e+00, -1.10747662e+01]]), Tensor(shape=[2, 2], dtype=Float32, value= + [[ 0.00000000e+00, 1.00000000e+00], + [ 2.00000000e+00, 3.00000000e+00]]), Tensor(shape=[2, 2], dtype=Float32, value= + [[ 0.00000000e+00, 1.00000000e+00], + [ 4.00000000e+00, 9.00000000e+00]]), Tensor(shape=[2, 2], dtype=Float32, value= + [[ 0.00000000e+00, 4.02492237e+00], + [ 8.04984474e+00, 1.20747662e+01]])) """ @prim_attr_register @@ -2460,10 +2481,16 @@ class LayerNorm(Primitive): >>> gamma = Tensor(np.ones([3]), mindspore.float32) >>> beta = Tensor(np.ones([3]), mindspore.float32) >>> layer_norm = P.LayerNorm() - >>> output = layer_norm(input_x, gamma, beta) + >>> output, mean, variance = layer_norm(input_x, gamma, beta) >>> print(output) - ([[-0.22474492, 1., 2.2247488], [-0.22474492, 1., 2.2247488]], - [[2.], [2.]], [[0.6666667], [0.6666667]]) + [[-0.2247448 1. 2.2247448] + [-0.2247448 1. 2.2247448]] + >>> print(mean) + [[2.] + [2.]] + >>> print(variance) + [[0.6666667] + [0.6666667]]) """ @prim_attr_register @@ -2544,7 +2571,7 @@ class DropoutGenMask(Primitive): >>> keep_prob = Tensor(0.5, mindspore.float32) >>> output = dropout_gen_mask(shape, keep_prob) >>> print(output) - [249, 11, 134, 133, 143, 246, 89, 52, 169, 15, 94, 63, 146, 103, 7, 101] + [249 11 134 133 143 246 89 52 169 15 94 63 146 103 7 101] """ @prim_attr_register @@ -2581,11 +2608,11 @@ class DropoutDoMask(PrimitiveWithInfer): >>> dropout_do_mask = P.DropoutDoMask() >>> mask = dropout_gen_mask(shape, keep_prob) >>> output = dropout_do_mask(x, mask, keep_prob) - >>> assert output.shape == (2, 2, 3) - [[[2.0, 0.0, 0.0], - [0.0, 0.0, 0.0]], - [[0.0, 0.0, 0.0], - [2.0, 2.0, 2.0]]] + >>> print(output) + [[[2. 0. 0.] + [2. 0. 0.]] + [[0. 2. 2.] + [2. 0. 2.]]] """ @prim_attr_register @@ -2706,7 +2733,9 @@ class OneHot(PrimitiveWithInfer): >>> onehot = P.OneHot() >>> output = onehot(indices, depth, on_value, off_value) >>> print(output) - [[1, 0, 0], [0, 1, 0], [0, 0, 1]] + [[1. 0. 0.] + [0. 1. 0.] + [0. 0. 1.]] """ @prim_attr_register @@ -2860,14 +2889,14 @@ class PReLU(PrimitiveWithInfer): >>> input_x = Tensor(np.random.randint(-3, 3, (2, 3, 2)), mindspore.float32) >>> weight = Tensor(np.array([0.1, 0.6, -0.3]), mindspore.float32) >>> net = Net() - >>> result = net(input_x, weight) - >>> print(result) - [[[-0.1, 1.0], - [0.0, 2.0], - [0.0, 0.0]], - [[-0.2, -0.1], - [2.0, -1.8000001], - [0.6, 0.6]]] + >>> output = net(input_x, weight) + >>> print(output) + [[[-0.2 -0.1 ] + [-1.8000001 -0.6 ] + [ 0.90000004 1. ]] + [[-0.3 -0.1 ] + [-1.8000001 2. ] + [ 0.90000004 0.90000004]]] """ @prim_attr_register @@ -3004,8 +3033,8 @@ class SigmoidCrossEntropyWithLogits(PrimitiveWithInfer): >>> sigmoid = P.SigmoidCrossEntropyWithLogits() >>> output = sigmoid(logits, labels) >>> print(output) - [[0.6113 0.5034 0.263 ] - [0.5845 0.553 -0.4365]] + [[0.6111007 0.5032824 0.26318604] + [0.58439666 0.5530153 -0.4368139 ]] """ @prim_attr_register @@ -3042,12 +3071,12 @@ class Pad(PrimitiveWithInfer): Examples: >>> input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32) >>> pad_op = P.Pad(((1, 2), (2, 1))) - >>> output_tensor = pad_op(input_tensor) - >>> print(output_tensor) - [[ 0. 0. 0. 0. 0. 0. ] - [ 0. 0. -0.1 0.3 3.6 0. ] - [ 0. 0. 0.4 0.5 -3.2 0. ] - [ 0. 0. 0. 0. 0. 0. 
] + >>> output = pad_op(input_tensor) + >>> print(output) + [[ 0. 0. 0. 0. 0. 0. ], + [ 0. 0. -0.1 0.3 3.6 0. ], + [ 0. 0. 0.4 0.5 -3.2 0. ], + [ 0. 0. 0. 0. 0. 0. ], [ 0. 0. 0. 0. 0. 0. ]] """ @@ -3110,20 +3139,21 @@ class MirrorPad(PrimitiveWithInfer): >>> import mindspore.nn as nn >>> import numpy as np >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.pad = P.MirrorPad(mode="REFLECT") - >>> def construct(self, x, paddings): - >>> return self.pad(x, paddings) + ... def __init__(self): + ... super(Net, self).__init__() + ... self.pad = P.MirrorPad(mode="REFLECT") + ... def construct(self, x, paddings): + ... return self.pad(x, paddings) + ... >>> x = np.random.random(size=(2, 3)).astype(np.float32) >>> paddings = Tensor([[1,1],[2,2]]) >>> pad = Net() - >>> ms_output = pad(Tensor(x), paddings) - >>> print(ms_output) - [[0.5525309 0.49183875 0.99110144 0.49183875 0.5525309 0.49183875 0.99110144] - [0.31417271 0.96308136 0.934709 0.96308136 0.31417271 0.96308136 0.934709 ] - [0.5525309 0.49183875 0.99110144 0.49183875 0.5525309 0.49183875 0.99110144] - [0.31417271 0.96308136 0.934709 0.96308136 0.31417271 0.96308136 0.934709 ]] + >>> output = pad(Tensor(x), paddings) + >>> print(output) + [[0.5525309 0.49183875 0.99110144 0.49183875 0.5525309 0.49183875 0.99110144] + [0.31417271 0.96308136 0.934709 0.96308136 0.31417271 0.96308136 0.934709 ] + [0.5525309 0.49183875 0.99110144 0.49183875 0.5525309 0.49183875 0.99110144] + [0.31417271 0.96308136 0.934709 0.96308136 0.31417271 0.96308136 0.934709 ]] """ @prim_attr_register @@ -3189,10 +3219,10 @@ class ROIAlign(PrimitiveWithInfer): >>> input_tensor = Tensor(np.array([[[[1., 2.], [3., 4.]]]]), mindspore.float32) >>> rois = Tensor(np.array([[0, 0.2, 0.3, 0.2, 0.3]]), mindspore.float32) >>> roi_align = P.ROIAlign(2, 2, 0.5, 2) - >>> output_tensor = roi_align(input_tensor, rois) - >>> print(output_tensor) - [[[[1.77499998e+00, 2.02500010e+00], - [2.27500010e+00, 2.52500010e+00]]]] + >>> output = roi_align(input_tensor, rois) + >>> print(output) + [[[[1.775 2.025] + [2.275 2.525]]]] """ @prim_attr_register @@ -3277,19 +3307,29 @@ class Adam(PrimitiveWithInfer): >>> from mindspore import Tensor, Parameter >>> from mindspore.ops import operations as P >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.apply_adam = P.Adam() - >>> self.var = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="var") - >>> self.m = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="m") - >>> self.v = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="v") - >>> def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad): - >>> out = self.apply_adam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1, beta2, - >>> epsilon, grad) + ... def __init__(self): + ... super(Net, self).__init__() + ... self.apply_adam = P.Adam() + ... self.var = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="var") + ... self.m = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="m") + ... self.v = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="v") + ... def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad): + ... out = self.apply_adam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1, beta2, + ... epsilon, grad) + ... 
>>> return out >>> net = Net() - >>> gradient = Tensor(np.random.rand(3, 3, 3).astype(np.float32)) + >>> gradient = Tensor(np.random.rand(2, 2).astype(np.float32)) >>> result = net(0.9, 0.999, 0.001, 0.9, 0.999, 1e-8, gradient) + >>> output = net(0.9, 0.999, 0.001, 0.9, 0.999, 1e-8, gradient) + >>> print(output) + (Tensor(shape=[2, 2], dtype=Float32, value= + [[[ 9.99458194e-01, 9.99398530e-01], + [ 9.99404728e-01, 9.99371529e-01]]]), Tensor(shape=[2, 2], dtype=Float32, value= + [[[ 8.17151368e-01, 9.41661000e-01], + [ 9.28607702e-01, 9.98143375e-01]]]), Tensor(shape=[2, 2], dtype=Float32, value= + [[[ 9.98003900e-01, 9.98960912e-01], + [ 9.98780012e-01, 9.99961138e-01]]])) """ @prim_attr_register @@ -3481,16 +3521,17 @@ class FusedSparseAdam(PrimitiveWithInfer): >>> from mindspore.ops import operations as P >>> import mindspore.common.dtype as mstype >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.sparse_apply_adam = P.FusedSparseAdam() - >>> self.var = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="var") - >>> self.m = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="m") - >>> self.v = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="v") - >>> def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, indices): - >>> out = self.sparse_apply_adam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1, beta2, - >>> epsilon, grad, indices) - >>> return out + ... def __init__(self): + ... super(Net, self).__init__() + ... self.sparse_apply_adam = P.FusedSparseAdam() + ... self.var = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="var") + ... self.m = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="m") + ... self.v = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="v") + ... def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, indices): + ... out = self.sparse_apply_adam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1, beta2, + ... epsilon, grad, indices) + ... return out + ... >>> net = Net() >>> beta1_power = Tensor(0.9, mstype.float32) >>> beta2_power = Tensor(0.999, mstype.float32) @@ -3500,7 +3541,8 @@ class FusedSparseAdam(PrimitiveWithInfer): >>> epsilon = Tensor(1e-8, mstype.float32) >>> gradient = Tensor(np.random.rand(2, 1, 2), mstype.float32) >>> indices = Tensor([0, 1], mstype.int32) - >>> result = net(beta1_power, beta2_power, lr, beta1, beta2, epsilon, gradient, indices) + >>> output = net(beta1_power, beta2_power, lr, beta1, beta2, epsilon, gradient, indices) + >>> print(output) """ __mindspore_signature__ = ( sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T), @@ -3614,16 +3656,17 @@ class FusedSparseLazyAdam(PrimitiveWithInfer): >>> from mindspore.ops import operations as P >>> import mindspore.common.dtype as mstype >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.sparse_apply_lazyadam = P.FusedSparseLazyAdam() - >>> self.var = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="var") - >>> self.m = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="m") - >>> self.v = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="v") - >>> def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, indices): - >>> out = self.sparse_apply_lazyadam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1, - >>> beta2, epsilon, grad, indices) - >>> return out + ... 
def __init__(self): + ... super(Net, self).__init__() + ... self.sparse_apply_lazyadam = P.FusedSparseLazyAdam() + ... self.var = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="var") + ... self.m = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="m") + ... self.v = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="v") + ... def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, indices): + ... out = self.sparse_apply_lazyadam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1, + ... beta2, epsilon, grad, indices) + ... return out + ... >>> net = Net() >>> beta1_power = Tensor(0.9, mstype.float32) >>> beta2_power = Tensor(0.999, mstype.float32) @@ -3633,7 +3676,8 @@ class FusedSparseLazyAdam(PrimitiveWithInfer): >>> epsilon = Tensor(1e-8, mstype.float32) >>> gradient = Tensor(np.random.rand(2, 1, 2), mstype.float32) >>> indices = Tensor([0, 1], mstype.int32) - >>> result = net(beta1_power, beta2_power, lr, beta1, beta2, epsilon, gradient, indices) + >>> output = net(beta1_power, beta2_power, lr, beta1, beta2, epsilon, gradient, indices) + >>> print(output) """ __mindspore_signature__ = ( sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T), @@ -3722,21 +3766,22 @@ class FusedSparseFtrl(PrimitiveWithInfer): >>> from mindspore import Tensor >>> from mindspore.ops import operations as P >>> class SparseApplyFtrlNet(nn.Cell): - >>> def __init__(self): - >>> super(SparseApplyFtrlNet, self).__init__() - >>> self.sparse_apply_ftrl = P.FusedSparseFtrl(lr=0.01, l1=0.0, l2=0.0, lr_power=-0.5) - >>> self.var = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="var") - >>> self.accum = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="accum") - >>> self.linear = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="linear") - >>> - >>> def construct(self, grad, indices): - >>> out = self.sparse_apply_ftrl(self.var, self.accum, self.linear, grad, indices) - >>> return out - >>> + ... def __init__(self): + ... super(SparseApplyFtrlNet, self).__init__() + ... self.sparse_apply_ftrl = P.FusedSparseFtrl(lr=0.01, l1=0.0, l2=0.0, lr_power=-0.5) + ... self.var = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="var") + ... self.accum = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="accum") + ... self.linear = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="linear") + ... + ... def construct(self, grad, indices): + ... out = self.sparse_apply_ftrl(self.var, self.accum, self.linear, grad, indices) + ... return out + ... 
>>> net = SparseApplyFtrlNet() >>> grad = Tensor(np.random.rand(2, 1, 2).astype(np.float32)) >>> indices = Tensor(np.array([0, 1]).astype(np.int32)) >>> output = net(grad, indices) + >>> print(output) """ __mindspore_signature__ = ( sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T), @@ -3820,22 +3865,24 @@ class FusedSparseProximalAdagrad(PrimitiveWithInfer): >>> from mindspore import Tensor, Parameter >>> from mindspore.ops import operations as P >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.sparse_apply_proximal_adagrad = P.FusedSparseProximalAdagrad() - >>> self.var = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="var") - >>> self.accum = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="accum") - >>> self.lr = Tensor(0.01, mstype.float32) - >>> self.l1 = Tensor(0.0, mstype.float32) - >>> self.l2 = Tensor(0.0, mstype.float32) - >>> def construct(self, grad, indices): - >>> out = self.sparse_apply_proximal_adagrad(self.var, self.accum, self.lr, self.l1, - >>> self.l2, grad, indices) - >>> return out + ... def __init__(self): + ... super(Net, self).__init__() + ... self.sparse_apply_proximal_adagrad = P.FusedSparseProximalAdagrad() + ... self.var = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="var") + ... self.accum = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="accum") + ... self.lr = Tensor(0.01, mstype.float32) + ... self.l1 = Tensor(0.0, mstype.float32) + ... self.l2 = Tensor(0.0, mstype.float32) + ... def construct(self, grad, indices): + ... out = self.sparse_apply_proximal_adagrad(self.var, self.accum, self.lr, self.l1, + ... self.l2, grad, indices) + ... return out + ... >>> net = Net() >>> grad = Tensor(np.random.rand(2, 1, 2).astype(np.float32)) >>> indices = Tensor(np.array([0, 1]).astype(np.int32)) >>> output = net(grad, indices) + >>> print(output) """ __mindspore_signature__ = ( sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T), @@ -3909,17 +3956,19 @@ class KLDivLoss(PrimitiveWithInfer): >>> from mindspore import Tensor >>> from mindspore.ops import operations as P >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.kldiv_loss = P.KLDivLoss() - >>> def construct(self, x, y): - >>> result = self.kldiv_loss(x, y) - >>> return result - >>> + ... def __init__(self): + ... super(Net, self).__init__() + ... self.kldiv_loss = P.KLDivLoss() + ... def construct(self, x, y): + ... result = self.kldiv_loss(x, y) + ... return result + ... >>> net = Net() >>> input_x = Tensor(np.array([0.2, 0.7, 0.1]), mindspore.float32) >>> input_y = Tensor(np.array([0., 1., 0.]), mindspore.float32) - >>> result = net(input_x, input_y) + >>> output = net(input_x, input_y) + >>> print(output) + -0.23333333 """ @prim_attr_register @@ -3983,19 +4032,19 @@ class BinaryCrossEntropy(PrimitiveWithInfer): >>> from mindspore import Tensor >>> from mindspore.ops import operations as P >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.binary_cross_entropy = P.BinaryCrossEntropy() - >>> def construct(self, x, y, weight): - >>> result = self.binary_cross_entropy(x, y, weight) - >>> return result - >>> + ... def __init__(self): + ... super(Net, self).__init__() + ... self.binary_cross_entropy = P.BinaryCrossEntropy() + ... def construct(self, x, y, weight): + ... result = self.binary_cross_entropy(x, y, weight) + ... return result + ... 
>>> net = Net() >>> input_x = Tensor(np.array([0.2, 0.7, 0.1]), mindspore.float32) >>> input_y = Tensor(np.array([0., 1., 0.]), mindspore.float32) >>> weight = Tensor(np.array([1, 2, 2]), mindspore.float32) - >>> result = net(input_x, input_y, weight) - >>> print(result) + >>> output = net(input_x, input_y, weight) + >>> print(output) 0.38240486 """ @@ -4081,23 +4130,32 @@ class ApplyAdaMax(PrimitiveWithInfer): >>> from mindspore.ops import operations as P >>> import mindspore.common.dtype as mstype >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.apply_ada_max = P.ApplyAdaMax() - >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var") - >>> self.m = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="m") - >>> self.v = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="v") - >>> def construct(self, beta1_power, lr, beta1, beta2, epsilon, grad): - >>> out = self.apply_ada_max(self.var, self.m, self.v, beta1_power, lr, beta1, beta2, epsilon, grad) - >>> return out + ... def __init__(self): + ... super(Net, self).__init__() + ... self.apply_ada_max = P.ApplyAdaMax() + ... self.var = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="var") + ... self.m = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="m") + ... self.v = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="v") + ... def construct(self, beta1_power, lr, beta1, beta2, epsilon, grad): + ... out = self.apply_ada_max(self.var, self.m, self.v, beta1_power, lr, beta1, beta2, epsilon, grad) + ... return out + ... >>> net = Net() >>> beta1_power =Tensor(0.9, mstype.float32) >>> lr = Tensor(0.001, mstype.float32) >>> beta1 = Tensor(0.9, mstype.float32) >>> beta2 = Tensor(0.99, mstype.float32) >>> epsilon = Tensor(1e-10, mstype.float32) - >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32)) - >>> result = net(beta1_power, lr, beta1, beta2, epsilon, grad) + >>> grad = Tensor(np.random.rand(2, 2).astype(np.float32)) + >>> output = net(beta1_power, lr, beta1, beta2, epsilon, grad) + >>> print(output) + (Tensor(shape=[2, 2], dtype=Float32, value= + [[ 6.46618605e-01, 6.48276925e-01], + [ 7.72792041e-01, 8.58803272e-01]]), Tensor(shape=[2, 2], dtype=Float32, value= + [[ 6.23247683e-01, 6.30929232e-01], + [ 9.17923033e-01, 8.98910999e-01]]), Tensor(shape=[2, 2], dtype=Float32, value= + [[ 3.03175300e-01, 5.75195193e-01], + [ 9.43458021e-01, 8.41971099e-01]])) """ __mindspore_signature__ = ( @@ -4201,21 +4259,30 @@ class ApplyAdadelta(PrimitiveWithInfer): >>> from mindspore.ops import operations as P >>> import mindspore.common.dtype as mstype >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.apply_adadelta = P.ApplyAdadelta() - >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var") - >>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum") - >>> self.accum_update = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum_update") - >>> def construct(self, lr, rho, epsilon, grad): - >>> out = self.apply_adadelta(self.var, self.accum, self.accum_update, lr, rho, epsilon, grad) - >>> return out + ... def __init__(self): + ... super(Net, self).__init__() + ... self.apply_adadelta = P.ApplyAdadelta() + ... self.var = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="var") + ... 
self.accum = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="accum") + ... self.accum_update = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="accum_update") + ... def construct(self, lr, rho, epsilon, grad): + ... out = self.apply_adadelta(self.var, self.accum, self.accum_update, lr, rho, epsilon, grad) + ... return out + ... >>> net = Net() >>> lr = Tensor(0.001, mstype.float32) >>> rho = Tensor(0.0, mstype.float32) >>> epsilon = Tensor(1e-6, mstype.float32) - >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32)) - >>> result = net(lr, rho, epsilon, grad) + >>> grad = Tensor(np.random.rand(1, 2).astype(np.float32)) + >>> output = net(lr, rho, epsilon, grad) + >>> print(output) + (Tensor(shape=[2, 2], dtype=Float32, value= + [[ 7.60124624e-01, 9.54110503e-01], + [ 7.25456238e-01, 4.98913884e-01]]), Tensor(shape=[2, 2], dtype=Float32, value= + [[ 1.00194868e-02, 5.50848258e-01], + [ 9.95293319e-01, 1.97404027e-02]]), Tensor(shape=[2, 2], dtype=Float32, value= + [[ 4.17240560e-01, 8.39873433e-01], + [ 4.95992631e-01, 9.19294059e-01]])) """ __mindspore_signature__ = ( @@ -4301,18 +4368,25 @@ class ApplyAdagrad(PrimitiveWithInfer): >>> from mindspore.ops import operations as P >>> import mindspore.common.dtype as mstype >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.apply_adagrad = P.ApplyAdagrad() - >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var") - >>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum") - >>> def construct(self, lr, grad): - >>> out = self.apply_adagrad(self.var, self.accum, lr, grad) - >>> return out + ... def __init__(self): + ... super(Net, self).__init__() + ... self.apply_adagrad = P.ApplyAdagrad() + ... self.var = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="var") + ... self.accum = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="accum") + ... def construct(self, lr, grad): + ... out = self.apply_adagrad(self.var, self.accum, lr, grad) + ... return out + ... >>> net = Net() >>> lr = Tensor(0.001, mstype.float32) - >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32)) - >>> result = net(lr, grad) + >>> grad = Tensor(np.random.rand(2, 2).astype(np.float32)) + >>> output = net(lr, grad) + >>> print(output) + (Tensor(shape=[2, 2], dtype=Float32, value= + [[ 7.12832332e-01, 3.10275197e-01], + [ 9.02635300e-01, 3.90718848e-01]]), Tensor(shape=[2, 2], dtype=Float32, value= + [[ 8.68964046e-02, 3.21274072e-01], + [ 1.19302607e+00, 9.59712446e-01]])) """ __mindspore_signature__ = ( @@ -4384,18 +4458,25 @@ class ApplyAdagradV2(PrimitiveWithInfer): >>> from mindspore.ops import operations as P >>> import mindspore.common.dtype as mstype >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.apply_adagrad_v2 = P.ApplyAdagradV2(epsilon=1e-6) - >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var") - >>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum") - >>> def construct(self, lr, grad): - >>> out = self.apply_adagrad_v2(self.var, self.accum, lr, grad) - >>> return out + ... def __init__(self): + ... super(Net, self).__init__() + ... self.apply_adagrad_v2 = P.ApplyAdagradV2(epsilon=1e-6) + ... self.var = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="var") + ... self.accum = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="accum") + ... 
def construct(self, lr, grad): + ... out = self.apply_adagrad_v2(self.var, self.accum, lr, grad) + ... return out + ... >>> net = Net() >>> lr = Tensor(0.001, mstype.float32) - >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32)) - >>> result = net(lr, grad) + >>> grad = Tensor(np.random.rand(2, 2).astype(np.float32)) + >>> output = net(lr, grad) + >>> print(output) + (Tensor(shape=[2, 2], dtype=Float32, value= + [[ 6.75180078e-01, 5.12131870e-01], + [ 9.32922423e-01, 6.53732181e-01]]), Tensor(shape=[2, 2], dtype=Float32, value= + [[ 8.45080376e-01, 4.80091214e-01], + [ 1.68451762e+00, 1.03823669e+00]])) """ __mindspore_signature__ = ( @@ -4468,20 +4549,23 @@ class SparseApplyAdagrad(PrimitiveWithInfer): >>> from mindspore.ops import operations as P >>> import mindspore.common.dtype as mstype >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.sparse_apply_adagrad = P.SparseApplyAdagrad(lr=1e-8) - >>> self.var = Parameter(Tensor(np.ones([1, 1, 1]).astype(np.float32)), name="var") - >>> self.accum = Parameter(Tensor(np.ones([1, 1, 1]).astype(np.float32)), name="accum") - >>> def construct(self, grad, indices): - >>> out = self.sparse_apply_adagrad(self.var, self.accum, grad, indices) - >>> return out + ... def __init__(self): + ... super(Net, self).__init__() + ... self.sparse_apply_adagrad = P.SparseApplyAdagrad(lr=1e-8) + ... self.var = Parameter(Tensor(np.ones([1, 1, 1]).astype(np.float32)), name="var") + ... self.accum = Parameter(Tensor(np.ones([1, 1, 1]).astype(np.float32)), name="accum") + ... def construct(self, grad, indices): + ... out = self.sparse_apply_adagrad(self.var, self.accum, grad, indices) + ... return out + ... >>> net = Net() >>> grad = Tensor(np.random.rand(1, 1, 1).astype(np.float32)) >>> indices = Tensor([0], mstype.int32) - >>> result = net(grad, indices) - >>> print(result) - ([[[1.0]]], [[[1.0]]]) + >>> output = net(grad, indices) + >>> print(output) + (Tensor(shape=[1, 1, 1], dtype=Float32, value= + [[[1.00000000e+00]]]), Tensor(shape=[1, 1, 1], dtype=Float32, value= + [[[1.00000000e+00]]])) """ __mindspore_signature__ = ( @@ -4557,21 +4641,24 @@ class SparseApplyAdagradV2(PrimitiveWithInfer): >>> from mindspore.ops import operations as P >>> import mindspore.common.dtype as mstype >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.sparse_apply_adagrad_v2 = P.SparseApplyAdagradV2(lr=1e-8, epsilon=1e-6) - >>> self.var = Parameter(Tensor(np.ones([1, 1, 1]).astype(np.float32)), name="var") - >>> self.accum = Parameter(Tensor(np.ones([1, 1, 1]).astype(np.float32)), name="accum") - >>> - >>> def construct(self, grad, indices): - >>> out = self.sparse_apply_adagrad_v2(self.var, self.accum, grad, indices) - >>> return out + ... def __init__(self): + ... super(Net, self).__init__() + ... self.sparse_apply_adagrad_v2 = P.SparseApplyAdagradV2(lr=1e-8, epsilon=1e-6) + ... self.var = Parameter(Tensor(np.ones([1, 1, 1]).astype(np.float32)), name="var") + ... self.accum = Parameter(Tensor(np.ones([1, 1, 1]).astype(np.float32)), name="accum") + ... + ... def construct(self, grad, indices): + ... out = self.sparse_apply_adagrad_v2(self.var, self.accum, grad, indices) + ... return out + ... 
>>> net = Net() >>> grad = Tensor(np.random.rand(1, 1, 1).astype(np.float32)) >>> indices = Tensor([0], mstype.int32) - >>> result = net(grad, indices) - >>> print(result) - ([[[1.0]]], [[[1.67194188]]]) + >>> output = net(grad, indices) + >>> print(output) + (Tensor(shape=[1, 1, 1], dtype=Float32, value= + [[[1.00000000e+00]]]), Tensor(shape=[1, 1, 1], dtype=Float32, value= + [[[1.13986731e+00]]])) """ __mindspore_signature__ = ( @@ -4648,20 +4735,27 @@ class ApplyProximalAdagrad(PrimitiveWithInfer): >>> from mindspore import Tensor, Parameter >>> from mindspore.ops import operations as P >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.apply_proximal_adagrad = P.ApplyProximalAdagrad() - >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var") - >>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum") - >>> self.lr = 0.01 - >>> self.l1 = 0.0 - >>> self.l2 = 0.0 - >>> def construct(self, grad): - >>> out = self.apply_proximal_adagrad(self.var, self.accum, self.lr, self.l1, self.l2, grad) - >>> return out + ... def __init__(self): + ... super(Net, self).__init__() + ... self.apply_proximal_adagrad = P.ApplyProximalAdagrad() + ... self.var = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="var") + ... self.accum = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="accum") + ... self.lr = 0.01 + ... self.l1 = 0.0 + ... self.l2 = 0.0 + ... def construct(self, grad): + ... out = self.apply_proximal_adagrad(self.var, self.accum, self.lr, self.l1, self.l2, grad) + ... return out + ... >>> net = Net() - >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32)) + >>> grad = Tensor(np.random.rand(2, 2).astype(np.float32)) >>> output = net(grad) + >>> print(output) + (Tensor(shape=[2, 2], dtype=Float32, value= + [[ 3.79054576e-01, 5.28407156e-01], + [ 2.39551291e-01, 7.34573752e-02]]), Tensor(shape=[2, 2], dtype=Float32, value= + [[ 8.96461844e-01, 1.47237992e+00], + [ 8.52952123e-01, 1.22406030e+00]])) """ __mindspore_signature__ = ( @@ -4752,25 +4846,27 @@ class SparseApplyProximalAdagrad(PrimitiveWithCheck): >>> from mindspore import Tensor, Parameter >>> from mindspore.ops import operations as P >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.sparse_apply_proximal_adagrad = P.SparseApplyProximalAdagrad() - >>> self.var = Parameter(Tensor(np.random.rand(1, 3).astype(np.float32)), name="var") - >>> self.accum = Parameter(Tensor(np.random.rand(1, 3).astype(np.float32)), name="accum") - >>> self.lr = 0.01 - >>> self.l1 = 0.0 - >>> self.l2 = 0.0 - >>> def construct(self, grad, indices): - >>> out = self.sparse_apply_proximal_adagrad(self.var, self.accum, self.lr, self.l1, - self.l2, grad, indices) - >>> return out + ... def __init__(self): + ... super(Net, self).__init__() + ... self.sparse_apply_proximal_adagrad = P.SparseApplyProximalAdagrad() + ... self.var = Parameter(Tensor(np.random.rand(1, 2).astype(np.float32)), name="var") + ... self.accum = Parameter(Tensor(np.random.rand(1, 2).astype(np.float32)), name="accum") + ... self.lr = 0.01 + ... self.l1 = 0.0 + ... self.l2 = 0.0 + ... def construct(self, grad, indices): + ... out = self.sparse_apply_proximal_adagrad(self.var, self.accum, self.lr, self.l1, + ... self.l2, grad, indices) + ... return out + ... 
>>> net = Net()
- >>> grad = Tensor(np.random.rand(1, 3).astype(np.float32))
+ >>> grad = Tensor(np.random.rand(1, 2).astype(np.float32))
>>> indices = Tensor(np.ones((1,), np.int32))
>>> output = net(grad, indices)
>>> print(output)
- ([[6.94971561e-01, 5.24479389e-01, 5.52502394e-01]],
- [[1.69961065e-01, 9.21632349e-01, 7.83344746e-01]])
+ (Tensor(shape=[1, 2], dtype=Float32, value=
+ [[ 7.74297953e-01, 7.12414503e-01]]), Tensor(shape=[1, 2], dtype=Float32, value=
+ [[ 6.14362955e-01, 6.38007671e-02]]))
"""
__mindspore_signature__ = (
@@ -4846,21 +4942,28 @@ class ApplyAddSign(PrimitiveWithInfer):
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
- >>> def __init__(self):
- >>> super(Net, self).__init__()
- >>> self.apply_add_sign = P.ApplyAddSign()
- >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
- >>> self.m = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="m")
- >>> self.lr = 0.001
- >>> self.alpha = 1.0
- >>> self.sign_decay = 0.99
- >>> self.beta = 0.9
- >>> def construct(self, grad):
- >>> out = self.apply_add_sign(self.var, self.m, self.lr, self.alpha, self.sign_decay, self.beta, grad)
- >>> return out
+ ... def __init__(self):
+ ... super(Net, self).__init__()
+ ... self.apply_add_sign = P.ApplyAddSign()
+ ... self.var = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="var")
+ ... self.m = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="m")
+ ... self.lr = 0.001
+ ... self.alpha = 1.0
+ ... self.sign_decay = 0.99
+ ... self.beta = 0.9
+ ... def construct(self, grad):
+ ... out = self.apply_add_sign(self.var, self.m, self.lr, self.alpha, self.sign_decay, self.beta, grad)
+ ... return out
+ ...
>>> net = Net()
- >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
+ >>> grad = Tensor(np.random.rand(2, 2).astype(np.float32))
>>> output = net(grad)
+ >>> print(output)
+ (Tensor(shape=[2, 2], dtype=Float32, value=
+ [[ 5.37551343e-01, 3.78310502e-01],
+ [ 7.81984031e-01, 5.19252002e-01]]), Tensor(shape=[2, 2], dtype=Float32, value=
+ [[ 8.28343272e-01, 8.14828694e-01],
+ [ 3.79919171e-01, 2.55756438e-01]]))
"""
__mindspore_signature__ = (
@@ -4954,22 +5057,29 @@ class ApplyPowerSign(PrimitiveWithInfer):
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
- >>> def __init__(self):
- >>> super(Net, self).__init__()
- >>> self.apply_power_sign = P.ApplyPowerSign()
- >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
- >>> self.m = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="m")
- >>> self.lr = 0.001
- >>> self.logbase = np.e
- >>> self.sign_decay = 0.99
- >>> self.beta = 0.9
- >>> def construct(self, grad):
- >>> out = self.apply_power_sign(self.var, self.m, self.lr, self.logbase,
+ ... def __init__(self):
+ ... super(Net, self).__init__()
+ ... self.apply_power_sign = P.ApplyPowerSign()
+ ... self.var = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="var")
+ ... self.m = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="m")
+ ... self.lr = 0.001
+ ... self.logbase = np.e
+ ... self.sign_decay = 0.99
+ ... self.beta = 0.9
+ ... def construct(self, grad):
+ ... out = self.apply_power_sign(self.var, self.m, self.lr, self.logbase,
... self.sign_decay, self.beta, grad)
- >>> return out
+ ... return out
+ ...
>>> net = Net() - >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32)) + >>> grad = Tensor(np.random.rand(2, 2).astype(np.float32)) >>> output = net(grad) + >>> print(output) + (Tensor(shape=[2, 2], dtype=Float32, value= + [[ 5.01964271e-01, 8.59248936e-01], + [ 5.14324069e-01, 2.50274092e-01]]), Tensor(shape=[2, 2], dtype=Float32, value= + [[ 5.16151905e-01, 7.50251293e-01], + [ 4.36047137e-01, 1.26427144e-01]])) """ __mindspore_signature__ = ( @@ -5044,17 +5154,21 @@ class ApplyGradientDescent(PrimitiveWithInfer): >>> from mindspore import Tensor, Parameter >>> from mindspore.ops import operations as P >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.apply_gradient_descent = P.ApplyGradientDescent() - >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var") - >>> self.alpha = 0.001 - >>> def construct(self, delta): - >>> out = self.apply_gradient_descent(self.var, self.alpha, delta) - >>> return out + ... def __init__(self): + ... super(Net, self).__init__() + ... self.apply_gradient_descent = P.ApplyGradientDescent() + ... self.var = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="var") + ... self.alpha = 0.001 + ... def construct(self, delta): + ... out = self.apply_gradient_descent(self.var, self.alpha, delta) + ... return out + ... >>> net = Net() - >>> delta = Tensor(np.random.rand(3, 3).astype(np.float32)) + >>> delta = Tensor(np.random.rand(2, 2).astype(np.float32)) >>> output = net(delta) + >>> print(output) + [[0.54876804 0.38894778] + [0.5847089 0.09858753]] """ __mindspore_signature__ = ( @@ -5115,19 +5229,23 @@ class ApplyProximalGradientDescent(PrimitiveWithInfer): >>> from mindspore import Tensor, Parameter >>> from mindspore.ops import operations as P >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.apply_proximal_gradient_descent = P.ApplyProximalGradientDescent() - >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var") - >>> self.alpha = 0.001 - >>> self.l1 = 0.0 - >>> self.l2 = 0.0 - >>> def construct(self, delta): - >>> out = self.apply_proximal_gradient_descent(self.var, self.alpha, self.l1, self.l2, delta) - >>> return out + ... def __init__(self): + ... super(Net, self).__init__() + ... self.apply_proximal_gradient_descent = P.ApplyProximalGradientDescent() + ... self.var = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="var") + ... self.alpha = 0.001 + ... self.l1 = 0.0 + ... self.l2 = 0.0 + ... def construct(self, delta): + ... out = self.apply_proximal_gradient_descent(self.var, self.alpha, self.l1, self.l2, delta) + ... return out + ... >>> net = Net() - >>> delta = Tensor(np.random.rand(3, 3).astype(np.float32)) + >>> delta = Tensor(np.random.rand(2, 2).astype(np.float32)) >>> output = net(delta) + >>> print(output) + [[0.38671502 0.087947 ] + [0.07595529 0.44336063]] """ __mindspore_signature__ = ( @@ -5195,19 +5313,23 @@ class LARSUpdate(PrimitiveWithInfer): >>> import mindspore.nn as nn >>> import numpy as np >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.lars = P.LARSUpdate() - >>> self.reduce = P.ReduceSum() - >>> def construct(self, weight, gradient): - >>> w_square_sum = self.reduce(F.square(weight)) - >>> grad_square_sum = self.reduce(F.square(gradient)) - >>> grad_t = self.lars(weight, gradient, w_square_sum, grad_square_sum, 0.0, 1.0) - >>> return grad_t + ... def __init__(self): + ... 
super(Net, self).__init__() + ... self.lars = P.LARSUpdate() + ... self.reduce = P.ReduceSum() + ... def construct(self, weight, gradient): + ... w_square_sum = self.reduce(F.square(weight)) + ... grad_square_sum = self.reduce(F.square(gradient)) + ... grad_t = self.lars(weight, gradient, w_square_sum, grad_square_sum, 0.0, 1.0) + ... return grad_t + ... >>> weight = np.random.random(size=(2, 3)).astype(np.float32) >>> gradient = np.random.random(size=(2, 3)).astype(np.float32) >>> net = Net() - >>> ms_output = net(Tensor(weight), Tensor(gradient)) + >>> output = net(Tensor(weight), Tensor(gradient)) + >>> print(output) + [[1.0630977e-03 1.0647357e-03 1.0038106e-03] + [2.9038603e-04 5.9235965e-05 6.8709702e-04]] """ @prim_attr_register @@ -5279,29 +5401,33 @@ class ApplyFtrl(PrimitiveWithInfer): >>> from mindspore import Tensor >>> from mindspore.ops import operations as P >>> class ApplyFtrlNet(nn.Cell): - >>> def __init__(self): - >>> super(ApplyFtrlNet, self).__init__() - >>> self.apply_ftrl = P.ApplyFtrl() - >>> self.lr = 0.001 - >>> self.l1 = 0.0 - >>> self.l2 = 0.0 - >>> self.lr_power = -0.5 - >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var") - >>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum") - >>> self.linear = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="linear") - >>> - >>> def construct(self, grad): - >>> out = self.apply_ftrl(self.var, self.accum, self.linear, grad, self.lr, self.l1, self.l2, - >>> self.lr_power) - >>> return out - >>> + ... def __init__(self): + ... super(ApplyFtrlNet, self).__init__() + ... self.apply_ftrl = P.ApplyFtrl() + ... self.lr = 0.001 + ... self.l1 = 0.0 + ... self.l2 = 0.0 + ... self.lr_power = -0.5 + ... self.var = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="var") + ... self.accum = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="accum") + ... self.linear = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="linear") + ... + ... def construct(self, grad): + ... out = self.apply_ftrl(self.var, self.accum, self.linear, grad, self.lr, self.l1, self.l2, + ... self.lr_power) + ... return out + ... 
>>> net = ApplyFtrlNet() - >>> input_x = Tensor(np.random.randint(-4, 4, (3, 3)), mindspore.float32) - >>> result = net(input_x) - >>> print(result) - [[0.67455846 0.14630564 0.160499 ] - [0.16329421 0.00415689 0.05202988] - [0.18672481 0.17418946 0.36420345]] + >>> input_x = Tensor(np.random.randint(-4, 4, (2, 2)), mindspore.float32) + >>> output = net(input_x) + >>> print(output) + (Tensor(shape=[2, 2], dtype=Float32, value= + [[ 1.51306406e-01, 4.06460911e-02], + [ 5.40895802e-01, 1.35308430e-01]]), Tensor(shape=[2, 2], dtype=Float32, value= + [[ 1.62730598e+01, 4.53126240e+00], + [ 4.10181570e+01, 4.67408800e+00]]), Tensor(shape=[2, 2], dtype=Float32, value= + [[-6.10368164e+02, -8.65223694e+01], + [-1.09547302e+03, -2.92531921e+02]])) """ @prim_attr_register @@ -5371,23 +5497,26 @@ class SparseApplyFtrl(PrimitiveWithCheck): >>> from mindspore import Tensor >>> from mindspore.ops import operations as P >>> class SparseApplyFtrlNet(nn.Cell): - >>> def __init__(self): - >>> super(SparseApplyFtrlNet, self).__init__() - >>> self.sparse_apply_ftrl = P.SparseApplyFtrl(lr=0.01, l1=0.0, l2=0.0, lr_power=-0.5) - >>> self.var = Parameter(Tensor(np.random.rand(1, 1).astype(np.float32)), name="var") - >>> self.accum = Parameter(Tensor(np.random.rand(1, 1).astype(np.float32)), name="accum") - >>> self.linear = Parameter(Tensor(np.random.rand(1, 1).astype(np.float32)), name="linear") - >>> - >>> def construct(self, grad, indices): - >>> out = self.sparse_apply_ftrl(self.var, self.accum, self.linear, grad, indices) - >>> return out - >>> + ... def __init__(self): + ... super(SparseApplyFtrlNet, self).__init__() + ... self.sparse_apply_ftrl = P.SparseApplyFtrl(lr=0.01, l1=0.0, l2=0.0, lr_power=-0.5) + ... self.var = Parameter(Tensor(np.random.rand(1, 1).astype(np.float32)), name="var") + ... self.accum = Parameter(Tensor(np.random.rand(1, 1).astype(np.float32)), name="accum") + ... self.linear = Parameter(Tensor(np.random.rand(1, 1).astype(np.float32)), name="linear") + ... + ... def construct(self, grad, indices): + ... out = self.sparse_apply_ftrl(self.var, self.accum, self.linear, grad, indices) + ... return out + ... >>> net = SparseApplyFtrlNet() >>> grad = Tensor(np.random.rand(1, 1).astype(np.float32)) >>> indices = Tensor(np.ones([1]), mindspore.int32) >>> output = net(grad, indices) >>> print(output) - ([[1.02914639e-01]], [[7.60280550e-01]], [[7.64630079e-01]]) + (Tensor(shape=[1, 1], dtype=Float32, value= + [[1.21931173e-01]]), Tensor(shape=[1, 1], dtype=Float32, value= + [[3.54384869e-01]]), Tensor(shape=[1, 1], dtype=Float32, value= + [[2.99625486e-01]])) """ __mindspore_signature__ = ( @@ -5469,26 +5598,27 @@ class SparseApplyFtrlV2(PrimitiveWithInfer): >>> from mindspore import Tensor >>> from mindspore.ops import operations as P >>> class SparseApplyFtrlV2Net(nn.Cell): - >>> def __init__(self): - >>> super(SparseApplyFtrlV2Net, self).__init__() - >>> self.sparse_apply_ftrl_v2 = P.SparseApplyFtrlV2(lr=0.01, l1=0.0, l2=0.0, - l2_shrinkage=0.0, lr_power=-0.5) - >>> self.var = Parameter(Tensor(np.random.rand(1, 3).astype(np.float32)), name="var") - >>> self.accum = Parameter(Tensor(np.random.rand(1, 3).astype(np.float32)), name="accum") - >>> self.linear = Parameter(Tensor(np.random.rand(1, 3).astype(np.float32)), name="linear") - >>> - >>> def construct(self, grad, indices): - >>> out = self.sparse_apply_ftrl_v2(self.var, self.accum, self.linear, grad, indices) - >>> return out - >>> + ... def __init__(self): + ... super(SparseApplyFtrlV2Net, self).__init__() + ... 
self.sparse_apply_ftrl_v2 = P.SparseApplyFtrlV2(lr=0.01, l1=0.0, l2=0.0,
+ ... l2_shrinkage=0.0, lr_power=-0.5)
+ ... self.var = Parameter(Tensor(np.random.rand(1, 2).astype(np.float32)), name="var")
+ ... self.accum = Parameter(Tensor(np.random.rand(1, 2).astype(np.float32)), name="accum")
+ ... self.linear = Parameter(Tensor(np.random.rand(1, 2).astype(np.float32)), name="linear")
+ ...
+ ... def construct(self, grad, indices):
+ ... out = self.sparse_apply_ftrl_v2(self.var, self.accum, self.linear, grad, indices)
+ ... return out
+ ...
>>> net = SparseApplyFtrlV2Net()
- >>> grad = Tensor(np.random.rand(1, 3).astype(np.float32))
+ >>> grad = Tensor(np.random.rand(1, 2).astype(np.float32))
>>> indices = Tensor(np.ones([1]), mindspore.int32)
>>> output = net(grad, indices)
>>> print(output)
- ([[3.98493223e-02, 4.38684933e-02, 8.25387388e-02]],
- [[6.40987396e-01, 7.19417334e-01, 1.52606890e-01]],
- [[7.43463933e-01, 2.92334408e-01, 6.81572020e-01]])
+ (Tensor(shape=[1, 2], dtype=Float32, value=
+ [[ 8.69189978e-01, 7.50899851e-01]]), Tensor(shape=[1, 2], dtype=Float32, value=
+ [[ 2.51525849e-01, 2.19218452e-02]]), Tensor(shape=[1, 2], dtype=Float32, value=
+ [[ 1.70145389e-02, 7.74444342e-01]]))
"""
__mindspore_signature__ = (
@@ -5678,21 +5808,22 @@ class CTCGreedyDecoder(PrimitiveWithInfer):
containing sequence log-probability, has the same type as `inputs`.
Examples:
- >>> class CTCGreedyDecoderNet(nn.Cell):
- >>> def __init__(self):
- >>> super(CTCGreedyDecoderNet, self).__init__()
- >>> self.ctc_greedy_decoder = P.CTCGreedyDecoder()
- >>> self.assert_op = P.Assert(300)
- >>>
- >>> def construct(self, inputs, sequence_length):
- >>> out = self.ctc_greedy_decoder(inputs,sequence_length)
- >>> self.assert_op(True, (out[0], out[1], out[2], out[3]))
- >>> return out[2]
- >>>
+ >>> class CTCGreedyDecoderNet(nn.Cell):
+ ... def __init__(self):
+ ... super(CTCGreedyDecoderNet, self).__init__()
+ ... self.ctc_greedy_decoder = P.CTCGreedyDecoder()
+ ... self.assert_op = P.Assert(300)
+ ...
+ ... def construct(self, inputs, sequence_length):
+ ... out = self.ctc_greedy_decoder(inputs,sequence_length)
+ ... self.assert_op(True, (out[0], out[1], out[2], out[3]))
+ ... return out[2]
+ ...
>>> inputs = Tensor(np.random.random((2, 2, 3)), mindspore.float32)
>>> sequence_length = Tensor(np.array([2, 2]), mindspore.int32)
>>> net = CTCGreedyDecoderNet()
>>> output = net(inputs, sequence_length)
+ >>> print(output)
"""
@prim_attr_register
@@ -5784,13 +5915,14 @@ class BasicLSTMCell(PrimitiveWithInfer):
>>> lstm = P.BasicLSTMCell(keep_prob=1.0, forget_bias=1.0, state_is_tuple=True, activation='tanh')
>>> output = lstm(x, h, c, w, b)
>>> print(output)
- ([[9.5459e-01, 9.2725e-01]],
- [[1.0000e+00, 1.0000e+00]],
- [[1.0000e+00, 1.0000e+00]],
- [[1.0000e+00, 1.0000e+00]],
- [[9.9951e-01, 1.0000e+00]],
- [[9.5459e-01, 9.2773e-01]],
- [[0.0000e+00, 0.0000e+00]])
+ (Tensor(shape=[1, 2], dtype=Float16, value=
+ [[9.5312e-01, 9.5215e-01]]), Tensor(shape=[1, 2], dtype=Float16, value=
+ [[1.0000e+00, 1.0000e+00]]), Tensor(shape=[1, 2], dtype=Float16, value=
+ [[1.0000e+00, 1.0000e+00]]), Tensor(shape=[1, 2], dtype=Float16, value=
+ [[1.0000e+00, 1.0000e+00]]), Tensor(shape=[1, 2], dtype=Float16, value=
+ [[1.0000e+00, 1.0000e+00]]), Tensor(shape=[1, 2], dtype=Float16, value=
+ [[9.5312e-01, 9.5215e-01]]), Tensor(shape=[1, 2], dtype=Float16, value=
+ [[0.0000e+00, 0.0000e+00]]))
"""
@prim_attr_register
@@ -5998,9 +6130,9 @@ class InTopK(PrimitiveWithInfer):
>>> x1 = Tensor(np.array([[1, 8, 5, 2, 7], [4, 9, 1, 3, 5]]), mindspore.float32)
>>> x2 = Tensor(np.array([1, 3]), mindspore.int32)
>>> in_top_k = P.InTopK(3)
- >>> result = in_top_k(x1, x2)
- >>> print(result)
- [True False]
+ >>> output = in_top_k(x1, x2)
+ >>> print(output)
+ [ True False]
"""
@prim_attr_register
@@ -6042,7 +6174,8 @@ class LRN(PrimitiveWithInfer):
Examples:
>>> x = Tensor(np.random.rand(1, 2, 2, 2), mindspore.float32)
>>> lrn = P.LRN()
- >>> lrn(x)
+ >>> output = lrn(x)
+ >>> print(output)
[[[[0.18990143 0.59475636]
[0.6291904 0.1371534 ]]
[[0.6258911 0.4964315 ]
diff --git a/mindspore/ops/operations/other_ops.py b/mindspore/ops/operations/other_ops.py
index 2ff66f1f4e..3efa104524 100644
--- a/mindspore/ops/operations/other_ops.py
+++ b/mindspore/ops/operations/other_ops.py
@@ -39,13 +39,14 @@ class Assign(PrimitiveWithCheck):
Examples:
>>> class Net(nn.Cell):
- >>> def __init__(self):
- >>> super(Net, self).__init__()
- >>> self.y = mindspore.Parameter(Tensor([1.0], mindspore.float32), name="y")
- >>>
- >>> def construct(self, x):
- >>> P.Assign()(self.y, x)
- >>> return self.y
+ ... def __init__(self):
+ ... super(Net, self).__init__()
+ ... self.y = mindspore.Parameter(Tensor([1.0], mindspore.float32), name="y")
+ ...
+ ... def construct(self, x):
+ ... P.Assign()(self.y, x)
+ ... return self.y
+ ...
>>> x = Tensor([2.0], mindspore.float32)
>>> net = Net()
>>> output = net(x)
@@ -78,13 +79,20 @@ class InplaceAssign(PrimitiveWithInfer):
Outputs:
Tensor, has the same type as original `variable`.
Examples:
- >>> def construct(self, x):
- >>> val = x - 1.0
- >>> ret = x + 2.0
- >>> return InplaceAssign()(x, val, ret)
- >>> x = Tensor([2.0], mindspore.float32)
- >>> net = Net()
- >>> net(x)
+ >>> class Net(nn.Cell):
+ ... def __init__(self):
+ ... super(Net, self).__init__()
+ ... self.inplace_assign = P.InplaceAssign()
+ ...
+ ... def construct(self, x):
+ ... val = x - 1.0
+ ... ret = x + 2.0
+ ... return self.inplace_assign(x, val, ret)
+ ...
+ >>> x = Tensor([2.0], mindspore.float32) + >>> net = Net() + >>> output = net(x) + >>> print(output) """ @ prim_attr_register def __init__(self): @@ -116,10 +124,10 @@ class BoundingBoxEncode(PrimitiveWithInfer): >>> anchor_box = Tensor([[4,1,2,1],[2,2,2,3]],mindspore.float32) >>> groundtruth_box = Tensor([[3,1,2,2],[1,2,1,4]],mindspore.float32) >>> boundingbox_encode = P.BoundingBoxEncode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0)) - >>> boundingbox_encode(anchor_box, groundtruth_box) - [[5.0000000e-01 5.0000000e-01 -6.5504000e+04 6.9335938e-01] + >>> output = boundingbox_encode(anchor_box, groundtruth_box) + >>> print(output) + [[ 5.0000000e-01 5.0000000e-01 -6.5504000e+04 6.9335938e-01] [-1.0000000e+00 2.5000000e-01 0.0000000e+00 4.0551758e-01]] - """ @prim_attr_register @@ -170,9 +178,10 @@ class BoundingBoxDecode(PrimitiveWithInfer): >>> deltas = Tensor([[3,1,2,2],[1,2,1,4]],mindspore.float32) >>> boundingbox_decode = P.BoundingBoxDecode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0), ... max_shape=(768, 1280), wh_ratio_clip=0.016) - >>> boundingbox_decode(anchor_box, deltas) - [[4.1953125 0. 0. 5.1953125] - [2.140625 0. 3.859375 60.59375]] + >>> output = boundingbox_decode(anchor_box, deltas) + >>> print(output) + [[ 4.1953125 0. 0. 5.1953125] + [ 2.140625 0. 3.859375 60.59375 ]] """ @@ -226,19 +235,19 @@ class CheckValid(PrimitiveWithInfer): >>> from mindspore import Tensor >>> from mindspore.ops import operations as P >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.check_valid = P.CheckValid() - >>> def construct(self, x, y): - >>> valid_result = self.check_valid(x, y) - >>> return valid_result - >>> + ... def __init__(self): + ... super(Net, self).__init__() + ... self.check_valid = P.CheckValid() + ... def construct(self, x, y): + ... valid_result = self.check_valid(x, y) + ... return valid_result + ... >>> bboxes = Tensor(np.linspace(0, 6, 12).reshape(3, 4), mindspore.float32) >>> img_metas = Tensor(np.array([2, 1, 3]), mindspore.float32) >>> net = Net() >>> output = net(bboxes, img_metas) >>> print(output) - [True False False] + [ True False False] """ @prim_attr_register @@ -292,10 +301,12 @@ class IOU(PrimitiveWithInfer): >>> iou = P.IOU() >>> anchor_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]), mindspore.float16) >>> gt_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]), mindspore.float16) - >>> iou(anchor_boxes, gt_boxes) - [[0.0, 65504, 65504], - [0.0, 0.0, 0.0], - [0.22253, 0.0, 0.0]] + >>> output = iou(anchor_boxes, gt_boxes) + >>> print(output) + [[65000. 65500. -0.] + [65000. 65500. -0.] + [ 0. 0. 0.]] + """ @prim_attr_register @@ -336,19 +347,20 @@ class MakeRefKey(Primitive): Examples: >>> from mindspore.ops import functional as F >>> class Net(nn.Cell): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.y = mindspore.Parameter(Tensor(np.ones([6, 8, 10]), mindspore.int32), name="y") - >>> self.make_ref_key = P.MakeRefKey("y") - >>> - >>> def construct(self, x): - >>> key = self.make_ref_key() - >>> ref = F.make_ref(key, x, self.y) - >>> return ref * x - >>> + ... def __init__(self): + ... super(Net, self).__init__() + ... self.y = mindspore.Parameter(Tensor(np.ones([6, 8, 10]), mindspore.int32), name="y") + ... self.make_ref_key = P.MakeRefKey("y") + ... + ... def construct(self, x): + ... key = self.make_ref_key() + ... ref = F.make_ref(key, x, self.y) + ... return ref * x + ... 
>>> x = Tensor(np.ones([3, 4, 5]), mindspore.int32) >>> net = Net() - >>> net(x) + >>> output = net(x) + >>> print(output) """ @prim_attr_register @@ -536,7 +548,9 @@ class PopulationCount(PrimitiveWithInfer): Examples: >>> population_count = P.PopulationCount() >>> x_input = Tensor([0, 1, 3], mindspore.int16) - >>> population_count(x_input) + >>> output = population_count(x_input) + >>> print(output) + [0 1 2] """ @prim_attr_register diff --git a/mindspore/ops/operations/random_ops.py b/mindspore/ops/operations/random_ops.py index d07f4fd4fc..aa921b15f0 100644 --- a/mindspore/ops/operations/random_ops.py +++ b/mindspore/ops/operations/random_ops.py @@ -396,16 +396,27 @@ class RandomCategorical(PrimitiveWithInfer): Examples: >>> class Net(nn.Cell): - >>> def __init__(self, num_sample): - >>> super(Net, self).__init__() - >>> self.random_categorical = P.RandomCategorical(mindspore.int64) - >>> self.num_sample = num_sample - >>> def construct(self, logits, seed=0): - >>> return self.random_categorical(logits, self.num_sample, seed) - >>> + ... def __init__(self, num_sample): + ... super(Net, self).__init__() + ... self.random_categorical = P.RandomCategorical(mindspore.int64) + ... self.num_sample = num_sample + ... def construct(self, logits, seed=0): + ... return self.random_categorical(logits, self.num_sample, seed) + ... >>> x = np.random.random((10, 5)).astype(np.float32) >>> net = Net(8) >>> output = net(Tensor(x)) + >>> print(output) + [[0 2 1 3 4 2 0 2] + [0 2 1 3 4 2 0 2] + [0 2 1 3 4 2 0 2] + [0 2 1 3 4 2 0 2] + [0 2 0 3 4 2 0 2] + [0 2 1 3 4 3 0 3] + [0 2 1 3 4 2 0 2] + [0 2 1 3 4 2 0 2] + [0 2 1 3 4 2 0 2] + [0 2 0 3 4 2 0 2]] """ @prim_attr_register
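Most of the updated outputs above are produced from np.random inputs, so they are illustrative rather than reproducible; the PopulationCount example is one of the few that is deterministic. A minimal standalone sketch of that check, assuming only the imports already used in these docstrings (mindspore, Tensor, and P from mindspore.ops.operations):

    import mindspore
    from mindspore import Tensor
    from mindspore.ops import operations as P

    # Count the set bits of each int16 element, mirroring the updated docstring example.
    population_count = P.PopulationCount()
    x_input = Tensor([0, 1, 3], mindspore.int16)
    output = population_count(x_input)
    print(output)  # expected, per the docstring above: [0 1 2]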