!8771 [#I24U3E][#I24U50][#I24TZT][#I24U7V] BUG-Fixed: [CT][MS][Document] the example in doc has no print

From: @david-he91
Reviewed-by: @liangchenghui
Signed-off-by: @liangchenghui
pull/8771/MERGE
Committed-by: mindspore-ci-bot (Gitee)
commit c12e3876cc
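
The pattern applied throughout the hunks below: bind the example's result to `output` and print it, so the documented value is the literal output of `print` rather than a hand-written annotation. A minimal sketch of the convention, using the ReLU values recorded in this diff (the input tensor is an assumption, chosen to reproduce that output; the diff does not show it):

    import numpy as np
    import mindspore
    import mindspore.nn as nn
    from mindspore import Tensor

    # Convention enforced by this commit: examples assign to `output` and
    # print it, so the expected value in the docstring is checkable output.
    relu = nn.ReLU()
    input_x = Tensor(np.array([-1, 2, -3, 2, -1]), mindspore.float32)
    output = relu(input_x)
    print(output)
    # [0. 2. 0. 2. 0.]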

@@ -72,7 +72,7 @@ class Softmax(Cell):
 >>> softmax = nn.Softmax()
 >>> output = softmax(input_x)
 >>> print(output)
-[0.03168 0.01166 0.0861 0.636 0.2341]
+[0.03168 0.01166 0.0861 0.636 0.2341 ]
 """
 def __init__(self, axis=-1):
@@ -179,7 +179,7 @@ class ReLU(Cell):
 >>> relu = nn.ReLU()
 >>> output = relu(input_x)
 >>> print(output)
-[0. 2. 0. 2. 0.]
+[0. 2. 0. 2. 0.]
 """
 def __init__(self):
@@ -209,7 +209,7 @@ class ReLU6(Cell):
 >>> relu6 = nn.ReLU6()
 >>> output = relu6(input_x)
 >>> print(output)
-[0. 0. 0. 2. 1.]
+[0. 0. 0. 2. 1.]
 """
 def __init__(self):
@@ -248,7 +248,7 @@ class LeakyReLU(Cell):
 >>> output = leaky_relu(input_x)
 >>> print(output)
 [[-0.2 4. -1.6]
-[ 2 -1. 9.]]
+[ 2 -1. 9. ]]
 """
 def __init__(self, alpha=0.2):
@@ -292,7 +292,7 @@ class Tanh(Cell):
 >>> tanh = nn.Tanh()
 >>> output = tanh(input_x)
 >>> print(output)
-[0.7617 0.964 0.995 0.964 0.7617]
+[0.7617 0.964 0.995 0.964 0.7617]
 """
 def __init__(self):
@@ -356,7 +356,7 @@ class Sigmoid(Cell):
 >>> sigmoid = nn.Sigmoid()
 >>> output = sigmoid(input_x)
 >>> print(output)
-[0.2688 0.11914 0.5 0.881 0.7305]
+[0.2688 0.11914 0.5 0.881 0.7305 ]
 """
 def __init__(self):
@@ -517,10 +517,9 @@ class LogSigmoid(Cell):
 Examples:
 >>> net = nn.LogSigmoid()
 >>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
->>> logsigmoid = net(input_x)
->>> print(logsigmoid)
-[-3.1326166e-01, -1.2692806e-01, -4.8587345e-02]
+>>> output = net(input_x)
+>>> print(output)
+[-0.31326166 -0.12692806 -0.04858734]
 """
 def __init__(self):

@@ -78,10 +78,10 @@ class Dropout(Cell):
 >>> net.set_train()
 >>> output = net(x)
 >>> print(output)
-[[[0., 1.25, 0.],
-[1.25, 1.25, 1.25]],
-[[1.25, 1.25, 1.25],
-[1.25, 1.25, 1.25]]]
+[[[0. 1.25 0. ]
+[1.25 1.25 1.25]]
+[[1.25 1.25 1.25]
+[1.25 1.25 1.25]]]
 """
 def __init__(self, keep_prob=0.5, dtype=mstype.float32):
@@ -320,8 +320,8 @@ class ClipByNorm(Cell):
 >>> net = nn.ClipByNorm()
 >>> input = Tensor(np.random.randint(0, 10, [4, 16]), mindspore.float32)
 >>> clip_norm = Tensor(np.array([100]).astype(np.float32))
->>> result = net(input, clip_norm).shape
->>> print(result)
+>>> output = net(input, clip_norm)
+>>> print(output.shape)
 (4, 16)
 """
@@ -392,7 +392,7 @@ class Norm(Cell):
 >>> input = Tensor(np.random.randint(0, 10, [2, 4]), mindspore.float32)
 >>> output = net(input)
 >>> print(output)
-[2.236068 9.848858 4. 5.656854]
+[7.81025 6.708204 0. 8.602325]
 """
 def __init__(self, axis=(), keep_dims=False):
@@ -514,7 +514,12 @@ class Pad(Cell):
 ... return self.pad(x)
 >>> x = np.random.random(size=(2, 3)).astype(np.float32)
 >>> pad = Net()
->>> ms_output = pad(Tensor(x))
+>>> output = pad(Tensor(x))
+>>> print(output)
+[[0. 0. 0. 0. 0. 0. ]
+ [0. 0. 0.82691735 0.36147234 0.70918983 0. ]
+ [0. 0. 0.7842975 0.44726616 0.4353459 0. ]
+ [0. 0. 0. 0. 0. 0. ]]
 """
 def __init__(self, paddings, mode="CONSTANT"):
@@ -574,9 +579,8 @@ class Unfold(Cell):
 >>> net = Unfold(ksizes=[1, 2, 2, 1], strides=[1, 2, 2, 1], rates=[1, 2, 2, 1])
 >>> image = Tensor(np.ones([2, 3, 6, 6]), dtype=mstype.float16)
 >>> output = net(image)
->>> print(output)
-[[[[1, 1] [1, 1]] [[1, 1], [1, 1]] [[1, 1] [1, 1]], [[1, 1] [1, 1]], [[1, 1] [1, 1]],
-[[1, 1], [1, 1]]]]
+>>> print(output.shape)
+(2, 12, 2, 2)
 """
 def __init__(self, ksizes, strides, rates, padding="valid"):
@@ -627,8 +631,8 @@ class MatrixDiag(Cell):
 Examples:
 >>> x = Tensor(np.array([1, -1]), mstype.float32)
 >>> matrix_diag = nn.MatrixDiag()
->>> result = matrix_diag(x)
->>> print(result)
+>>> output = matrix_diag(x)
+>>> print(output)
 [[1. 0.]
 [0. -1.]]
 """
@@ -659,9 +663,11 @@ class MatrixDiagPart(Cell):
 Examples:
 >>> x = Tensor([[[-1, 0], [0, 1]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32)
 >>> matrix_diag_part = nn.MatrixDiagPart()
->>> result = matrix_diag_part(x)
->>> print(result)
-[[-1., 1.], [-1., 1.], [-1., 1.]]
+>>> output = matrix_diag_part(x)
+>>> print(output)
+[[-1. 1.]
+ [-1. 1.]
+ [-1. 1.]]
 """
 def __init__(self):
 super(MatrixDiagPart, self).__init__()
@@ -692,9 +698,14 @@ class MatrixSetDiag(Cell):
 >>> x = Tensor([[[-1, 0], [0, 1]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32)
 >>> diagonal = Tensor([[-1., 2.], [-1., 1.], [-1., 1.]], mindspore.float32)
 >>> matrix_set_diag = nn.MatrixSetDiag()
->>> result = matrix_set_diag(x, diagonal)
->>> print(result)
-[[[-1, 0], [0, 2]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]]
+>>> output = matrix_set_diag(x, diagonal)
+>>> print(output)
+[[[-1. 0.]
+ [ 0. 2.]]
+ [[-1. 0.]
+ [ 0. 1.]]
+ [[-1. 0.]
+ [ 0. 1.]]]
 """
 def __init__(self):
 super(MatrixSetDiag, self).__init__()

@@ -85,7 +85,6 @@ class SequentialCell(Cell):
 >>> bn = nn.BatchNorm2d(2)
 >>> relu = nn.ReLU()
 >>> seq = nn.SequentialCell([conv, bn, relu])
->>>
 >>> x = Tensor(np.random.random((1, 3, 4, 4)), dtype=mindspore.float32)
 >>> output = seq(x)
 >>> print(output)
@@ -158,10 +157,10 @@ class SequentialCell(Cell):
 >>> x = Tensor(np.ones([1, 3, 4, 4]), dtype=mindspore.float32)
 >>> output = seq(x)
 >>> print(output)
-[[[[0.12445523 0.12445523]
-[0.12445523 0.12445523]]
-[[0. 0. ]
-[0. 0. ]]]]
+[[[[0.08789019 0.08789019]
+[0.08789019 0.08789019]]
+[[0.07690391 0.07690391]
+[0.07690391 0.07690391]]]]
 """
 if _valid_cell(cell):
 self._cells[str(len(self))] = cell
@@ -195,9 +194,11 @@ class CellList(_CellListBase, Cell):
 >>> x = Tensor(np.random.random((1, 3, 4, 4)), dtype=mindspore.float32)
 >>> # not same as nn.SequentialCell, `cell_ls(x)` is not correct
 >>> cell_ls
-CellList< (0): Conv2d<input_channels=100, ..., bias_init=None>
-(1): BatchNorm2d<num_features=20, ..., moving_variance=Parameter (name=variance)>
-(2): ReLU<> >
+CellList<
+(0): Conv2d<input_channels=100, ..., bias_init=None>
+(1): BatchNorm2d<num_features=20, ..., moving_variance=Parameter (name=variance)>
+(2): ReLU<>
+>
 """
 def __init__(self, *args):
 _CellListBase.__init__(self)

@@ -52,13 +52,14 @@ class ImageGradients(Cell):
 Examples:
 >>> net = nn.ImageGradients()
->>> image = Tensor(np.array([[[[1,2],[3,4]]]]), dtype=mstype.int32)
+>>> image = Tensor(np.array([[[[1,2],[3,4]]]]), dtype=mindspore.int32)
 >>> output = net(image)
 >>> print(output)
-[[[[2,2]
-[0,0]]]]
-[[[[1,0]
-[1,0]]]]
+(Tensor(shape=[1, 1, 2, 2], dtype=Int32, value=
+[[[[2, 2],
+[0, 0]]]]), Tensor(shape=[1, 1, 2, 2], dtype=Int32, value=
+[[[[1, 0],
+[1, 0]]]]))
 """
 def __init__(self):
 super(ImageGradients, self).__init__()
@@ -214,8 +215,8 @@ class SSIM(Cell):
 >>> net = nn.SSIM()
 >>> img1 = Tensor(np.random.random((1,3,16,16)), mindspore.float32)
 >>> img2 = Tensor(np.random.random((1,3,16,16)), mindspore.float32)
->>> ssim = net(img1, img2)
->>> print(ssim)
+>>> output = net(img1, img2)
+>>> print(output)
 [0.12174469]
 """
 def __init__(self, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03):
@@ -290,11 +291,11 @@ class MSSSIM(Cell):
 Examples:
 >>> net = nn.MSSSIM(power_factors=(0.033, 0.033, 0.033))
->>> img1 = Tensor(np.random.random((1, 3, 128, 128)))
->>> img2 = Tensor(np.random.random((1, 3, 128, 128)))
->>> result = net(img1, img2)
->>> print(result)
-[0.20930639]
+>>> img1 = Tensor(np.random.random((1,3,128,128)))
+>>> img2 = Tensor(np.random.random((1,3,128,128)))
+>>> output = net(img1, img2)
+>>> print(output)
+[0.22965115]
 """
 def __init__(self, max_val=1.0, power_factors=(0.0448, 0.2856, 0.3001, 0.2363, 0.1333), filter_size=11,
 filter_sigma=1.5, k1=0.01, k2=0.03):
@@ -382,9 +383,9 @@ class PSNR(Cell):
 >>> net = nn.PSNR()
 >>> img1 = Tensor(np.random.random((1,3,16,16)))
 >>> img2 = Tensor(np.random.random((1,3,16,16)))
->>> psnr = net(img1, img2)
->>> print(psnr)
-[7.8297315]
+>>> output = net(img1, img2)
+>>> print(output)
+[7.7229595]
 """
 def __init__(self, max_val=1.0):
 super(PSNR, self).__init__()
@@ -452,8 +453,7 @@ class CentralCrop(Cell):
 >>> net = nn.CentralCrop(central_fraction=0.5)
 >>> image = Tensor(np.random.random((4, 3, 4, 4)), mindspore.float32)
 >>> output = net(image)
->>> result = output.shape
->>> print(result)
+>>> print(output.shape)
 (4, 3, 2, 2)
 """

@@ -64,8 +64,7 @@ class ReduceLogSumExp(Cell):
 >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
 >>> op = nn.ReduceLogSumExp(1, keep_dims=True)
 >>> output = op(input_x)
->>> result = output.shape
->>> print(reuslt)
+>>> print(output.shape)
 (3, 1, 5, 6)
 """
@@ -101,9 +100,9 @@ class Range(Cell):
 Examples:
 >>> net = nn.Range(1, 8, 2)
->>> out = net()
->>> print(out)
-[1, 3, 5, 7]
+>>> output = net()
+>>> print(output)
+[1 3 5 7]
 """
 def __init__(self, start, limit=None, delta=1):
@@ -157,7 +156,7 @@ class LinSpace(Cell):
 >>> linspace = nn.LinSpace(1, 10, 5)
 >>> output = linspace()
 >>> print(output)
-[1, 3.25, 5.5, 7.75, 10]
+[ 1. 3.25 5.5 7.75 10. ]
 """
 def __init__(self, start, stop, num):
@@ -230,6 +229,7 @@ class LGamma(Cell):
 >>> input_x = Tensor(np.array([2, 3, 4]).astype(np.float32))
 >>> op = nn.LGamma()
 >>> output = op(input_x)
+>>> print(output)
 [3.5762787e-07 6.9314754e-01 1.7917603e+00]
 """
@@ -830,9 +830,13 @@ class Moments(Cell):
 Examples:
 >>> net = nn.Moments(axis=3, keep_dims=True)
 >>> input_x = Tensor(np.array([[[[1, 2, 3, 4], [3, 4, 5, 6]]]]), mindspore.float32)
->>> mean, var = net(input_x)
-mean: [[[[2.5], [4.5]]]]
-var: [[[[1.25], [1.25]]]]
+>>> output = net(input_x)
+>>> print(output)
+(Tensor(shape=[1, 1, 2, 1], dtype=Float32, value=
+[[[[ 2.50000000e+00],
+[ 4.50000000e+00]]]]), Tensor(shape=[1, 1, 2, 1], dtype=Float32, value=
+[[[[ 1.25000000e+00],
+[ 1.25000000e+00]]]]))
 """
 def __init__(self, axis=None, keep_dims=None):
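
The Moments hunk above replaces hand-written `mean:`/`var:` pseudo-output with what `print` actually shows for the returned tuple of Tensors. A sketch of both views, reusing the values recorded in the hunk (the reprs in the comments follow the old annotation and are illustrative):

    import numpy as np
    import mindspore
    import mindspore.nn as nn
    from mindspore import Tensor

    net = nn.Moments(axis=3, keep_dims=True)
    input_x = Tensor(np.array([[[[1, 2, 3, 4], [3, 4, 5, 6]]]]), mindspore.float32)

    # nn.Moments returns a (mean, variance) tuple; printing the tuple shows
    # the Tensor reprs recorded in the new expected output, while unpacking
    # recovers the per-statistic view the old pseudo-output described.
    mean, var = net(input_x)
    print(mean)  # mean along axis 3: [[[[2.5], [4.5]]]]
    print(var)   # variance along axis 3: [[[[1.25], [1.25]]]]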

@@ -285,12 +285,11 @@ class BatchNorm1d(_BatchNorm):
 Examples:
 >>> net = nn.BatchNorm1d(num_features=4)
->>> input = Tensor(np.random.randint(0, 255, [3, 4]), mindspore.float32)
->>> result = net(input)
->>> print(result)
-[[ 57.99971 50.99974 220.99889 222.99889 ]
-[106.99947 193.99902 77.99961 101.99949 ]
-[ 85.99957 188.99905 46.99976 226.99887 ]]
+>>> input = Tensor(np.random.randint(0, 255, [2, 4]), mindspore.float32)
+>>> output = net(input)
+>>> print(output)
+[[210.99895 136.99931 89.99955 240.9988 ]
+[ 87.99956 157.9992 89.99955 42.999786]]
 """
 def __init__(self,
@@ -371,23 +370,15 @@ class BatchNorm2d(_BatchNorm):
 Examples:
 >>> net = nn.BatchNorm2d(num_features=3)
->>> input = Tensor(np.random.randint(0, 255, [1, 3, 4, 4]), mindspore.float32)
->>> result = net(input)
->>> print(result)
-[[[[148.99925 148.99925 178.9991 77.99961 ]
-[ 41.99979 97.99951 157.9992 94.99953 ]
-[ 87.99956 158.9992 50.99974 179.9991 ]
-[146.99927 27.99986 119.9994 253.99873 ]]
-[[178.9991 187.99905 190.99904 88.99956 ]
-[213.99893 158.9992 13.99993 200.999 ]
-[224.99887 56.99971 246.99876 239.9988 ]
-[ 97.99951 34.99983 28.99986 57.99971 ]]
-[[ 14.99993 31.99984 136.99931 207.99896 ]
-[180.9991 28.99986 23.99988 71.99964 ]
-[112.99944 36.99981 213.99893 71.99964 ]
-[ 8.99996 162.99919 157.9992 41.99979 ]]]]
+>>> input = Tensor(np.random.randint(0, 255, [1, 3, 2, 2]), mindspore.float32)
+>>> output = net(input)
+>>> print(output)
+[[[[128.99936 53.99973]
+[191.99904 183.99908]]
+[[146.99927 182.99908]
+[184.99907 120.9994 ]]
+[[ 33.99983 234.99883]
+[188.99905 11.99994]]]]
 """
 def __init__(self,
@@ -618,7 +609,7 @@ class GroupNorm(Cell):
 [[[[0. 0. 0. 0.]
 [0. 0. 0. 0.]
 [0. 0. 0. 0.]
-[0. 0. 0. 0.]],
+[0. 0. 0. 0.]]
 [[0. 0. 0. 0.]
 [0. 0. 0. 0.]
 [0. 0. 0. 0.]

@@ -107,19 +107,7 @@ class MaxPool2d(_PoolNd):
 Examples:
 >>> pool = nn.MaxPool2d(kernel_size=3, stride=1)
 >>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32)
->>> print(x)
-[[[[1. 5. 5. 1.]
-[0. 3. 4. 8.]
-[4. 2. 7. 6.]
-[4. 9. 0. 1.]]
-[[3. 6. 2. 6.]
-[4. 4. 7. 8.]
-[0. 0. 4. 0.]
-[1. 8. 7. 0.]]]]
 >>> output = pool(x)
->>> reuslt = output.shape
->>> print(result)
-(1, 2, 2, 2)
 >>> print(output)
 [[[[7. 8.]
 [9. 9.]]
@@ -272,19 +260,7 @@ class AvgPool2d(_PoolNd):
 Examples:
 >>> pool = nn.AvgPool2d(kernel_size=3, stride=1)
 >>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32)
->>> print(x)
-[[[[5. 5. 9. 9.]
-[8. 4. 3. 0.]
-[2. 7. 1. 2.]
-[1. 8. 3. 3.]]
-[[6. 8. 2. 4.]
-[3. 0. 2. 1.]
-[0. 8. 9. 7.]
-[2. 1. 4. 9.]]]]
 >>> output = pool(x)
->>> result = output.shape
->>> print(result)
-(1, 2, 2, 2)
 >>> print(output)
 [[[[4.888889 4.4444447]
 [4.111111 3.4444444]]

@@ -234,9 +234,10 @@ class FakeQuantWithMinMaxObserver(UniformQuantObserver):
 Examples:
 >>> fake_quant = nn.FakeQuantWithMinMaxObserver()
 >>> input = Tensor(np.array([[1, 2, 1], [-2, 0, -1]]), mindspore.float32)
->>> result = fake_quant(input)
->>> print(result)
-[[0.9882355, 1.9764705, 0.9882355], [-1.9764705, 0. , -0.9882355]]
+>>> output = fake_quant(input)
+>>> print(output)
+[[ 0.9882355 1.9764705 0.9882355]
+[-1.9764705 0. -0.9882355]]
 """
 def __init__(self,
@@ -589,11 +590,10 @@ class Conv2dBnFoldQuant(Cell):
 Examples:
 >>> qconfig = compression.quant.create_quant_config()
 >>> conv2d_bnfold = nn.Conv2dBnFoldQuant(1, 6, kernel_size=(2, 2), stride=(1, 1), pad_mode="valid",
->>> quant_config=qconfig)
+... quant_config=qconfig)
 >>> input = Tensor(np.random.randint(-2, 2, (2, 1, 3, 3)), mindspore.float32)
->>> result = conv2d_bnfold(input)
->>> output = result.shape
->>> print(output)
+>>> output = conv2d_bnfold(input)
+>>> print(output.shape)
 (2, 6, 2, 2)
 """
@@ -775,11 +775,10 @@ class Conv2dBnWithoutFoldQuant(Cell):
 Examples:
 >>> qconfig = compression.quant.create_quant_config()
 >>> conv2d_no_bnfold = nn.Conv2dBnWithoutFoldQuant(1, 6, kernel_size=(2, 2), stride=(1, 1), pad_mode="valid",
->>> quant_config=qconfig)
+... quant_config=qconfig)
 >>> input = Tensor(np.random.randint(-2, 2, (2, 1, 3, 3)), mstype.float32)
->>> result = conv2d_no_bnfold(input)
->>> output = result.shape
->>> print(output)
+>>> output = conv2d_no_bnfold(input)
+>>> print(output.shape)
 (2, 6, 2, 2)
 """
@@ -897,11 +896,10 @@ class Conv2dQuant(Cell):
 Examples:
 >>> qconfig = compression.quant.create_quant_config()
 >>> conv2d_quant = nn.Conv2dQuant(1, 6, kernel_size= (2, 2), stride=(1, 1), pad_mode="valid",
->>> quant_config=qconfig)
+... quant_config=qconfig)
 >>> input = Tensor(np.random.randint(-2, 2, (2, 1, 3, 3)), mindspore.float32)
->>> result = conv2d_quant(input)
->>> output = result.shape
->>> print(output)
+>>> output = conv2d_quant(input)
+>>> print(output.shape)
 (2, 6, 2, 2)
 """
@@ -1106,9 +1104,10 @@ class ActQuant(_QuantActivation):
 >>> qconfig = compression.quant.create_quant_config()
 >>> act_quant = nn.ActQuant(nn.ReLU(), quant_config=qconfig)
 >>> input = Tensor(np.array([[1, 2, -1], [-2, 0, -1]]), mindspore.float32)
->>> result = act_quant(input)
->>> print(result)
-[[0.9882355, 1.9764705, 0.], [0., 0., 0.]]
+>>> output = act_quant(input)
+>>> print(output)
+[[0.9882355 1.9764705 0. ]
+[0. 0. 0. ]]
 """
 def __init__(self,
@@ -1168,9 +1167,10 @@ class TensorAddQuant(Cell):
 >>> add_quant = nn.TensorAddQuant(quant_config=qconfig)
 >>> input_x1 = Tensor(np.array([[1, 2, 1], [-2, 0, -1]]), mindspore.float32)
 >>> input_x2 = Tensor(np.ones((2, 3)), mindspore.float32)
->>> result = add_quant(input_x1, input_x2)
->>> print(result)
-[[1.9764705, 3.011765, 1.9764705], [-0.9882355, 0.9882355, 0.]]
+>>> output = add_quant(input_x1, input_x2)
+>>> print(output)
+[[ 1.9764705 3.011765 1.9764705]
+[-0.9882355 0.9882355 0. ]]
 """
 def __init__(self,
@@ -1215,9 +1215,10 @@ class MulQuant(Cell):
 >>> mul_quant = nn.MulQuant(quant_config=qconfig)
 >>> input_x1 = Tensor(np.array([[1, 2, 1], [-2, 0, -1]]), mindspore.float32)
 >>> input_x2 = Tensor(np.ones((2, 3)) * 2, mindspore.float32)
->>> result = mul_quant(input_x1, input_x2)
->>> print(result)
-[[1.9764705, 4.0000005, 1.9764705], [-4., 0., -1.9764705]]
+>>> output = mul_quant(input_x1, input_x2)
+>>> print(output)
+[[ 1.9764705 4.0000005 1.9764705]
+[-4. 0. -1.9764705]]
 """
 def __init__(self,

@@ -95,7 +95,8 @@ class L1Loss(_Loss):
 >>> loss = nn.L1Loss()
 >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
 >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)
->>> loss(input_data, target_data)
+>>> output = loss(input_data, target_data)
+>>> print(output)
 0.33333334
 """
 def __init__(self, reduction='mean'):
@@ -183,7 +184,9 @@ class SmoothL1Loss(_Loss):
 >>> loss = nn.SmoothL1Loss()
 >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
 >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)
->>> loss(input_data, target_data)
+>>> output = loss(input_data, target_data)
+>>> print(output)
+[0. 0. 0.5]
 """
 def __init__(self, beta=1.0):
 super(SmoothL1Loss, self).__init__()
@@ -236,7 +239,9 @@ class SoftmaxCrossEntropyWithLogits(_Loss):
 >>> logits = Tensor(np.random.randint(0, 9, [1, 10]), mindspore.float32)
 >>> labels_np = np.ones([1,]).astype(np.int32)
 >>> labels = Tensor(labels_np)
->>> loss(logits, labels)
+>>> output = loss(logits, labels)
+>>> print(output)
+[5.6924148]
 """
 def __init__(self,
 sparse=False,
@@ -299,7 +304,7 @@ class SampledSoftmaxLoss(_Loss):
 >>> labels = Tensor([0, 1, 2])
 >>> inputs = Tensor(np.random.randint(0, 9, [3, 10]), mindspore.float32)
 >>> output = loss(weights, biases, labels, inputs)
->>> print(output) # output is ranndom
+>>> print(output)
 [ 4.0181947 46.050743 7.0009117]
 """
@@ -557,7 +562,7 @@ class CosineEmbeddingLoss(_Loss):
 >>> cosine_embedding_loss = nn.CosineEmbeddingLoss()
 >>> output = cosine_embedding_loss(x1, x2, y)
 >>> print(output)
-[0.0003426671]
+[0.0003426075]
 """
 def __init__(self, margin=0.0, reduction="mean"):
 super(CosineEmbeddingLoss, self).__init__(reduction)

@@ -39,7 +39,9 @@ class TopKCategoricalAccuracy(Metric):
 >>> topk = nn.TopKCategoricalAccuracy(3)
 >>> topk.clear()
 >>> topk.update(x, y)
->>> result = topk.eval()
+>>> output = topk.eval()
+>>> print(output)
+0.6666666666666666
 """
 def __init__(self, k):
 super(TopKCategoricalAccuracy, self).__init__()
@@ -103,7 +105,9 @@ class Top1CategoricalAccuracy(TopKCategoricalAccuracy):
 >>> topk = nn.Top1CategoricalAccuracy()
 >>> topk.clear()
 >>> topk.update(x, y)
->>> result = topk.eval()
+>>> output = topk.eval()
+>>> print(output)
+0.0
 """
 def __init__(self):
 super(Top1CategoricalAccuracy, self).__init__(1)
@@ -121,7 +125,9 @@ class Top5CategoricalAccuracy(TopKCategoricalAccuracy):
 >>> topk = nn.Top5CategoricalAccuracy()
 >>> topk.clear()
 >>> topk.update(x, y)
->>> result = topk.eval()
+>>> output = topk.eval()
+>>> print(output)
+1.0
 """
 def __init__(self):
 super(Top5CategoricalAccuracy, self).__init__(5)

@@ -45,6 +45,7 @@ class Exp(PowerTransform):
 ... ans2 = self.s1.inverse(value)
 ... ans3 = self.s1.forward_log_jacobian(value)
 ... ans4 = self.s1.inverse_log_jacobian(value)
+...
 """
 def __init__(self,

@@ -53,6 +53,7 @@ class GumbelCDF(Bijector):
 ... ans2 = self.gum.inverse(value)
 ... ans3 = self.gum.forward_log_jacobian(value)
 ... ans4 = self.gum.inverse_log_jacobian(value)
+...
 """
 def __init__(self,

@@ -57,6 +57,7 @@ class PowerTransform(Bijector):
 ... ans2 = self.s1.inverse(value)
 ... ans3 = self.s1.forward_log_jacobian(value)
 ... ans4 = self.s1.inverse_log_jacobian(value)
+...
 """
 def __init__(self,

@@ -53,6 +53,7 @@ class ScalarAffine(Bijector):
 ... ans2 = self.s1.inverse(value)
 ... ans3 = self.s1.forward_log_jacobian(value)
 ... ans4 = self.s1.inverse_log_jacobian(value)
+...
 """
 def __init__(self,

@@ -50,62 +50,63 @@ class Bernoulli(Distribution):
 >>>
 >>> # To use the Bernoulli distribution in a network.
 >>> class net(Cell):
->>> def __init__(self):
->>> super(net, self).__init__():
->>> self.b1 = msd.Bernoulli(0.5, dtype=mstype.int32)
->>> self.b2 = msd.Bernoulli(dtype=mstype.int32)
->>>
->>> # All the following calls in construct are valid.
->>> def construct(self, value, probs_b, probs_a):
->>>
->>> # Private interfaces of probability functions corresponding to public interfaces, including
->>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows.
->>> # Args:
->>> # value (Tensor): the value to be evaluated.
->>> # probs1 (Tensor): the probability of success. Default: self.probs.
->>>
->>> # Examples of `prob`.
->>> # Similar calls can be made to other probability functions
->>> # by replacing `prob` by the name of the function.
->>> ans = self.b1.prob(value)
->>> # Evaluate `prob` with respect to distribution b.
->>> ans = self.b1.prob(value, probs_b)
->>> # `probs` must be passed in during function calls.
->>> ans = self.b2.prob(value, probs_a)
->>>
->>>
->>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
->>> # Args:
->>> # probs1 (Tensor): the probability of success. Default: self.probs.
->>>
->>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
->>> ans = self.b1.mean() # return 0.5
->>> ans = self.b1.mean(probs_b) # return probs_b
->>> # `probs` must be passed in during function calls.
->>> ans = self.b2.mean(probs_a)
->>>
->>>
->>> # Interfaces of `kl_loss` and `cross_entropy` are the same as follows:
->>> # Args:
->>> # dist (str): the name of the distribution. Only 'Bernoulli' is supported.
->>> # probs1_b (Tensor): the probability of success of distribution b.
->>> # probs1_a (Tensor): the probability of success of distribution a. Default: self.probs.
->>>
->>> # Examples of kl_loss. `cross_entropy` is similar.
->>> ans = self.b1.kl_loss('Bernoulli', probs_b)
->>> ans = self.b1.kl_loss('Bernoulli', probs_b, probs_a)
->>> # An additional `probs_a` must be passed in.
->>> ans = self.b2.kl_loss('Bernoulli', probs_b, probs_a)
->>>
->>>
->>> # Examples of `sample`.
->>> # Args:
->>> # shape (tuple): the shape of the sample. Default: ().
->>> # probs1 (Tensor): the probability of success. Default: self.probs.
->>> ans = self.b1.sample()
->>> ans = self.b1.sample((2,3))
->>> ans = self.b1.sample((2,3), probs_b)
->>> ans = self.b2.sample((2,3), probs_a)
+... def __init__(self):
+... super(net, self).__init__():
+... self.b1 = msd.Bernoulli(0.5, dtype=mstype.int32)
+... self.b2 = msd.Bernoulli(dtype=mstype.int32)
+...
+... # All the following calls in construct are valid.
+... def construct(self, value, probs_b, probs_a):
+...
+... # Private interfaces of probability functions corresponding to public interfaces, including
+... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows.
+... # Args:
+... # value (Tensor): the value to be evaluated.
+... # probs1 (Tensor): the probability of success. Default: self.probs.
+...
+... # Examples of `prob`.
+... # Similar calls can be made to other probability functions
+... # by replacing `prob` by the name of the function.
+... ans = self.b1.prob(value)
+... # Evaluate `prob` with respect to distribution b.
+... ans = self.b1.prob(value, probs_b)
+... # `probs` must be passed in during function calls.
+... ans = self.b2.prob(value, probs_a)
+...
+...
+... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
+... # Args:
+... # probs1 (Tensor): the probability of success. Default: self.probs.
+...
+... # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
+... ans = self.b1.mean() # return 0.5
+... ans = self.b1.mean(probs_b) # return probs_b
+... # `probs` must be passed in during function calls.
+... ans = self.b2.mean(probs_a)
+...
+...
+... # Interfaces of `kl_loss` and `cross_entropy` are the same as follows:
+... # Args:
+... # dist (str): the name of the distribution. Only 'Bernoulli' is supported.
+... # probs1_b (Tensor): the probability of success of distribution b.
+... # probs1_a (Tensor): the probability of success of distribution a. Default: self.probs.
+...
+... # Examples of kl_loss. `cross_entropy` is similar.
+... ans = self.b1.kl_loss('Bernoulli', probs_b)
+... ans = self.b1.kl_loss('Bernoulli', probs_b, probs_a)
+... # An additional `probs_a` must be passed in.
+... ans = self.b2.kl_loss('Bernoulli', probs_b, probs_a)
+...
+...
+... # Examples of `sample`.
+... # Args:
+... # shape (tuple): the shape of the sample. Default: ().
+... # probs1 (Tensor): the probability of success. Default: self.probs.
+... ans = self.b1.sample()
+... ans = self.b1.sample((2,3))
+... ans = self.b1.sample((2,3), probs_b)
+... ans = self.b2.sample((2,3), probs_a)
+...
 """
 def __init__(self,

@@ -46,59 +46,60 @@ class Categorical(Distribution):
 >>>
 >>> # To use a Categorical distribution in a network
 >>> class net(Cell):
->>> def __init__(self, probs):
->>> super(net, self).__init__():
->>> self.ca = msd.Categorical(probs=[0.2, 0.8], dtype=mstype.int32)
->>> self.ca1 = msd.Categorical(dtype=mstype.int32)
->>>
->>> # All the following calls in construct are valid
->>> def construct(self, value):
->>>
->>> # Private interfaces of probability functions corresponding to public interfaces, including
->>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows.
->>> # Args:
->>> # value (Tensor): the value to be evaluated.
->>> # probs (Tensor): event probabilities. Default: self.probs.
->>>
->>> # Examples of `prob`.
->>> # Similar calls can be made to other probability functions
->>> # by replacing `prob` by the name of the function.
->>> ans = self.ca.prob(value)
->>> # Evaluate `prob` with respect to distribution b.
->>> ans = self.ca.prob(value, probs_b)
->>> # `probs` must be passed in during function calls.
->>> ans = self.ca1.prob(value, probs_a)
->>>
->>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
->>> # Args:
->>> # probs (Tensor): event probabilities. Default: self.probs.
->>>
->>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
->>> ans = self.ca.mean() # return 0.8
->>> ans = self.ca.mean(probs_b)
->>> # `probs` must be passed in during function calls.
->>> ans = self.ca1.mean(probs_a)
->>>
->>> # Interfaces of `kl_loss` and `cross_entropy` are the same as follows:
->>> # Args:
->>> # dist (str): the name of the distribution. Only 'Categorical' is supported.
->>> # probs_b (Tensor): event probabilities of distribution b.
->>> # probs (Tensor): event probabilities of distribution a. Default: self.probs.
->>>
->>> # Examples of kl_loss. `cross_entropy` is similar.
->>> ans = self.ca.kl_loss('Categorical', probs_b)
->>> ans = self.ca.kl_loss('Categorical', probs_b, probs_a)
->>> # An additional `probs` must be passed in.
->>> ans = self.ca1.kl_loss('Categorical', probs_b, probs_a)
->>>
->>> # Examples of `sample`.
->>> # Args:
->>> # shape (tuple): the shape of the sample. Default: ().
->>> # probs (Tensor): event probabilities. Default: self.probs.
->>> ans = self.ca.sample()
->>> ans = self.ca.sample((2,3))
->>> ans = self.ca.sample((2,3), probs_b)
->>> ans = self.ca1.sample((2,3), probs_a)
+... def __init__(self, probs):
+... super(net, self).__init__():
+... self.ca = msd.Categorical(probs=[0.2, 0.8], dtype=mstype.int32)
+... self.ca1 = msd.Categorical(dtype=mstype.int32)
+...
+... # All the following calls in construct are valid
+... def construct(self, value):
+...
+... # Private interfaces of probability functions corresponding to public interfaces, including
+... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows.
+... # Args:
+... # value (Tensor): the value to be evaluated.
+... # probs (Tensor): event probabilities. Default: self.probs.
+...
+... # Examples of `prob`.
+... # Similar calls can be made to other probability functions
+... # by replacing `prob` by the name of the function.
+... ans = self.ca.prob(value)
+... # Evaluate `prob` with respect to distribution b.
+... ans = self.ca.prob(value, probs_b)
+... # `probs` must be passed in during function calls.
+... ans = self.ca1.prob(value, probs_a)
+...
+... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
+... # Args:
+... # probs (Tensor): event probabilities. Default: self.probs.
+...
+... # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
+... ans = self.ca.mean() # return 0.8
+... ans = self.ca.mean(probs_b)
+... # `probs` must be passed in during function calls.
+... ans = self.ca1.mean(probs_a)
+...
+... # Interfaces of `kl_loss` and `cross_entropy` are the same as follows:
+... # Args:
+... # dist (str): the name of the distribution. Only 'Categorical' is supported.
+... # probs_b (Tensor): event probabilities of distribution b.
+... # probs (Tensor): event probabilities of distribution a. Default: self.probs.
+...
+... # Examples of kl_loss. `cross_entropy` is similar.
+... ans = self.ca.kl_loss('Categorical', probs_b)
+... ans = self.ca.kl_loss('Categorical', probs_b, probs_a)
+... # An additional `probs` must be passed in.
+... ans = self.ca1.kl_loss('Categorical', probs_b, probs_a)
+...
+... # Examples of `sample`.
+... # Args:
+... # shape (tuple): the shape of the sample. Default: ().
+... # probs (Tensor): event probabilities. Default: self.probs.
+... ans = self.ca.sample()
+... ans = self.ca.sample((2,3))
+... ans = self.ca.sample((2,3), probs_b)
+... ans = self.ca1.sample((2,3), probs_a)
+...
 """
 def __init__(self,

@@ -52,62 +52,63 @@ class Exponential(Distribution):
 >>>
 >>> # To use an Exponential distribution in a network.
 >>> class net(Cell):
->>> def __init__(self):
->>> super(net, self).__init__():
->>> self.e1 = msd.Exponential(0.5, dtype=mstype.float32)
->>> self.e2 = msd.Exponential(dtype=mstype.float32)
->>>
->>> # All the following calls in construct are valid.
->>> def construct(self, value, rate_b, rate_a):
->>>
->>> # Private interfaces of probability functions corresponding to public interfaces, including
->>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows.
->>> # Args:
->>> # value (Tensor): the value to be evaluated.
->>> # rate (Tensor): the rate of the distribution. Default: self.rate.
->>>
->>> # Examples of `prob`.
->>> # Similar calls can be made to other probability functions
->>> # by replacing `prob` by the name of the function.
->>> ans = self.e1.prob(value)
->>> # Evaluate with respect to distribution b.
->>> ans = self.e1.prob(value, rate_b)
->>> # `rate` must be passed in during function calls.
->>> ans = self.e2.prob(value, rate_a)
->>>
->>>
->>> # Functions `mean`, `sd`, 'var', and 'entropy' have the same arguments as follows.
->>> # Args:
->>> # rate (Tensor): the rate of the distribution. Default: self.rate.
->>>
->>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
->>> ans = self.e1.mean() # return 2
->>> ans = self.e1.mean(rate_b) # return 1 / rate_b
->>> # `rate` must be passed in during function calls.
->>> ans = self.e2.mean(rate_a)
->>>
->>>
->>> # Interfaces of `kl_loss` and `cross_entropy` are the same.
->>> # Args:
->>> # dist (str): The name of the distribution. Only 'Exponential' is supported.
->>> # rate_b (Tensor): the rate of distribution b.
->>> # rate_a (Tensor): the rate of distribution a. Default: self.rate.
->>>
->>> # Examples of `kl_loss`. `cross_entropy` is similar.
->>> ans = self.e1.kl_loss('Exponential', rate_b)
->>> ans = self.e1.kl_loss('Exponential', rate_b, rate_a)
->>> # An additional `rate` must be passed in.
->>> ans = self.e2.kl_loss('Exponential', rate_b, rate_a)
->>>
->>>
->>> # Examples of `sample`.
->>> # Args:
->>> # shape (tuple): the shape of the sample. Default: ()
->>> # probs1 (Tensor): the rate of the distribution. Default: self.rate.
->>> ans = self.e1.sample()
->>> ans = self.e1.sample((2,3))
->>> ans = self.e1.sample((2,3), rate_b)
->>> ans = self.e2.sample((2,3), rate_a)
+... def __init__(self):
+... super(net, self).__init__():
+... self.e1 = msd.Exponential(0.5, dtype=mstype.float32)
+... self.e2 = msd.Exponential(dtype=mstype.float32)
+...
+... # All the following calls in construct are valid.
+... def construct(self, value, rate_b, rate_a):
+...
+... # Private interfaces of probability functions corresponding to public interfaces, including
+... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows.
+... # Args:
+... # value (Tensor): the value to be evaluated.
+... # rate (Tensor): the rate of the distribution. Default: self.rate.
+...
+... # Examples of `prob`.
+... # Similar calls can be made to other probability functions
+... # by replacing `prob` by the name of the function.
+... ans = self.e1.prob(value)
+... # Evaluate with respect to distribution b.
+... ans = self.e1.prob(value, rate_b)
+... # `rate` must be passed in during function calls.
+... ans = self.e2.prob(value, rate_a)
+...
+...
+... # Functions `mean`, `sd`, 'var', and 'entropy' have the same arguments as follows.
+... # Args:
+... # rate (Tensor): the rate of the distribution. Default: self.rate.
+...
+... # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
+... ans = self.e1.mean() # return 2
+... ans = self.e1.mean(rate_b) # return 1 / rate_b
+... # `rate` must be passed in during function calls.
+... ans = self.e2.mean(rate_a)
+...
+...
+... # Interfaces of `kl_loss` and `cross_entropy` are the same.
+... # Args:
+... # dist (str): The name of the distribution. Only 'Exponential' is supported.
+... # rate_b (Tensor): the rate of distribution b.
+... # rate_a (Tensor): the rate of distribution a. Default: self.rate.
+...
+... # Examples of `kl_loss`. `cross_entropy` is similar.
+... ans = self.e1.kl_loss('Exponential', rate_b)
+... ans = self.e1.kl_loss('Exponential', rate_b, rate_a)
+... # An additional `rate` must be passed in.
+... ans = self.e2.kl_loss('Exponential', rate_b, rate_a)
+...
+...
+... # Examples of `sample`.
+... # Args:
+... # shape (tuple): the shape of the sample. Default: ()
+... # probs1 (Tensor): the rate of the distribution. Default: self.rate.
+... ans = self.e1.sample()
+... ans = self.e1.sample((2,3))
+... ans = self.e1.sample((2,3), rate_b)
+... ans = self.e2.sample((2,3), rate_a)
+...
 """
 def __init__(self,

@@ -53,62 +53,63 @@ class Geometric(Distribution):
 >>>
 >>> # To use a Geometric distribution in a network.
 >>> class net(Cell):
->>> def __init__(self):
->>> super(net, self).__init__():
->>> self.g1 = msd.Geometric(0.5, dtype=mstype.int32)
->>> self.g2 = msd.Geometric(dtype=mstype.int32)
->>>
->>> # The following calls are valid in construct.
->>> def construct(self, value, probs_b, probs_a):
->>>
->>> # Private interfaces of probability functions corresponding to public interfaces, including
->>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
->>> # Args:
->>> # value (Tensor): the value to be evaluated.
->>> # probs1 (Tensor): the probability of success of a Bernoulli trail. Default: self.probs.
->>>
->>> # Examples of `prob`.
->>> # Similar calls can be made to other probability functions
->>> # by replacing `prob` by the name of the function.
->>> ans = self.g1.prob(value)
->>> # Evaluate with respect to distribution b.
->>> ans = self.g1.prob(value, probs_b)
->>> # `probs` must be passed in during function calls.
->>> ans = self.g2.prob(value, probs_a)
->>>
->>>
->>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
->>> # Args:
->>> # probs1 (Tensor): the probability of success of a Bernoulli trail. Default: self.probs.
->>>
->>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
->>> ans = self.g1.mean() # return 1.0
->>> ans = self.g1.mean(probs_b)
->>> # Probs must be passed in during function calls
->>> ans = self.g2.mean(probs_a)
->>>
->>>
->>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same.
->>> # Args:
->>> # dist (str): the name of the distribution. Only 'Geometric' is supported.
->>> # probs1_b (Tensor): the probability of success of a Bernoulli trail of distribution b.
->>> # probs1_a (Tensor): the probability of success of a Bernoulli trail of distribution a. Default: self.probs.
->>>
->>> # Examples of `kl_loss`. `cross_entropy` is similar.
->>> ans = self.g1.kl_loss('Geometric', probs_b)
->>> ans = self.g1.kl_loss('Geometric', probs_b, probs_a)
->>> # An additional `probs` must be passed in.
->>> ans = self.g2.kl_loss('Geometric', probs_b, probs_a)
->>>
->>>
->>> # Examples of `sample`.
->>> # Args:
->>> # shape (tuple): the shape of the sample. Default: ()
->>> # probs1 (Tensor): the probability of success of a Bernoulli trail. Default: self.probs.
->>> ans = self.g1.sample()
->>> ans = self.g1.sample((2,3))
->>> ans = self.g1.sample((2,3), probs_b)
->>> ans = self.g2.sample((2,3), probs_a)
+... def __init__(self):
+... super(net, self).__init__():
+... self.g1 = msd.Geometric(0.5, dtype=mstype.int32)
+... self.g2 = msd.Geometric(dtype=mstype.int32)
+...
+... # The following calls are valid in construct.
+... def construct(self, value, probs_b, probs_a):
+...
+... # Private interfaces of probability functions corresponding to public interfaces, including
+... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
+... # Args:
+... # value (Tensor): the value to be evaluated.
+... # probs1 (Tensor): the probability of success of a Bernoulli trail. Default: self.probs.
+...
+... # Examples of `prob`.
+... # Similar calls can be made to other probability functions
+... # by replacing `prob` by the name of the function.
+... ans = self.g1.prob(value)
+... # Evaluate with respect to distribution b.
+... ans = self.g1.prob(value, probs_b)
+... # `probs` must be passed in during function calls.
+... ans = self.g2.prob(value, probs_a)
+...
+...
+... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
+... # Args:
+... # probs1 (Tensor): the probability of success of a Bernoulli trail. Default: self.probs.
+...
+... # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
+... ans = self.g1.mean() # return 1.0
+... ans = self.g1.mean(probs_b)
+... # Probs must be passed in during function calls
+... ans = self.g2.mean(probs_a)
+...
+...
+... # Interfaces of 'kl_loss' and 'cross_entropy' are the same.
+... # Args:
+... # dist (str): the name of the distribution. Only 'Geometric' is supported.
+... # probs1_b (Tensor): the probability of success of a Bernoulli trail of distribution b.
+... # probs1_a (Tensor): the probability of success of a Bernoulli trail of distribution a. Default: self.probs.
+...
+... # Examples of `kl_loss`. `cross_entropy` is similar.
+... ans = self.g1.kl_loss('Geometric', probs_b)
+... ans = self.g1.kl_loss('Geometric', probs_b, probs_a)
+... # An additional `probs` must be passed in.
+... ans = self.g2.kl_loss('Geometric', probs_b, probs_a)
+...
+...
+... # Examples of `sample`.
+... # Args:
+... # shape (tuple): the shape of the sample. Default: ()
+... # probs1 (Tensor): the probability of success of a Bernoulli trail. Default: self.probs.
+... ans = self.g1.sample()
+... ans = self.g1.sample((2,3))
+... ans = self.g1.sample((2,3), probs_b)
+... ans = self.g2.sample((2,3), probs_a)
+...
 """
 def __init__(self,

@@ -50,47 +50,48 @@ class Gumbel(TransformedDistribution):
 >>>
 >>> # To use a Gumbel distribution in a network.
 >>> class net(Cell):
->>> def __init__(self):
->>> super(net, self).__init__():
->>> self.g1 = msd.Gumbel(0.0, 1.0, dtype=mstype.float32)
->>>
->>> # The following calls are valid in construct.
->>> def construct(self, value, loc_b, scale_b):
->>>
->>> # Private interfaces of probability functions corresponding to public interfaces, including
->>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same
->>> # arguments as follows.
->>> # Args:
->>> # value (Tensor): the value to be evaluated.
->>>
->>> # Examples of `prob`.
->>> # Similar calls can be made to other probability functions
->>> # by replacing 'prob' by the name of the function.
->>> ans = self.g1.prob(value)
->>>
->>> # Functions `mean`, `mode`, sd`, `var`, and `entropy` do not take in any argument.
->>> ans = self.g1.mean()
->>> ans = self.g1.mode()
->>> ans = self.g1.sd()
->>> ans = self.g1.entropy()
->>> ans = self.g1.var()
->>>
->>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
->>> # Args:
->>> # dist (str): the type of the distributions. Only "Gumbel" is supported.
->>> # loc_b (Tensor): the loc of distribution b.
->>> # scale_b (Tensor): the scale distribution b.
->>>
->>> # Examples of `kl_loss`. `cross_entropy` is similar.
->>> ans = self.g1.kl_loss('Gumbel', loc_b, scale_b)
->>> ans = self.g1.cross_entropy('Gumbel', loc_b, scale_b)
->>>
->>> # Examples of `sample`.
->>> # Args:
->>> # shape (tuple): the shape of the sample. Default: ()
->>>
->>> ans = self.g1.sample()
->>> ans = self.g1.sample((2,3))
+... def __init__(self):
+... super(net, self).__init__():
+... self.g1 = msd.Gumbel(0.0, 1.0, dtype=mstype.float32)
+...
+... # The following calls are valid in construct.
+... def construct(self, value, loc_b, scale_b):
+...
+... # Private interfaces of probability functions corresponding to public interfaces, including
+... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same
+... # arguments as follows.
+... # Args:
+... # value (Tensor): the value to be evaluated.
+...
+... # Examples of `prob`.
+... # Similar calls can be made to other probability functions
+... # by replacing 'prob' by the name of the function.
+... ans = self.g1.prob(value)
+...
+... # Functions `mean`, `mode`, sd`, `var`, and `entropy` do not take in any argument.
+... ans = self.g1.mean()
+... ans = self.g1.mode()
+... ans = self.g1.sd()
+... ans = self.g1.entropy()
+... ans = self.g1.var()
+...
+... # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
+... # Args:
+... # dist (str): the type of the distributions. Only "Gumbel" is supported.
+... # loc_b (Tensor): the loc of distribution b.
+... # scale_b (Tensor): the scale distribution b.
+...
+... # Examples of `kl_loss`. `cross_entropy` is similar.
+... ans = self.g1.kl_loss('Gumbel', loc_b, scale_b)
+... ans = self.g1.cross_entropy('Gumbel', loc_b, scale_b)
+...
+... # Examples of `sample`.
+... # Args:
+... # shape (tuple): the shape of the sample. Default: ()
+...
+... ans = self.g1.sample()
+... ans = self.g1.sample((2,3))
+...
 """
 def __init__(self,

@@ -53,75 +53,76 @@ class LogNormal(msd.TransformedDistribution):
 >>>
 >>> # To use a LogNormal distribution in a network.
 >>> class net(Cell):
->>> def __init__(self):
->>> super(net, self).__init__():
->>> self.n1 = msd.LogNormal(0.0, 1.0, dtype=mstype.float32)
->>> self.n2 = msd.LogNormal(dtype=mstype.float32)
->>>
->>> # The following calls are valid in construct.
->>> def construct(self, value, loc_b, scale_b, loc_a, scale_a):
->>>
->>> # Private interfaces of probability functions corresponding to public interfaces, including
->>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same
->>> # arguments as follows.
->>> # Args:
->>> # value (Tensor): the value to be evaluated.
->>> # loc (Tensor): the loc of distribution. Default: None. If `loc` is passed in as None,
->>> # the mean of the underlying Normal distribution will be used.
->>> # scale (Tensor): the scale of distribution. Default: None. If `scale` is passed in as None,
->>> # the standard deviation of the underlying Normal distribution will be used.
->>>
->>> # Examples of `prob`.
->>> # Similar calls can be made to other probability functions
->>> # by replacing 'prob' by the name of the function.
->>> ans = self.n1.prob(value)
->>> # Evaluate with respect to distribution b.
->>> ans = self.n1.prob(value, loc_b, scale_b)
->>> # `loc` and `scale` must be passed in during function calls since they were not passed in construct.
->>> ans = self.n2.prob(value, loc_a, scale_a)
->>>
->>>
->>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
->>> # Args:
->>> # loc (Tensor): the loc of distribution. Default: None. If `loc` is passed in as None,
->>> # the mean of the underlying Normal distribution will be used.
->>> # scale (Tensor): the scale of distribution. Default: None. If `scale` is passed in as None,
->>> # the standard deviation of the underlying Normal distribution will be used.
->>>
->>> # Example of `mean`. `sd`, `var`, and `entropy` are similar.
->>> ans = self.n1.mean() # return 0.0
->>> ans = self.n1.mean(loc_b, scale_b) # return mean_b
->>> # `loc` and `scale` must be passed in during function calls since they were not passed in construct.
->>> ans = self.n2.mean(loc_a, scale_a)
->>>
->>>
->>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
->>> # Args:
->>> # dist (str): the type of the distributions. Only "Normal" is supported.
->>> # loc_b (Tensor): the loc of distribution b.
->>> # scale_b (Tensor): the scale distribution b.
->>> # loc_a (Tensor): the loc of distribution a. Default: None. If `loc` is passed in as None,
->>> # the mean of the underlying Normal distribution will be used.
->>> # scale_a (Tensor): the scale distribution a. Default: None. If `scale` is passed in as None,
->>> # the standard deviation of the underlying Normal distribution will be used.
->>>
->>> # Examples of `kl_loss`. `cross_entropy` is similar.
->>> ans = self.n1.kl_loss('Normal', loc_b, scale_b)
->>> ans = self.n1.kl_loss('Normal', loc_b, scale_b, loc_a, scale_a)
->>> # Additional `loc` and `scale` must be passed in since they were not passed in construct.
->>> ans = self.n2.kl_loss('Normal', loc_b, scale_b, loc_a, scale_a)
->>>
->>> # Examples of `sample`.
->>> # Args:
->>> # shape (tuple): the shape of the sample. Default: ()
->>> # loc (Tensor): the loc of the distribution. Default: None. If `loc` is passed in as None,
->>> # the mean of the underlying Normal distribution will be used.
->>> # scale (Tensor): the scale of the distribution. Default: None. If `scale` is passed in as None,
->>> # the standard deviation of the underlying Normal distribution will be used.
->>> ans = self.n1.sample()
->>> ans = self.n1.sample((2,3))
->>> ans = self.n1.sample((2,3), loc_b, scale_b)
->>> ans = self.n2.sample((2,3), loc_a, scale_a)
+... def __init__(self):
+... super(net, self).__init__():
+... self.n1 = msd.LogNormal(0.0, 1.0, dtype=mstype.float32)
+... self.n2 = msd.LogNormal(dtype=mstype.float32)
+...
+... # The following calls are valid in construct.
+... def construct(self, value, loc_b, scale_b, loc_a, scale_a):
+...
+... # Private interfaces of probability functions corresponding to public interfaces, including
+... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same
+... # arguments as follows.
+... # Args:
+... # value (Tensor): the value to be evaluated.
+... # loc (Tensor): the loc of distribution. Default: None. If `loc` is passed in as None,
+... # the mean of the underlying Normal distribution will be used.
+... # scale (Tensor): the scale of distribution. Default: None. If `scale` is passed in as None,
+... # the standard deviation of the underlying Normal distribution will be used.
+...
+... # Examples of `prob`.
+... # Similar calls can be made to other probability functions
+... # by replacing 'prob' by the name of the function.
+... ans = self.n1.prob(value)
+... # Evaluate with respect to distribution b.
+... ans = self.n1.prob(value, loc_b, scale_b)
+... # `loc` and `scale` must be passed in during function calls since they were not passed in construct.
+... ans = self.n2.prob(value, loc_a, scale_a)
+...
+...
+... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
+... # Args:
+... # loc (Tensor): the loc of distribution. Default: None. If `loc` is passed in as None,
+... # the mean of the underlying Normal distribution will be used.
+... # scale (Tensor): the scale of distribution. Default: None. If `scale` is passed in as None,
+... # the standard deviation of the underlying Normal distribution will be used.
+...
+... # Example of `mean`. `sd`, `var`, and `entropy` are similar.
+... ans = self.n1.mean() # return 0.0
+... ans = self.n1.mean(loc_b, scale_b) # return mean_b
+... # `loc` and `scale` must be passed in during function calls since they were not passed in construct.
+... ans = self.n2.mean(loc_a, scale_a)
+...
+...
+... # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
+... # Args:
+... # dist (str): the type of the distributions. Only "Normal" is supported.
+... # loc_b (Tensor): the loc of distribution b.
+... # scale_b (Tensor): the scale distribution b.
+... # loc_a (Tensor): the loc of distribution a. Default: None. If `loc` is passed in as None,
+... # the mean of the underlying Normal distribution will be used.
+... # scale_a (Tensor): the scale distribution a. Default: None. If `scale` is passed in as None,
+... # the standard deviation of the underlying Normal distribution will be used.
+...
+... # Examples of `kl_loss`. `cross_entropy` is similar.
+... ans = self.n1.kl_loss('Normal', loc_b, scale_b)
+... ans = self.n1.kl_loss('Normal', loc_b, scale_b, loc_a, scale_a)
+... # Additional `loc` and `scale` must be passed in since they were not passed in construct.
+... ans = self.n2.kl_loss('Normal', loc_b, scale_b, loc_a, scale_a)
+...
+... # Examples of `sample`.
+... # Args:
+... # shape (tuple): the shape of the sample. Default: ()
+... # loc (Tensor): the loc of the distribution. Default: None. If `loc` is passed in as None,
+... # the mean of the underlying Normal distribution will be used.
+... # scale (Tensor): the scale of the distribution. Default: None. If `scale` is passed in as None,
+... # the standard deviation of the underlying Normal distribution will be used.
+... ans = self.n1.sample()
+... ans = self.n1.sample((2,3))
+... ans = self.n1.sample((2,3), loc_b, scale_b)
+... ans = self.n2.sample((2,3), loc_a, scale_a)
+...
 """
 def __init__(self,

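For readers trying these interfaces outside a network, the sketch below (an editorial addition, not part of the diff) exercises the same `mean` and `sample` calls eagerly. It assumes `msd.Normal` as a stand-in for the loc/scale distribution documented above and PyNative mode; the printed formatting of scalar Tensors may differ across MindSpore versions.

>>> import mindspore.nn.probability.distribution as msd
>>> from mindspore import context, dtype as mstype
>>> context.set_context(mode=context.PYNATIVE_MODE)
>>> n1 = msd.Normal(0.0, 1.0, dtype=mstype.float32)
>>> # With no arguments, mean() falls back to the value given at construction.
>>> print(n1.mean())
0.0
>>> # sample() takes the sample shape as a tuple.
>>> print(n1.sample((2, 3)).shape)
(2, 3)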
@ -53,50 +53,51 @@ class Logistic(Distribution):
>>> >>>
>>> # To use a Logistic distribution in a network. >>> # To use a Logistic distribution in a network.
>>> class net(Cell): >>> class net(Cell):
>>> def __init__(self): ... def __init__(self):
>>> super(net, self).__init__() ... super(net, self).__init__()
>>> self.l1 = msd.Logistic(0.0, 1.0, dtype=mstype.float32) ... self.l1 = msd.Logistic(0.0, 1.0, dtype=mstype.float32)
>>> self.l2 = msd.Logistic(dtype=mstype.float32) ... self.l2 = msd.Logistic(dtype=mstype.float32)
>>> ...
>>> # The following calls are valid in construct. ... # The following calls are valid in construct.
>>> def construct(self, value, loc_b, scale_b, loc_a, scale_a): ... def construct(self, value, loc_b, scale_b, loc_a, scale_a):
>>> ...
>>> # Private interfaces of probability functions corresponding to public interfaces, including ... # Private interfaces of probability functions corresponding to public interfaces, including
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows. ... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
>>> # Args: ... # Args:
>>> # value (Tensor): the value to be evaluated. ... # value (Tensor): the value to be evaluated.
>>> # loc (Tensor): the location of the distribution. Default: self.loc. ... # loc (Tensor): the location of the distribution. Default: self.loc.
>>> # scale (Tensor): the scale of the distribution. Default: self.scale. ... # scale (Tensor): the scale of the distribution. Default: self.scale.
>>> ...
>>> # Examples of `prob`. ... # Examples of `prob`.
>>> # Similar calls can be made to other probability functions ... # Similar calls can be made to other probability functions
>>> # by replacing 'prob' by the name of the function ... # by replacing 'prob' by the name of the function
>>> ans = self.l1.prob(value) ... ans = self.l1.prob(value)
>>> # Evaluate with respect to distribution b. ... # Evaluate with respect to distribution b.
>>> ans = self.l1.prob(value, loc_b, scale_b) ... ans = self.l1.prob(value, loc_b, scale_b)
>>> # `loc` and `scale` must be passed in during function calls ... # `loc` and `scale` must be passed in during function calls
>>> ans = self.l2.prob(value, loc_a, scale_a) ... ans = self.l2.prob(value, loc_a, scale_a)
>>> ...
>>> # Functions `mean`, `mode`, `sd`, `var`, and `entropy` have the same arguments. ... # Functions `mean`, `mode`, `sd`, `var`, and `entropy` have the same arguments.
>>> # Args: ... # Args:
>>> # loc (Tensor): the location of the distribution. Default: self.loc. ... # loc (Tensor): the location of the distribution. Default: self.loc.
>>> # scale (Tensor): the scale of the distribution. Default: self.scale. ... # scale (Tensor): the scale of the distribution. Default: self.scale.
>>> ...
>>> # Example of `mean`. `mode`, `sd`, `var`, and `entropy` are similar. ... # Example of `mean`. `mode`, `sd`, `var`, and `entropy` are similar.
>>> ans = self.l1.mean() # return 0.0 ... ans = self.l1.mean() # return 0.0
>>> ans = self.l1.mean(loc_b, scale_b) # return loc_b ... ans = self.l1.mean(loc_b, scale_b) # return loc_b
>>> # `loc` and `scale` must be passed in during function calls. ... # `loc` and `scale` must be passed in during function calls.
>>> ans = self.l2.mean(loc_a, scale_a) ... ans = self.l2.mean(loc_a, scale_a)
>>> ...
>>> # Examples of `sample`. ... # Examples of `sample`.
>>> # Args: ... # Args:
>>> # shape (tuple): the shape of the sample. Default: () ... # shape (tuple): the shape of the sample. Default: ()
>>> # loc (Tensor): the location of the distribution. Default: self.loc. ... # loc (Tensor): the location of the distribution. Default: self.loc.
>>> # scale (Tensor): the scale of the distribution. Default: self.scale. ... # scale (Tensor): the scale of the distribution. Default: self.scale.
>>> ans = self.l1.sample() ... ans = self.l1.sample()
>>> ans = self.l1.sample((2,3)) ... ans = self.l1.sample((2,3))
>>> ans = self.l1.sample((2,3), loc_b, scale_b) ... ans = self.l1.sample((2,3), loc_b, scale_b)
>>> ans = self.l2.sample((2,3), loc_a, scale_a) ... ans = self.l2.sample((2,3), loc_a, scale_a)
...
""" """
def __init__(self, def __init__(self,

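A minimal eager-mode sketch of the Logistic interfaces above (again an editorial addition, not part of the diff; exact printed values are illustrative):

>>> import mindspore.nn.probability.distribution as msd
>>> from mindspore import context, dtype as mstype
>>> context.set_context(mode=context.PYNATIVE_MODE)
>>> l1 = msd.Logistic(0.0, 1.0, dtype=mstype.float32)
>>> # The mean of Logistic(loc, scale) is loc, so this returns 0.0.
>>> print(l1.mean())
0.0
>>> # Draw a (2, 3) batch of samples.
>>> print(l1.sample((2, 3)).shape)
(2, 3)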
@ -53,66 +53,67 @@ class Normal(Distribution):
>>> >>>
>>> # To use a Normal distribution in a network. >>> # To use a Normal distribution in a network.
>>> class net(Cell): >>> class net(Cell):
>>> def __init__(self): ... def __init__(self):
>>> super(net, self).__init__() ... super(net, self).__init__()
>>> self.n1 = msd.Normal(0.0, 1.0, dtype=mstype.float32) ... self.n1 = msd.Normal(0.0, 1.0, dtype=mstype.float32)
>>> self.n2 = msd.Normal(dtype=mstype.float32) ... self.n2 = msd.Normal(dtype=mstype.float32)
>>> ...
>>> # The following calls are valid in construct. ... # The following calls are valid in construct.
>>> def construct(self, value, mean_b, sd_b, mean_a, sd_a): ... def construct(self, value, mean_b, sd_b, mean_a, sd_a):
>>> ...
>>> # Private interfaces of probability functions corresponding to public interfaces, including ... # Private interfaces of probability functions corresponding to public interfaces, including
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows. ... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
>>> # Args: ... # Args:
>>> # value (Tensor): the value to be evaluated. ... # value (Tensor): the value to be evaluated.
>>> # mean (Tensor): the mean of distribution. Default: self._mean_value. ... # mean (Tensor): the mean of distribution. Default: self._mean_value.
>>> # sd (Tensor): the standard deviation of distribution. Default: self._sd_value. ... # sd (Tensor): the standard deviation of distribution. Default: self._sd_value.
>>> ...
>>> # Examples of `prob`. ... # Examples of `prob`.
>>> # Similar calls can be made to other probability functions ... # Similar calls can be made to other probability functions
>>> # by replacing 'prob' by the name of the function ... # by replacing 'prob' by the name of the function
>>> ans = self.n1.prob(value) ... ans = self.n1.prob(value)
>>> # Evaluate with respect to distribution b. ... # Evaluate with respect to distribution b.
>>> ans = self.n1.prob(value, mean_b, sd_b) ... ans = self.n1.prob(value, mean_b, sd_b)
>>> # `mean` and `sd` must be passed in during function calls ... # `mean` and `sd` must be passed in during function calls
>>> ans = self.n2.prob(value, mean_a, sd_a) ... ans = self.n2.prob(value, mean_a, sd_a)
>>> ...
>>> ...
>>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments. ... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
>>> # Args: ... # Args:
>>> # mean (Tensor): the mean of distribution. Default: self._mean_value. ... # mean (Tensor): the mean of distribution. Default: self._mean_value.
>>> # sd (Tensor): the standard deviation of distribution. Default: self._sd_value. ... # sd (Tensor): the standard deviation of distribution. Default: self._sd_value.
>>> ...
>>> # Example of `mean`. `sd`, `var`, and `entropy` are similar. ... # Example of `mean`. `sd`, `var`, and `entropy` are similar.
>>> ans = self.n1.mean() # return 0.0 ... ans = self.n1.mean() # return 0.0
>>> ans = self.n1.mean(mean_b, sd_b) # return mean_b ... ans = self.n1.mean(mean_b, sd_b) # return mean_b
>>> # `mean` and `sd` must be passed in during function calls. ... # `mean` and `sd` must be passed in during function calls.
>>> ans = self.n2.mean(mean_a, sd_a) ... ans = self.n2.mean(mean_a, sd_a)
>>> ...
>>> ...
>>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same: ... # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
>>> # Args: ... # Args:
>>> # dist (str): the type of the distributions. Only "Normal" is supported. ... # dist (str): the type of the distributions. Only "Normal" is supported.
>>> # mean_b (Tensor): the mean of distribution b. ... # mean_b (Tensor): the mean of distribution b.
>>> # sd_b (Tensor): the standard deviation of distribution b. ... # sd_b (Tensor): the standard deviation of distribution b.
>>> # mean_a (Tensor): the mean of distribution a. Default: self._mean_value. ... # mean_a (Tensor): the mean of distribution a. Default: self._mean_value.
>>> # sd_a (Tensor): the standard deviation of distribution a. Default: self._sd_value. ... # sd_a (Tensor): the standard deviation of distribution a. Default: self._sd_value.
>>> ...
>>> # Examples of `kl_loss`. `cross_entropy` is similar. ... # Examples of `kl_loss`. `cross_entropy` is similar.
>>> ans = self.n1.kl_loss('Normal', mean_b, sd_b) ... ans = self.n1.kl_loss('Normal', mean_b, sd_b)
>>> ans = self.n1.kl_loss('Normal', mean_b, sd_b, mean_a, sd_a) ... ans = self.n1.kl_loss('Normal', mean_b, sd_b, mean_a, sd_a)
>>> # Additional `mean` and `sd` must be passed in. ... # Additional `mean` and `sd` must be passed in.
>>> ans = self.n2.kl_loss('Normal', mean_b, sd_b, mean_a, sd_a) ... ans = self.n2.kl_loss('Normal', mean_b, sd_b, mean_a, sd_a)
>>> ...
>>> # Examples of `sample`. ... # Examples of `sample`.
>>> # Args: ... # Args:
>>> # shape (tuple): the shape of the sample. Default: () ... # shape (tuple): the shape of the sample. Default: ()
>>> # mean (Tensor): the mean of the distribution. Default: self._mean_value. ... # mean (Tensor): the mean of the distribution. Default: self._mean_value.
>>> # sd (Tensor): the standard deviation of the distribution. Default: self._sd_value. ... # sd (Tensor): the standard deviation of the distribution. Default: self._sd_value.
>>> ans = self.n1.sample() ... ans = self.n1.sample()
>>> ans = self.n1.sample((2,3)) ... ans = self.n1.sample((2,3))
>>> ans = self.n1.sample((2,3), mean_b, sd_b) ... ans = self.n1.sample((2,3), mean_b, sd_b)
>>> ans = self.n2.sample((2,3), mean_a, sd_a) ... ans = self.n2.sample((2,3), mean_a, sd_a)
...
""" """
def __init__(self, def __init__(self,

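To make the `prob` and `kl_loss` semantics above concrete, here is a short standalone sketch (editorial, not part of the diff). It evaluates the density of N(0, 1) at two points and the KL divergence against an explicitly supplied second distribution; only shapes are checked, since sampled and computed values depend on the backend.

>>> import numpy as np
>>> import mindspore.nn.probability.distribution as msd
>>> from mindspore import Tensor, context, dtype as mstype
>>> context.set_context(mode=context.PYNATIVE_MODE)
>>> n1 = msd.Normal(0.0, 1.0, dtype=mstype.float32)
>>> value = Tensor(np.array([0.0, 1.0]), mstype.float32)
>>> # The density is evaluated elementwise, so the output shape follows `value`.
>>> print(n1.prob(value).shape)
(2,)
>>> # KL(N(0, 1) || N(1, 2)), with mean_b and sd_b passed in explicitly.
>>> mean_b = Tensor(np.array([1.0]), mstype.float32)
>>> sd_b = Tensor(np.array([2.0]), mstype.float32)
>>> print(n1.kl_loss('Normal', mean_b, sd_b).shape)
(1,)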
@ -54,19 +54,20 @@ class TransformedDistribution(Distribution):
>>> import mindspore.nn.probability.distribution as msd >>> import mindspore.nn.probability.distribution as msd
>>> import mindspore.nn.probability.bijector as msb >>> import mindspore.nn.probability.bijector as msb
>>> ln = msd.TransformedDistribution(msb.Exp(), >>> ln = msd.TransformedDistribution(msb.Exp(),
>>> msd.Normal(0.0, 1.0, dtype=mstype.float32)) ... msd.Normal(0.0, 1.0, dtype=mstype.float32))
>>> ...
>>> # To use a transformed distribution in a network. >>> # To use a transformed distribution in a network.
>>> class net(Cell): >>> class net(Cell):
>>> def __init__(self): ... def __init__(self):
>>> super(net, self).__init__() ... super(net, self).__init__()
>>> self.ln = msd.TransformedDistribution(msb.Exp(), ... self.ln = msd.TransformedDistribution(msb.Exp(),
>>> msd.Normal(0.0, 1.0, dtype=mstype.float32)) ... msd.Normal(0.0, 1.0, dtype=mstype.float32))
>>> ...
>>> def construct(self, value): ... def construct(self, value):
>>> # Similar calls can be made to other functions ... # Similar calls can be made to other functions
>>> # by replacing 'sample' by the name of the function. ... # by replacing 'sample' by the name of the function.
>>> ans = self.ln.sample(shape=(2, 3)) ... ans = self.ln.sample(shape=(2, 3))
...
""" """
def __init__(self, def __init__(self,

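The following standalone sketch (editorial, not part of the diff) shows the same construction outside a network: `msb.Exp()` pushes a Normal(0, 1) base distribution forward through exp, giving a log-normal law whose samples are strictly positive.

>>> import mindspore.nn.probability.distribution as msd
>>> import mindspore.nn.probability.bijector as msb
>>> from mindspore import context, dtype as mstype
>>> context.set_context(mode=context.PYNATIVE_MODE)
>>> ln = msd.TransformedDistribution(msb.Exp(),
...                                  msd.Normal(0.0, 1.0, dtype=mstype.float32))
>>> # sample() takes the sample shape as a keyword, as in the docstring above.
>>> s = ln.sample(shape=(2, 3))
>>> print(s.shape)
(2, 3)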
@ -52,66 +52,67 @@ class Uniform(Distribution):
>>> >>>
>>> # To use a Uniform distribution in a network. >>> # To use a Uniform distribution in a network.
>>> class net(Cell): >>> class net(Cell):
>>> def __init__(self): ... def __init__(self):
>>> super(net, self).__init__() ... super(net, self).__init__()
>>> self.u1 = msd.Uniform(0.0, 1.0, dtype=mstype.float32) ... self.u1 = msd.Uniform(0.0, 1.0, dtype=mstype.float32)
>>> self.u2 = msd.Uniform(dtype=mstype.float32) ... self.u2 = msd.Uniform(dtype=mstype.float32)
>>> ...
>>> # All the following calls in construct are valid. ... # All the following calls in construct are valid.
>>> def construct(self, value, low_b, high_b, low_a, high_a): ... def construct(self, value, low_b, high_b, low_a, high_a):
>>> ...
>>> # Private interfaces of probability functions corresponding to public interfaces, including ... # Private interfaces of probability functions corresponding to public interfaces, including
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments. ... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments.
>>> # Args: ... # Args:
>>> # value (Tensor): the value to be evaluated. ... # value (Tensor): the value to be evaluated.
>>> # low (Tensor): the lower bound of distribution. Default: self.low. ... # low (Tensor): the lower bound of distribution. Default: self.low.
>>> # high (Tensor): the upper bound of distribution. Default: self.high. ... # high (Tensor): the upper bound of distribution. Default: self.high.
>>> ...
>>> # Examples of `prob`. ... # Examples of `prob`.
>>> # Similar calls can be made to other probability functions ... # Similar calls can be made to other probability functions
>>> # by replacing 'prob' by the name of the function. ... # by replacing 'prob' by the name of the function.
>>> ans = self.u1.prob(value) ... ans = self.u1.prob(value)
>>> # Evaluate with respect to distribution b. ... # Evaluate with respect to distribution b.
>>> ans = self.u1.prob(value, low_b, high_b) ... ans = self.u1.prob(value, low_b, high_b)
>>> # `high` and `low` must be passed in during function calls. ... # `high` and `low` must be passed in during function calls.
>>> ans = self.u2.prob(value, low_a, high_a) ... ans = self.u2.prob(value, low_a, high_a)
>>> ...
>>> ...
>>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments. ... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
>>> # Args: ... # Args:
>>> # low (Tensor): the lower bound of distribution. Default: self.low. ... # low (Tensor): the lower bound of distribution. Default: self.low.
>>> # high (Tensor): the upper bound of distribution. Default: self.high. ... # high (Tensor): the upper bound of distribution. Default: self.high.
>>> ...
>>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar. ... # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
>>> ans = self.u1.mean() # return 0.5 ... ans = self.u1.mean() # return 0.5
>>> ans = self.u1.mean(low_b, high_b) # return (low_b + high_b) / 2 ... ans = self.u1.mean(low_b, high_b) # return (low_b + high_b) / 2
>>> # `high` and `low` must be passed in during function calls. ... # `high` and `low` must be passed in during function calls.
>>> ans = self.u2.mean(low_a, high_a) ... ans = self.u2.mean(low_a, high_a)
>>> ...
>>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same. ... # Interfaces of 'kl_loss' and 'cross_entropy' are the same.
>>> # Args: ... # Args:
>>> # dist (str): the type of the distributions. Should be "Uniform" in this case. ... # dist (str): the type of the distributions. Should be "Uniform" in this case.
>>> # low_b (Tensor): the lower bound of distribution b. ... # low_b (Tensor): the lower bound of distribution b.
>>> # high_b (Tensor): the upper bound of distribution b. ... # high_b (Tensor): the upper bound of distribution b.
>>> # low_a (Tensor): the lower bound of distribution a. Default: self.low. ... # low_a (Tensor): the lower bound of distribution a. Default: self.low.
>>> # high_a (Tensor): the upper bound of distribution a. Default: self.high. ... # high_a (Tensor): the upper bound of distribution a. Default: self.high.
>>> ...
>>> # Examples of `kl_loss`. `cross_entropy` is similar. ... # Examples of `kl_loss`. `cross_entropy` is similar.
>>> ans = self.u1.kl_loss('Uniform', low_b, high_b) ... ans = self.u1.kl_loss('Uniform', low_b, high_b)
>>> ans = self.u1.kl_loss('Uniform', low_b, high_b, low_a, high_a) ... ans = self.u1.kl_loss('Uniform', low_b, high_b, low_a, high_a)
>>> # Additional `high` and `low` must be passed in. ... # Additional `high` and `low` must be passed in.
>>> ans = self.u2.kl_loss('Uniform', low_b, high_b, low_a, high_a) ... ans = self.u2.kl_loss('Uniform', low_b, high_b, low_a, high_a)
>>> ...
>>> ...
>>> # Examples of `sample`. ... # Examples of `sample`.
>>> # Args: ... # Args:
>>> # shape (tuple): the shape of the sample. Default: () ... # shape (tuple): the shape of the sample. Default: ()
>>> # low (Tensor): the lower bound of the distribution. Default: self.low. ... # low (Tensor): the lower bound of the distribution. Default: self.low.
>>> # high (Tensor): the upper bound of the distribution. Default: self.high. ... # high (Tensor): the upper bound of the distribution. Default: self.high.
>>> ans = self.u1.sample() ... ans = self.u1.sample()
>>> ans = self.u1.sample((2,3)) ... ans = self.u1.sample((2,3))
>>> ans = self.u1.sample((2,3), low_b, high_b) ... ans = self.u1.sample((2,3), low_b, high_b)
>>> ans = self.u2.sample((2,3), low_a, high_a) ... ans = self.u2.sample((2,3), low_a, high_a)
...
""" """
def __init__(self, def __init__(self,

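A minimal eager-mode sketch of the Uniform interfaces above (editorial, not part of the diff; printed formatting may vary by version):

>>> import mindspore.nn.probability.distribution as msd
>>> from mindspore import context, dtype as mstype
>>> context.set_context(mode=context.PYNATIVE_MODE)
>>> u1 = msd.Uniform(0.0, 1.0, dtype=mstype.float32)
>>> # The mean of U(low, high) is (low + high) / 2 = 0.5 here.
>>> print(u1.mean())
0.5
>>> print(u1.sample((2, 3)).shape)
(2, 3)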
@ -31,14 +31,14 @@ class SparseToDense(Cell):
Examples: Examples:
>>> class SparseToDenseCell(nn.Cell): >>> class SparseToDenseCell(nn.Cell):
>>> def __init__(self, dense_shape): ... def __init__(self, dense_shape):
>>> super(SparseToDenseCell, self).__init__() ... super(SparseToDenseCell, self).__init__()
>>> self.dense_shape = dense_shape ... self.dense_shape = dense_shape
>>> self.sparse_to_dense = nn.SparseToDense() ... self.sparse_to_dense = nn.SparseToDense()
>>> def construct(self, indices, values): ... def construct(self, indices, values):
>>> sparse = SparseTensor(indices, values, self.dense_shape) ... sparse = SparseTensor(indices, values, self.dense_shape)
>>> return self.sparse_to_dense(sparse) ... return self.sparse_to_dense(sparse)
>>> ...
>>> indices = Tensor([[0, 1], [1, 2]]) >>> indices = Tensor([[0, 1], [1, 2]])
>>> values = Tensor([1, 2], dtype=ms.float32) >>> values = Tensor([1, 2], dtype=ms.float32)
>>> dense_shape = (3, 4) >>> dense_shape = (3, 4)

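The SparseToDense example above is truncated by the diff hunk. The sketch below (editorial; the instantiation and run at the end are an assumption about how the full docstring finishes) makes it self-contained and runnable: it scatters `values` at `indices` into a dense (3, 4) Tensor.

>>> import mindspore as ms
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, SparseTensor, context
>>> context.set_context(mode=context.PYNATIVE_MODE)
>>> class SparseToDenseCell(nn.Cell):
...     def __init__(self, dense_shape):
...         super(SparseToDenseCell, self).__init__()
...         self.dense_shape = dense_shape
...         self.sparse_to_dense = nn.SparseToDense()
...     def construct(self, indices, values):
...         # Build the sparse representation, then expand it to a dense Tensor.
...         sparse = SparseTensor(indices, values, self.dense_shape)
...         return self.sparse_to_dense(sparse)
...
>>> indices = Tensor([[0, 1], [1, 2]])
>>> values = Tensor([1, 2], dtype=ms.float32)
>>> dense_shape = (3, 4)
>>> net = SparseToDenseCell(dense_shape)
>>> # The dense output has the requested shape, zero everywhere except `indices`.
>>> print(net(indices, values).shape)
(3, 4)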