fix some errors in docs of probability

pull/14169/head
zhangxinfeng3 4 years ago
parent 669a37739e
commit defb589076

@@ -43,7 +43,7 @@ class Invert(Bijector):
 ...         return self.invert.forward(x_)
 >>> forward = Net()
 >>> x = np.array([2.0, 3.0, 4.0, 5.0]).astype(np.float32)
->>> ans = forward(Tensor(x, dtype=dtype.float32))
+>>> ans = forward(Tensor(x, dtype=mindspore.float32))
 """
 def __init__(self,

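For reference, the corrected Invert example assembles into a runnable script roughly as below. This is a minimal sketch, not the full docstring: the imports and the wrapped msb.Exp() bijector are assumptions, since the hunk only shows the tail of the example.

import numpy as np
import mindspore
import mindspore.nn as nn
import mindspore.nn.probability.bijector as msb
from mindspore import Tensor

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        # Wrapping Exp is an assumption; the hunk does not show which bijector is inverted.
        self.invert = msb.Invert(msb.Exp())

    def construct(self, x_):
        # Invert swaps forward and inverse, so this computes log(x_).
        return self.invert.forward(x_)

forward = Net()
x = np.array([2.0, 3.0, 4.0, 5.0]).astype(np.float32)
# The fix: the dtype comes from the mindspore package, not an undefined `dtype` module.
ans = forward(Tensor(x, dtype=mindspore.float32))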
@@ -52,13 +52,13 @@ class Gumbel(TransformedDistribution):
 >>> class Prob(nn.Cell):
 ...     def __init__(self):
 ...         super(Prob, self).__init__()
-...         self.gum = msd.Gumbel(np.array([0.0]), np.array([[1.0], [2.0]]), dtype=dtype.float32)
+...         self.gum = msd.Gumbel(np.array([0.0]), np.array([[1.0], [2.0]]), dtype=mindspore.float32)
 ...
 ...     def construct(self, x_):
 ...         return self.gum.prob(x_)
 >>> value = np.array([1.0, 2.0]).astype(np.float32)
 >>> pdf = Prob()
->>> output = pdf(Tensor(value, dtype=dtype.float32))
+>>> output = pdf(Tensor(value, dtype=mindspore.float32))
 """
 def __init__(self,

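Putting the corrected Gumbel snippet together, a self-contained sketch (assuming a MindSpore 1.x install where mindspore.nn.probability is available) looks like this:

import numpy as np
import mindspore
import mindspore.nn as nn
import mindspore.nn.probability.distribution as msd
from mindspore import Tensor

class Prob(nn.Cell):
    def __init__(self):
        super(Prob, self).__init__()
        # loc has shape (1,) and scale has shape (2, 1), so prob() broadcasts
        # against the length-2 value passed in below.
        self.gum = msd.Gumbel(np.array([0.0]), np.array([[1.0], [2.0]]),
                              dtype=mindspore.float32)

    def construct(self, x_):
        return self.gum.prob(x_)

value = np.array([1.0, 2.0]).astype(np.float32)
pdf = Prob()
output = pdf(Tensor(value, dtype=mindspore.float32))
print(output.shape)  # expected (2, 2) by NumPy-style broadcasting (an inference, not shown in the hunk)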
@@ -48,14 +48,14 @@ class LogNormal(msd.TransformedDistribution):
 >>> import mindspore.nn as nn
 >>> import mindspore.nn.probability.distribution as msd
 >>> from mindspore import Tensor
-... class Prob(nn.Cell):
-...     def __init__(self):
-...         super(Prob, self).__init__()
-...         self.ln = msd.LogNormal(np.array([0.3]), np.array([[0.2], [0.4]]), dtype=dtype.float32)
-...     def construct(self, x_):
-...         return self.ln.prob(x_)
+>>> class Prob(nn.Cell):
+...     def __init__(self):
+...         super(Prob, self).__init__()
+...         self.ln = msd.LogNormal(np.array([0.3]), np.array([[0.2], [0.4]]), dtype=mindspore.float32)
+...     def construct(self, x_):
+...         return self.ln.prob(x_)
 >>> pdf = Prob()
->>> output = pdf(Tensor([1.0, 2.0], dtype=dtype.float32))
+>>> output = pdf(Tensor([1.0, 2.0], dtype=mindspore.float32))
 """
 def __init__(self,

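The corrected LogNormal example (with the `...` prompt on the class line fixed to `>>>`) corresponds to roughly this runnable sketch; the numpy and mindspore imports are filled in as an assumption, matching the surrounding docstring:

import numpy as np
import mindspore
import mindspore.nn as nn
import mindspore.nn.probability.distribution as msd
from mindspore import Tensor

class Prob(nn.Cell):
    def __init__(self):
        super(Prob, self).__init__()
        # loc (1,) and scale (2, 1) broadcast against the value, as in the Gumbel example.
        self.ln = msd.LogNormal(np.array([0.3]), np.array([[0.2], [0.4]]),
                                dtype=mindspore.float32)

    def construct(self, x_):
        return self.ln.prob(x_)

pdf = Prob()
output = pdf(Tensor([1.0, 2.0], dtype=mindspore.float32))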
@@ -35,7 +35,7 @@ class Poisson(Distribution):
     name (str): The name of the distribution. Default: 'Poisson'.
 Supported Platforms:
-    ``Ascend`` ``GPU``
+    ``Ascend``
 Note:
     `rate` must be strictly greater than 0.
@@ -82,14 +82,14 @@ class Poisson(Distribution):
 >>> # Examples of `mean`, `sd`, `mode`, and `var` are similar.
 >>> ans = p1.mean() # return 2
 >>> print(ans.shape)
-()
+(1,)
 >>> ans = p1.mean(rate_b) # return 1 / rate_b
 >>> print(ans.shape)
 (3,)
 >>> # `rate` must be passed in during function calls.
 >>> ans = p2.mean(rate_a)
 >>> print(ans.shape)
-()
+(1,)
 >>> # Examples of `sample`.
 >>> # Args:
 >>> # shape (tuple): the shape of the sample. Default: ()

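The shape corrections reflect that `p1` is built with a one-element rate, so its batch shape is (1,) rather than scalar. A sketch of why the printed shapes change (the concrete rate values are assumptions for illustration, only the shapes matter, and per the platform fix above Poisson runs on Ascend only):

import mindspore
import mindspore.nn.probability.distribution as msd
from mindspore import Tensor

# p1 carries its own rate (a 1-element list, hence batch shape (1,)).
p1 = msd.Poisson([2.0], dtype=mindspore.float32)
# p2 has no stored rate, so one must be passed to every function call.
p2 = msd.Poisson(dtype=mindspore.float32)

rate_a = Tensor([0.6], dtype=mindspore.float32)
rate_b = Tensor([0.2, 0.5, 0.4], dtype=mindspore.float32)

print(p1.mean().shape)        # (1,), not (): follows the stored 1-element rate
print(p1.mean(rate_b).shape)  # (3,): a passed-in rate overrides the stored one
print(p2.mean(rate_a).shape)  # (1,): follows the 1-element rate_a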
@@ -58,7 +58,7 @@ class TransformedDistribution(Distribution):
 >>> import mindspore.nn.probability.bijector as msb
 >>> from mindspore import Tensor
 >>> class Net(nn.Cell):
-...     def __init__(self, shape, dtype=dtype.float32, seed=0, name='transformed_distribution'):
+...     def __init__(self, shape, dtype=mindspore.float32, seed=0, name='transformed_distribution'):
 ...         super(Net, self).__init__()
 ...         # create TransformedDistribution distribution
 ...         self.exp = msb.Exp()
@@ -73,7 +73,7 @@ class TransformedDistribution(Distribution):
 >>> shape = (2, 3)
 >>> net = Net(shape=shape, name="LogNormal")
 >>> x = np.array([2.0, 3.0, 4.0, 5.0]).astype(np.float32)
->>> tx = Tensor(x, dtype=dtype.float32)
+>>> tx = Tensor(x, dtype=mindspore.float32)
 >>> cdf, sample = net(tx)
 """

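Assembled from the corrected lines, the TransformedDistribution example reads roughly as below. The msd.Normal(0.0, 1.0) base distribution and the cdf/sample body of construct are assumptions, since the hunks show only the bijector line and the calling code:

import numpy as np
import mindspore
import mindspore.nn as nn
import mindspore.nn.probability.distribution as msd
import mindspore.nn.probability.bijector as msb
from mindspore import Tensor

class Net(nn.Cell):
    def __init__(self, shape, dtype=mindspore.float32, seed=0, name='transformed_distribution'):
        super(Net, self).__init__()
        # create TransformedDistribution distribution
        # exp(Normal) gives a LogNormal; the Normal(0.0, 1.0) base is an assumption.
        self.exp = msb.Exp()
        self.lognormal = msd.TransformedDistribution(
            self.exp, msd.Normal(0.0, 1.0, dtype=dtype), seed=seed, name=name)
        self.shape = shape

    def construct(self, value):
        cdf = self.lognormal.cdf(value)
        sample = self.lognormal.sample(self.shape)
        return cdf, sample

shape = (2, 3)
net = Net(shape=shape, name="LogNormal")
x = np.array([2.0, 3.0, 4.0, 5.0]).astype(np.float32)
tx = Tensor(x, dtype=mindspore.float32)
cdf, sample = net(tx)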