!10418 Changed the docs in distribution and bijector classes

From: @shallydeng
Reviewed-by: @zichun_ye,@wang_zi_dong
Signed-off-by: @zichun_ye
pull/10418/MERGE
Committed by mindspore-ci-bot via Gitee, 4 years ago
commit 6414f4b961

@ -41,19 +41,26 @@ class Exp(PowerTransform):
>>> exp_bijector = nn.probability.bijector.Exp()
>>> value = Tensor([1, 2, 3], dtype=mindspore.float32)
>>> ans1 = exp_bijector.forward(value)
>>> print(ans1)
[ 2.7182817 7.389056 20.085537 ]
>>> print(ans1.shape)
(3,)
>>> ans2 = exp_bijector.inverse(value)
>>> print(ans2)
[0. 0.6931472 1.0986123]
>>> print(ans2.shape)
(3,)
>>> ans3 = exp_bijector.forward_log_jacobian(value)
>>> print(ans3)
[1. 2. 3.]
>>> print(ans3.shape)
(3,)
>>> ans4 = exp_bijector.inverse_log_jacobian(value)
>>> print(ans4)
[-0. -0.6931472 -1.0986123]
>>> print(ans4.shape)
(3,)
"""
def __init__(self,
name='Exp'):
super(Exp, self).__init__(name=name)
def extend_repr(self):
if self.is_scalar_batch:
str_info = 'exp'
else:
str_info = f'batch_shape = {self.batch_shape}'
return str_info

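The printed values in this hunk follow directly from the definition of the Exp bijector: forward is exp(x), inverse is log(x), and the two log-Jacobians are x and -log(x). A quick NumPy cross-check (NumPy is used here only for illustration; it is not part of the docstring):

    import numpy as np

    x = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    print(np.exp(x))     # forward              -> [ 2.7182817  7.389056  20.085537 ]
    print(np.log(x))     # inverse              -> [0.        0.6931472 1.0986123]
    print(x)             # forward_log_jacobian -> log|d exp(x)/dx| = x
    print(-np.log(x))    # inverse_log_jacobian -> log|d log(y)/dy| = -log(y)
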
@ -58,17 +58,17 @@ class GumbelCDF(Bijector):
>>> x = Tensor([1, 2, 3], dtype=mindspore.float32)
>>> y = Tensor([0.1, 0.2, 0.3], dtype=mindspore.float32)
>>> ans1 = gumbel_cdf.forward(x)
>>> print(ans1)
[0.36787945 0.5452392 0.69220066]
>>> print(ans1.shape)
(3,)
>>> ans2 = gumbel_cdf.inverse(y)
>>> print(ans2)
[-0.66806495 0.04822993 0.62874645]
>>> print(ans2.shape)
(3,)
>>> ans3 = gumbel_cdf.forward_log_jacobian(x)
>>> print(ans3)
[-1.6931472 -1.7996778 -2.0610266]
>>> print(ans3.shape)
(3,)
>>> ans4 = gumbel_cdf.inverse_log_jacobian(y)
>>> print(ans4)
[2.1616998 1.8267001 1.7114931]
>>> print(ans4.shape)
(3,)
"""
def __init__(self,

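The gumbel_cdf constructor sits above this hunk and is not shown; the printed values are consistent with loc=1.0 and scale=2.0, for which the forward transform is the Gumbel CDF exp(-exp(-(x - loc)/scale)). A hedged NumPy sketch of that assumption:

    import numpy as np

    loc, scale = 1.0, 2.0                # assumed; inferred from the outputs, not shown in the hunk
    x = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    z = (x - loc) / scale
    print(np.exp(-np.exp(-z)))           # forward -> [0.36787945 0.5452392  0.69220066]
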
@ -40,17 +40,17 @@ class Invert(Bijector):
>>> inv_exp = msb.Invert(msb.Exp())
>>> value = Tensor([1, 2, 3], dtype=mindspore.float32)
>>> ans1 = inv_exp.forward(value)
>>> print(ans1)
[0. 0.6931472 1.0986123]
>>> print(ans1.shape)
(3,)
>>> ans2 = inv_exp.inverse(value)
>>> print(ans2)
[ 2.718282 7.389056 20.085537]
>>> print(ans2.shape)
(3,)
>>> ans3 = inv_exp.forward_log_jacobian(value)
>>> print(ans3)
[-0. -0.6931472 -1.0986123]
>>> print(ans3.shape)
(3,)
>>> ans4 = inv_exp.inverse_log_jacobian(value)
>>> print(ans4)
[1. 2. 3.]
>>> print(ans4.shape)
(3,)
"""
def __init__(self,

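Invert simply swaps the roles of forward and inverse (and of the two log-Jacobians) of the wrapped bijector, which is why these outputs mirror the Exp hunk above. A minimal NumPy illustration:

    import numpy as np

    value = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    print(np.log(value))   # Invert(Exp()).forward == Exp().inverse -> [0. 0.6931472 1.0986123]
    print(np.exp(value))   # Invert(Exp()).inverse == Exp().forward -> [ 2.718282  7.389056 20.085537]
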
@ -56,17 +56,17 @@ class PowerTransform(Bijector):
>>> powertransform = msb.PowerTransform(0.5)
>>> value = Tensor([1, 2, 3], dtype=mindspore.float32)
>>> ans1 = powertransform.forward(value)
>>> print(ans1)
[2.25 4. 6.25]
>>> print(ans1.shape)
(3,)
>>> ans2 = powertransform.inverse(value)
>>> print(ans2)
[0. 0.82842714 1.4641017 ]
>>> print(ans2.shape)
(3,)
>>> ans3 = powertransform.forward_log_jacobian(value)
>>> print(ans3)
[0.40546513 0.6931472 0.91629076]
>>> print(ans3.shape)
(3,)
>>> ans4 = powertransform.inverse_log_jacobian(value)
>>> print(ans4)
[-0. -0.3465736 -0.54930615]
>>> print(ans4.shape)
(3,)
"""
def __init__(self,

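PowerTransform(0.5) applies y = (1 + c*x)**(1/c) with power c = 0.5, and its forward log-Jacobian is (1/c - 1)*log(1 + c*x). A NumPy check of the first and third printed arrays:

    import numpy as np

    c = 0.5
    x = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    print((1 + c * x) ** (1 / c))          # forward              -> [2.25 4.   6.25]
    print((1 / c - 1) * np.log1p(c * x))   # forward_log_jacobian -> [0.40546513 0.6931472  0.91629076]
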
@ -54,18 +54,17 @@ class ScalarAffine(Bijector):
>>> scalaraffine = nn.probability.bijector.ScalarAffine(1.0, 2.0)
>>> value = Tensor([1, 2, 3], dtype=mindspore.float32)
>>> ans1 = scalaraffine.forward(value)
>>> print(ans1)
[3. 4. 5.]
>>> print(ans1.shape)
(3,)
>>> ans2 = scalaraffine.inverse(value)
>>> print(ans2)
[-1. 0. 1.]
>>> print(ans2.shape)
(3,)
>>> ans3 = scalaraffine.forward_log_jacobian(value)
>>> print(ans3)
0.0
>>> print(ans3.shape)
()
>>> ans4 = scalaraffine.inverse_log_jacobian(value)
>>> print(ans4)
0.0
...
>>> print(ans4.shape)
()
"""
def __init__(self,

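Reading the two ScalarAffine(1.0, 2.0) arguments as scale=1.0 and shift=2.0 (which the outputs confirm), the forward map is scale*x + shift and the forward log-Jacobian is log|scale| = 0.0, a scalar, which is why ans3.shape prints as (). A small NumPy check:

    import numpy as np

    scale, shift = 1.0, 2.0
    value = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    print(scale * value + shift)       # forward -> [3. 4. 5.]
    print((value - shift) / scale)     # inverse -> [-1.  0.  1.]
    print(np.log(abs(scale)))          # forward_log_jacobian -> 0.0 (scalar, hence shape ())
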
@ -53,17 +53,17 @@ class Softplus(Bijector):
>>> # To use a Softplus bijector in a network.
>>> value = Tensor([1, 2, 3], dtype=mindspore.float32)
>>> ans1 = softplus.forward(value)
>>> print(ans1)
[1.063464 2.009075 3.0012379]
>>> print(ans1.shape)
(3,)
>>> ans2 = softplus.inverse(value)
>>> print(ans2)
[0.9272933 1.9907573 2.998759 ]
>>> print(ans2.shape)
(3,)
>>> ans3 = softplus.forward_log_jacobian(value)
>>> print(ans3)
[-0.12692806 -0.01814996 -0.00247564]
>>> print(ans3.shape)
(3,)
>>> ans4 = softplus.inverse_log_jacobian(value)
>>> print(ans4)
[0.1454134 0.01848531 0.00248194]
>>> print(ans4.shape)
(3,)
"""
def __init__(self,

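The softplus constructor is outside this hunk; the printed values are consistent with a sharpness of 2.0, i.e. forward(x) = log(1 + exp(k*x)) / k with k = 2. A hedged NumPy check of that assumption:

    import numpy as np

    k = 2.0                                   # assumed sharpness; inferred from the outputs
    value = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    print(np.log1p(np.exp(k * value)) / k)    # forward -> [1.063464  2.009075  3.0012379]
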
@ -64,30 +64,30 @@ class Bernoulli(Distribution):
>>> # Similar calls can be made to other probability functions
>>> # by replacing `prob` by the name of the function.
>>> ans = b1.prob(value)
>>> print(ans)
[0.5 0.5 0.5]
>>> print(ans.shape)
(3,)
>>> # Evaluate `prob` with respect to distribution b.
>>> ans = b1.prob(value, probs_b)
>>> print(ans)
[0.2 0.7 0.4]
>>> print(ans.shape)
(3,)
>>> # `probs` must be passed in during function calls.
>>> ans = b2.prob(value, probs_a)
>>> print(ans)
[0.6 0.4 0.6]
>>> print(ans.shape)
(3,)
>>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
>>> # Args:
>>> # probs1 (Tensor): the probability of success. Default: self.probs.
>>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
>>> ans = b1.mean() # return 0.5
>>> print(ans)
0.5
>>> print(ans.shape)
()
>>> ans = b1.mean(probs_b) # return probs_b
>>> print(ans)
[0.2 0.5 0.4]
>>> print(ans.shape)
(3,)
>>> # `probs` must be passed in during function calls.
>>> ans = b2.mean(probs_a)
>>> print(ans)
[0.6]
>>> print(ans.shape)
(1,)
>>> # Interfaces of `kl_loss` and `cross_entropy` are the same as follows:
>>> # Args:
>>> # dist (str): the name of the distribution. Only 'Bernoulli' is supported.
@ -95,15 +95,15 @@ class Bernoulli(Distribution):
>>> # probs1_a (Tensor): the probability of success of distribution a. Default: self.probs.
>>> # Examples of kl_loss. `cross_entropy` is similar.
>>> ans = b1.kl_loss('Bernoulli', probs_b)
>>> print(ans)
[0.22314356 0. 0.02041098]
>>> print(ans.shape)
(3,)
>>> ans = b1.kl_loss('Bernoulli', probs_b, probs_a)
>>> print(ans)
[0.38190854 0.02013553 0.08109301]
>>> print(ans.shape)
(3,)
>>> # An additional `probs_a` must be passed in.
>>> ans = b2.kl_loss('Bernoulli', probs_b, probs_a)
>>> print(ans)
[0.38190854 0.02013553 0.08109301]
>>> print(ans.shape)
(3,)
>>> # Examples of `sample`.
>>> # Args:
>>> # shape (tuple): the shape of the sample. Default: ().

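The setup this hunk relies on (b1, b2, value, probs_a, probs_b) sits above the hunk and is not shown. A minimal sketch of the pattern, with placeholder probabilities rather than the exact values behind the printed numbers: b1 fixes probs at construction so calls may omit it, while b2 is built without probs and every call must supply it.

    import mindspore
    import mindspore.nn.probability.distribution as msd
    from mindspore import Tensor

    b1 = msd.Bernoulli(0.5, dtype=mindspore.int32)   # probs fixed at construction
    b2 = msd.Bernoulli(dtype=mindspore.int32)        # no probs here; must be passed per call

    value = Tensor([1, 0, 1], dtype=mindspore.float32)           # placeholder values
    probs_a = Tensor([0.6], dtype=mindspore.float32)             # placeholder values
    probs_b = Tensor([0.2, 0.5, 0.4], dtype=mindspore.float32)   # placeholder values

    ans = b1.prob(value)             # falls back to the probs given at construction
    ans = b2.prob(value, probs_a)    # probs must be supplied explicitly
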
@ -71,31 +71,31 @@ class Beta(Distribution):
>>> # Similar calls can be made to other probability functions
>>> # by replacing 'prob' by the name of the function
>>> ans = b1.prob(value)
>>> print(ans)
[0.43740022 1.8750011 0.30720013]
>>> print(ans.shape)
(3,)
>>> # Evaluate with respect to the distribution b.
>>> ans = b1.prob(value, concentration1_b, concentration0_b)
>>> print(ans)
[0.99999964 1.0606599 0.39999983]
>>> print(ans.shape)
(3,)
>>> # `concentration1` and `concentration0` must be passed in during function calls
>>> ans = b2.prob(value, concentration1_a, concentration0_a)
>>> print(ans)
[0.5400001 1.5000001 0.96000004]
>>> print(ans.shape)
(3,)
>>> # Functions `mean`, `sd`, `mode`, `var`, and `entropy` have the same arguments.
>>> # Args:
>>> # concentration1 (Tensor): the concentration1 of the distribution. Default: self._concentration1.
>>> # concentration0 (Tensor): the concentration0 of the distribution. Default: self._concentration0.
>>> # Example of `mean`, `sd`, `mode`, `var`, and `entropy` are similar.
>>> ans = b1.mean()
>>> print(ans)
[0.42857143]
>>> print(ans.shape)
(1,)
>>> ans = b1.mean(concentration1_b, concentration0_b)
>>> print(ans)
[0.5 0.4 0.33333334]
>>> print(ans.shape)
(3,)
>>> # `concentration1` and `concentration0` must be passed in during function calls.
>>> ans = b2.mean(concentration1_a, concentration0_a)
>>> print(ans)
[0.5 0.5 0.5]
>>> print(ans.shape)
(3,)
>>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
>>> # Args:
>>> # dist (str): the type of the distributions. Only "Beta" is supported.
@ -107,15 +107,15 @@ class Beta(Distribution):
>>> # Default: self._concentration0.
>>> # Examples of `kl_loss`. `cross_entropy` is similar.
>>> ans = b1.kl_loss('Beta', concentration1_b, concentration0_b)
>>> print(ans)
[0.34434414 0.24721336 0.26786423]
>>> print(ans.shape)
(3,)
>>> ans = b1.kl_loss('Beta', concentration1_b, concentration0_b, concentration1_a, concentration0_a)
>>> print(ans)
[0.12509346 0.13629508 0.26527953]
>>> print(ans.shape)
(3,)
>>> # Additional `concentration1` and `concentration0` must be passed in.
>>> ans = b2.kl_loss('Beta', concentration1_b, concentration0_b, concentration1_a, concentration0_a)
>>> print(ans)
[0.12509346 0.13629508 0.26527953]
>>> print(ans.shape)
(3,)
>>> # Examples of `sample`.
>>> # Args:
>>> # shape (tuple): the shape of the sample. Default: ()

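The Beta mean is concentration1 / (concentration1 + concentration0); the first printed mean, [0.42857143], is 3/7, consistent with a Beta(3.0, 4.0) for b1, and the second mean line matches the hedged assumption below:

    import numpy as np

    concentration1_b = 1.0                        # assumed; inferred from the printed means
    concentration0_b = np.array([1.0, 1.5, 2.0])  # assumed; inferred from the printed means
    print(concentration1_b / (concentration1_b + concentration0_b))   # mean -> [0.5  0.4  0.33333334]
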
@ -65,30 +65,30 @@ class Categorical(Distribution):
>>> # Similar calls can be made to other probability functions
>>> # by replacing `prob` by the name of the function.
>>> ans = ca1.prob(value)
>>> print(ans)
[0.8 0.2]
>>> print(ans.shape)
(2,)
>>> # Evaluate `prob` with respect to distribution b.
>>> ans = ca1.prob(value, probs_b)
>>> print(ans)
[0.65 0.35]
>>> print(ans.shape)
(2,)
>>> # `probs` must be passed in during function calls.
>>> ans = ca2.prob(value, probs_a)
>>> print(ans)
[0.5 0.5]
>>> print(ans.shape)
(2,)
>>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
>>> # Args:
>>> # probs (Tensor): event probabilities. Default: self.probs.
>>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
>>> ans = ca1.mean() # return 0.8
>>> print(ans)
[0.8]
>>> print(ans.shape)
(1,)
>>> ans = ca1.mean(probs_b)
>>> print(ans)
[0.65]
>>> print(ans.shape)
(1,)
>>> # `probs` must be passed in during function calls.
>>> ans = ca2.mean(probs_a)
>>> print(ans)
[0.5]
>>> print(ans.shape)
(1,)
>>> # Interfaces of `kl_loss` and `cross_entropy` are the same as follows:
>>> # Args:
>>> # dist (str): the name of the distribution. Only 'Categorical' is supported.
@ -96,15 +96,15 @@ class Categorical(Distribution):
>>> # probs (Tensor): event probabilities of distribution a. Default: self.probs.
>>> # Examples of kl_loss. `cross_entropy` is similar.
>>> ans = ca1.kl_loss('Categorical', probs_b)
>>> print(ans)
0.05418826
>>> print(ans.shape)
()
>>> ans = ca1.kl_loss('Categorical', probs_b, probs_a)
>>> print(ans)
0.04715523
>>> print(ans.shape)
()
>>> # An additional `probs` must be passed in.
>>> ans = ca2.kl_loss('Categorical', probs_b, probs_a)
>>> print(ans)
0.04715523
>>> print(ans.shape)
()
>>> # Examples of `sample`.
>>> # Args:
>>> # shape (tuple): the shape of the sample. Default: ().

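For a Categorical over the indices {0, 1}, mean() is the expected index and kl_loss is the discrete KL divergence. A NumPy check under event probabilities inferred from the printed values (they are not shown in this hunk):

    import numpy as np

    p1 = np.array([0.2, 0.8])     # assumed probs of ca1
    pb = np.array([0.35, 0.65])   # assumed probs_b
    print(np.sum(np.arange(2) * p1))      # mean    -> 0.8
    print(np.sum(p1 * np.log(p1 / pb)))   # kl_loss -> 0.05418826
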
@ -69,31 +69,31 @@ class Cauchy(Distribution):
>>> # Similar calls can be made to other probability functions
>>> # by replacing 'prob' by the name of the function
>>> ans = cauchy1.prob(value)
>>> print(ans)
[0.06366198 0.07489645 0.07957748]
>>> print(ans.shape)
(3,)
>>> # Evaluate with respect to distribution b.
>>> ans = cauchy1.prob(value, loc_b, scale_b)
>>> print(ans)
[0.31830987 0.14691226 0.07957747]
>>> print(ans.shape)
(3,)
>>> # `loc` and `scale` must be passed in during function calls
>>> ans = cauchy2.prob(value, loc_a, scale_a)
>>> print(ans)
[0.12732396 0.15915494 0.12732396]
>>> print(ans.shape)
(3,)
>>> # Functions `mode` and `entropy` have the same arguments.
>>> # Args:
>>> # loc (Tensor): the location of the distribution. Default: self.loc.
>>> # scale (Tensor): the scale of the distribution. Default: self.scale.
>>> # Example of `mode`.
>>> ans = cauchy1.mode() # return 3.0
>>> print(ans)
3.0
>>> print(ans.shape)
()
>>> ans = cauchy1.mode(loc_b, scale_b) # return loc_b
>>> print(ans)
[1. 1. 1.]
>>> print(ans.shape)
(3,)
>>> # `loc` and `scale` must be passed in during function calls.
>>> ans = cauchy2.mode(loc_a, scale_a)
>>> print(ans)
[2. 2. 2.]
>>> print(ans.shape)
(3,)
>>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
>>> # Args:
>>> # dist (str): the type of the distributions. Only "Cauchy" is supported.
@ -103,15 +103,15 @@ class Cauchy(Distribution):
>>> # scale (Tensor): the scale of distribution a. Default: self.scale.
>>> # Examples of `kl_loss`. `cross_entropy` is similar.
>>> ans = cauchy1.kl_loss('Cauchy', loc_b, scale_b)
>>> print(ans)
[0.594707 0.35563278 0.22314358]
>>> print(ans.shape)
(3,)
>>> ans = cauchy1.kl_loss('Cauchy', loc_b, scale_b, loc_a, scale_a)
>>> print(ans)
[0.22314358 0.09909081 0.0606246 ]
>>> print(ans.shape)
(3,)
>>> # Additional `loc` and `scale` must be passed in.
>>> ans = cauchy2.kl_loss('Cauchy', loc_b, scale_b, loc_a, scale_a)
>>> print(ans)
[0.22314358 0.09909081 0.0606246 ]
>>> print(ans.shape)
(3,)
>>> # Examples of `sample`.
>>> # Args:
>>> # shape (tuple): the shape of the sample. Default: ()

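Unlike the other distributions in this change, the Cauchy example exercises mode rather than mean, since the Cauchy mean is undefined; the mode is simply loc, which matches the printed 3.0, [1. 1. 1.] and [2. 2. 2.]. The density line can be checked with NumPy under parameters inferred from the outputs:

    import numpy as np

    loc, scale = 3.0, 4.0            # assumed for cauchy1; inferred from the outputs
    x = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    print(1.0 / (np.pi * scale * (1 + ((x - loc) / scale) ** 2)))   # prob -> [0.06366198 0.07489645 0.07957747]
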
@ -66,30 +66,30 @@ class Exponential(Distribution):
>>> # Similar calls can be made to other probability functions
>>> # by replacing `prob` by the name of the function.
>>> ans = e1.prob(value)
>>> print(ans)
[0.30326533 0.18393973 0.11156508]
>>> print(ans.shape)
(3,)
>>> # Evaluate with respect to distribution b.
>>> ans = e1.prob(value, rate_b)
>>> print(ans)
[0.16374615 0.18393973 0.12047768]
>>> print(ans.shape)
(3,)
>>> # `rate` must be passed in during function calls.
>>> ans = e2.prob(value, rate_a)
>>> print(ans)
[0.329287 0.18071651 0.09917933]
>>> print(ans.shape)
(3,)
>>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments as follows.
>>> # Args:
>>> # rate (Tensor): the rate of the distribution. Default: self.rate.
>>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
>>> ans = e1.mean() # return 2
>>> print(ans)
2.0
>>> print(ans.shape)
()
>>> ans = e1.mean(rate_b) # return 1 / rate_b
>>> print(ans)
[5. 2. 2.5]
>>> print(ans.shape)
(3,)
>>> # `rate` must be passed in during function calls.
>>> ans = e2.mean(rate_a)
>>> print(ans)
[1.6666666]
>>> print(ans.shape)
(1,)
>>> # Interfaces of `kl_loss` and `cross_entropy` are the same.
>>> # Args:
>>> # dist (str): The name of the distribution. Only 'Exponential' is supported.
@ -97,15 +97,15 @@ class Exponential(Distribution):
>>> # rate_a (Tensor): the rate of distribution a. Default: self.rate.
>>> # Examples of `kl_loss`. `cross_entropy` is similar.
>>> ans = e1.kl_loss('Exponential', rate_b)
>>> print(ans)
[0.31629074 0. 0.02314353]
>>> print(ans.shape)
(3,)
>>> ans = e1.kl_loss('Exponential', rate_b, rate_a)
>>> print(ans)
[0.43194556 0.01565492 0.07213175]
>>> print(ans.shape)
(3,)
>>> # An additional `rate` must be passed in.
>>> ans = e2.kl_loss('Exponential', rate_b, rate_a)
>>> print(ans)
[0.43194556 0.01565492 0.07213175]
>>> print(ans.shape)
(3,)
>>> # Examples of `sample`.
>>> # Args:
>>> # shape (tuple): the shape of the sample. Default: ()

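The Exponential density is rate * exp(-rate * x) and its mean is 1 / rate, which is exactly the '# return 1 / rate_b' comment above. A NumPy check under an assumed rate of 0.5 for e1 (inferred from e1.mean() printing 2.0):

    import numpy as np

    rate = 0.5
    x = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    print(rate * np.exp(-rate * x))          # prob                      -> [0.30326533 0.18393973 0.11156508]
    print(1.0 / np.array([0.2, 0.5, 0.4]))   # mean(rate_b) = 1 / rate_b -> [5.  2.  2.5]
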
@ -72,31 +72,31 @@ class Gamma(Distribution):
>>> # Similar calls can be made to other probability functions
>>> # by replacing 'prob' by the name of the function
>>> ans = g1.prob(value)
>>> print(ans)
[0.58610016 0.0429392 0.00176953]
>>> print(ans.shape)
(3,)
>>> # Evaluate with respect to the distribution b.
>>> ans = g1.prob(value, concentration_b, rate_b)
>>> print(ans)
[0.3678793 0.07468057 0.0049575 ]
>>> print(ans.shape)
(3,)
>>> # `concentration` and `rate` must be passed in during function calls for g2.
>>> ans = g2.prob(value, concentration_a, rate_a)
>>> print(ans)
[0.54134095 0.14652506 0.02974501]
>>> print(ans.shape)
(3,)
>>> # Functions `mean`, `sd`, `mode`, `var`, and `entropy` have the same arguments.
>>> # Args:
>>> # concentration (Tensor): the concentration of the distribution. Default: self._concentration.
>>> # rate (Tensor): the rate of the distribution. Default: self._rate.
>>> # Example of `mean`, `sd`, `mode`, `var`, and `entropy` are similar.
>>> ans = g1.mean()
>>> print(ans)
[0.75]
>>> print(ans.shape)
(1,)
>>> ans = g1.mean(concentration_b, rate_b)
>>> print(ans)
[1. 0.6666667 0.5 ]
>>> print(ans.shape)
(3,)
>>> # `concentration` and `rate` must be passed in during function calls.
>>> ans = g2.mean(concentration_a, rate_a)
>>> print(ans)
[1. 1. 1.]
>>> print(ans.shape)
(3,)
>>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
>>> # Args:
>>> # dist (str): the type of the distributions. Only "Gamma" is supported.
@ -106,15 +106,15 @@ class Gamma(Distribution):
>>> # rate_a (Tensor): the rate of distribution a. Default: self._rate.
>>> # Examples of `kl_loss`. `cross_entropy` is similar.
>>> ans = g1.kl_loss('Gamma', concentration_b, rate_b)
>>> print(ans)
[0.28871584 0.2582507 0.34556866]
>>> print(ans.shape)
(3,)
>>> ans = g1.kl_loss('Gamma', concentration_b, rate_b, concentration_a, rate_a)
>>> print(ans)
[0.11593175 0.21046662 0.42278457]
>>> print(ans.shape)
(3,)
>>> # Additional `concentration` and `rate` must be passed in.
>>> ans = g2.kl_loss('Gamma', concentration_b, rate_b, concentration_a, rate_a)
>>> print(ans)
[0.11593175 0.21046662 0.42278457]
>>> print(ans.shape)
(3,)
>>> # Examples of `sample`.
>>> # Args:
>>> # shape (tuple): the shape of the sample. Default: ()

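The Gamma mean is concentration / rate; the first printed mean, [0.75], is consistent with concentration 3.0 and rate 4.0 for g1, and the second mean line matches the hedged assumption below:

    import numpy as np

    concentration_b = 1.0                 # assumed; inferred from the printed means
    rate_b = np.array([1.0, 1.5, 2.0])    # assumed; inferred from the printed means
    print(concentration_b / rate_b)       # mean -> [1.  0.6666667  0.5]
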
@ -67,30 +67,30 @@ class Geometric(Distribution):
>>> # Similar calls can be made to other probability functions
>>> # by replacing `prob` by the name of the function.
>>> ans = g1.prob(value)
>>> print(ans)
[0.25 0.5 0.25]
>>> print(ans.shape)
(3,)
>>> # Evaluate with respect to distribution b.
>>> ans = g1.prob(value, probs_b)
>>> print(ans)
[0.16 0.5 0.24]
>>> print(ans.shape)
(3,)
>>> # `probs` must be passed in during function calls.
>>> ans = g2.prob(value, probs_a)
>>> print(ans)
[0.24 0.6 0.24]
>>> print(ans.shape)
(3,)
>>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
>>> # Args:
>>> # probs1 (Tensor): the probability of success of a Bernoulli trial. Default: self.probs.
>>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
>>> ans = g1.mean() # return 1.0
>>> print(ans)
1.0
>>> print(ans.shape)
()
>>> ans = g1.mean(probs_b)
>>> print(ans)
[4. 1. 1.5]
>>> print(ans.shape)
(3,)
>>> # `probs` must be passed in during function calls.
>>> ans = g2.mean(probs_a)
>>> print(ans)
[0.6666666]
>>> print(ans.shape)
(1,)
>>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same.
>>> # Args:
>>> # dist (str): the name of the distribution. Only 'Geometric' is supported.
@ -98,15 +98,15 @@ class Geometric(Distribution):
>>> # probs1_a (Tensor): the probability of success of a Bernoulli trial of distribution a. Default: self.probs.
>>> # Examples of `kl_loss`. `cross_entropy` is similar.
>>> ans = g1.kl_loss('Geometric', probs_b)
>>> print(ans)
[0.44628713 0. 0.04082197]
>>> print(ans.shape)
(3,)
>>> ans = g1.kl_loss('Geometric', probs_b, probs_a)
>>> print(ans)
[0.6365142 0.0335592 0.13515502]
>>> print(ans.shape)
(3,)
>>> # An additional `probs` must be passed in.
>>> ans = g2.kl_loss('Geometric', probs_b, probs_a)
>>> print(ans)
[0.6365142 0.0335592 0.13515502]
>>> print(ans.shape)
(3,)
>>> # Examples of `sample`.
>>> # Args:
>>> # shape (tuple): the shape of the sample. Default: ()

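This Geometric counts the failures before the first success, so its mean is (1 - p) / p; the printed 1.0 corresponds to p = 0.5. A NumPy check of the second mean line under an inferred probs_b:

    import numpy as np

    probs_b = np.array([0.2, 0.5, 0.4])   # assumed; inferred from the outputs
    print((1 - probs_b) / probs_b)        # mean -> [4.  1.  1.5]
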
@ -29,8 +29,8 @@ class Gumbel(TransformedDistribution):
Gumbel distribution.
Args:
loc (int, float, list, numpy.ndarray, Tensor): The location of Gumbel distribution.
scale (int, float, list, numpy.ndarray, Tensor): The scale of Gumbel distribution.
loc (float, list, numpy.ndarray, Tensor): The location of Gumbel distribution.
scale (float, list, numpy.ndarray, Tensor): The scale of Gumbel distribution.
seed (int): the seed used in sampling. The global seed is used if it is None. Default: None.
dtype (mindspore.dtype): type of the distribution. Default: mstype.float32.
name (str): the name of the distribution. Default: 'Gumbel'.
@ -63,12 +63,12 @@ class Gumbel(TransformedDistribution):
>>> # by replacing 'prob' by the name of the function.
>>> value = Tensor([1.0, 2.0, 3.0], dtype=mindspore.float32)
>>> ans = gumbel.prob(value)
>>> print(ans)
[0.07926048 0.08889319 0.09196986]
>>> print(ans.shape)
(3,)
>>> # Functions `mean`, `mode`, `sd`, `var`, and `entropy` do not take in any argument.
>>> ans = gumbel.mean()
>>> print(ans)
5.3088627
>>> print(ans.shape)
()
>>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
>>> # Args:
>>> # dist (str): the type of the distributions. Only "Gumbel" is supported.
@ -78,8 +78,8 @@ class Gumbel(TransformedDistribution):
>>> loc_b = Tensor([1.0], dtype=mindspore.float32)
>>> scale_b = Tensor([1.0, 1.5, 2.0], dtype=mindspore.float32)
>>> ans = gumbel.kl_loss('Gumbel', loc_b, scale_b)
>>> print(ans)
[ 2.5934026 0.03880269 -0.38017237]
>>> print(ans.shape)
(3,)
>>> # Examples of `sample`.
>>> # Args:
>>> # shape (tuple): the shape of the sample. Default: ()

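The Gumbel mean is loc + gamma * scale, where gamma is the Euler-Mascheroni constant; the printed 5.3088627 is consistent with loc = 3.0 and scale = 4.0 (a hedged inference, since the constructor is not shown here):

    import numpy as np

    loc, scale = 3.0, 4.0                 # assumed; inferred from the outputs
    print(loc + np.euler_gamma * scale)   # mean -> 5.3088627
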
@ -76,16 +76,16 @@ class LogNormal(msd.TransformedDistribution):
>>> # Similar calls can be made to other probability functions
>>> # by replacing 'prob' by the name of the function.
>>> ans = n1.prob(value)
>>> print(ans)
[0.07528435 0.04222769 0.02969363]
>>> print(ans.shape)
(3,)
>>> # Evaluate with respect to distribution b.
>>> ans = n1.prob(value, loc_b, scale_b)
>>> print(ans)
[0.24197072 0.13022715 0.0664096 ]
>>> print(ans.shape)
(3,)
>>> # `loc` and `scale` must be passed in during function calls since they were not passed in at construction.
>>> ans = n2.prob(value, loc_a, scale_a)
>>> print(ans)
[0.12098535 0.08056299 0.06006904]
>>> print(ans.shape)
(3,)
>>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
>>> # Args:
>>> # loc (Tensor): the loc of distribution. Default: None. If `loc` is passed in as None,
@ -94,15 +94,15 @@ class LogNormal(msd.TransformedDistribution):
>>> # the standard deviation of the underlying Normal distribution will be used.
>>> # Example of `mean`. `sd`, `var`, and `entropy` are similar.
>>> ans = n1.mean()
>>> print(ans)
59874.14
>>> print(ans.shape)
()
>>> ans = n1.mean(loc_b, scale_b)
>>> print(ans)
[ 4.481689 8.372897 20.085537]
>>> print(ans.shape)
(3,)
>>> # `loc` and `scale` must be passed in during function calls since they were not passed in at construction.
>>> ans = n2.mean(loc_a, scale_a)
>>> print(ans)
[54.59815 54.59815 54.59815]
>>> print(ans.shape)
(3,)
>>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
>>> # Args:
>>> # dist (str): the type of the distributions. Only "Normal" is supported.
@ -114,15 +114,15 @@ class LogNormal(msd.TransformedDistribution):
>>> # the standard deviation of the underlying Normal distribution will be used.
>>> # Examples of `kl_loss`. `cross_entropy` is similar.
>>> ans = n1.kl_loss('LogNormal', loc_b, scale_b)
>>> print(ans)
[8.113706 2.963615 1.3068528]
>>> print(ans.shape)
(3,)
>>> ans = n1.kl_loss('LogNormal', loc_b, scale_b, loc_a, scale_a)
>>> print(ans)
[1.3068528 0.32342905 0.125 ]
>>> print(ans.shape)
(3,)
>>> # Additional `loc` and `scale` must be passed in since they were not passed in at construction.
>>> ans = n2.kl_loss('LogNormal', loc_b, scale_b, loc_a, scale_a)
>>> print(ans)
[1.3068528 0.32342905 0.125 ]
>>> print(ans.shape)
(3,)
>>> # Examples of `sample`.
>>> # Args:
>>> # shape (tuple): the shape of the sample. Default: ()

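The LogNormal mean is exp(mu + sigma**2 / 2) in terms of the underlying Normal's parameters; the printed 59874.14 equals exp(11), consistent with mu = 3.0 and sigma = 4.0 (inferred, not shown in this hunk):

    import numpy as np

    mu, sigma = 3.0, 4.0                  # assumed for n1; inferred from the outputs
    print(np.exp(mu + sigma ** 2 / 2))    # mean -> 59874.14
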
@ -70,31 +70,31 @@ class Logistic(Distribution):
>>> # Similar calls can be made to other probability functions
>>> # by replacing 'prob' by the name of the function
>>> ans = l1.prob(value)
>>> print(ans)
[0.05875093 0.06153352 0.0625 ]
>>> print(ans.shape)
(3,)
>>> # Evaluate with respect to distribution b.
>>> ans = l1.prob(value, loc_b, scale_b)
>>> print(ans)
[0.25 0.14943825 0.09830598]
>>> print(ans.shape)
(3,)
>>> # `loc` and `scale` must be passed in during function calls
>>> ans = l1.prob(value, loc_a, scale_a)
>>> print(ans)
[0.11750185 0.125 0.11750185]
>>> print(ans.shape)
(3,)
>>> # Functions `mean`, `mode`, `sd`, `var`, and `entropy` have the same arguments.
>>> # Args:
>>> # loc (Tensor): the location of the distribution. Default: self.loc.
>>> # scale (Tensor): the scale of the distribution. Default: self.scale.
>>> # Example of `mean`. `mode`, `sd`, `var`, and `entropy` are similar.
>>> ans = l1.mean()
>>> print(ans)
3.0
>>> print(ans.shape)
()
>>> ans = l1.mean(loc_b, scale_b)
>>> print(ans)
[1. 1. 1.]
>>> print(ans.shape)
(3,)
>>> # `loc` and `scale` must be passed in during function calls.
>>> ans = l1.mean(loc_a, scale_a)
>>> print(ans)
[2. 2. 2.]
>>> print(ans.shape)
(3,)
>>> # Examples of `sample`.
>>> # Args:
>>> # shape (tuple): the shape of the sample. Default: ()

@ -70,31 +70,31 @@ class Normal(Distribution):
>>> # Similar calls can be made to other probability functions
>>> # by replacing 'prob' by the name of the function
>>> ans = n1.prob(value)
>>> print(ans)
[0.08801632 0.09666702 0.09973556]
>>> print(ans.shape)
(3,)
>>> # Evaluate with respect to the distribution b.
>>> ans = n1.prob(value, mean_b, sd_b)
>>> print(ans)
[0.3989423 0.21296532 0.12098535]
>>> print(ans.shape)
(3,)
>>> # `mean` and `sd` must be passed in during function calls
>>> ans = n2.prob(value, mean_a, sd_a)
>>> print(ans)
[0.17603266 0.19947115 0.17603266]
>>> print(ans.shape)
(3,)
>>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
>>> # Args:
>>> # mean (Tensor): the mean of the distribution. Default: self._mean_value.
>>> # sd (Tensor): the standard deviation of the distribution. Default: self._sd_value.
>>> # Example of `mean`. `sd`, `var`, and `entropy` are similar.
>>> ans = n1.mean() # return 3.0
>>> print(ans)
3.0
>>> print(ans.shape)
()
>>> ans = n1.mean(mean_b, sd_b) # return mean_b
>>> print(ans)
[1. 1. 1.]
>>> print(ans.shape)
(3,)
>>> # `mean` and `sd` must be passed in during function calls.
>>> ans = n2.mean(mean_a, sd_a)
>>> print(ans)
[2. 2. 2.]
>>> print(ans.shape)
(3,)
>>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
>>> # Args:
>>> # dist (str): the type of the distributions. Only "Normal" is supported.
@ -104,15 +104,15 @@ class Normal(Distribution):
>>> # sd_a (Tensor): the standard deviation of distribution a. Default: self._sd_value.
>>> # Examples of `kl_loss`. `cross_entropy` is similar.
>>> ans = n1.kl_loss('Normal', mean_b, sd_b)
>>> print(ans)
[8.113706 2.963615 1.3068528]
>>> print(ans.shape)
(3,)
>>> ans = n1.kl_loss('Normal', mean_b, sd_b, mean_a, sd_a)
>>> print(ans)
[1.3068528 0.32342905 0.125 ]
>>> print(ans.shape)
(3,)
>>> # Additional `mean` and `sd` must be passed in.
>>> ans = n2.kl_loss('Normal', mean_b, sd_b, mean_a, sd_a)
>>> print(ans)
[1.3068528 0.32342905 0.125 ]
>>> print(ans.shape)
(3,)
>>> # Examples of `sample`.
>>> # Args:
>>> # shape (tuple): the shape of the sample. Default: ()

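The kl_loss values can be reproduced from the closed form KL(N(mu_a, sd_a) || N(mu_b, sd_b)) = log(sd_b / sd_a) + (sd_a**2 + (mu_a - mu_b)**2) / (2 * sd_b**2) - 1/2; they are identical to the LogNormal kl_loss values above because that KL is computed on the underlying Normal. A NumPy check under parameters inferred from the outputs:

    import numpy as np

    mu_a, sd_a = 3.0, 4.0                          # assumed for n1
    mu_b, sd_b = 1.0, np.array([1.0, 1.5, 2.0])    # assumed for mean_b, sd_b
    kl = np.log(sd_b / sd_a) + (sd_a**2 + (mu_a - mu_b)**2) / (2 * sd_b**2) - 0.5
    print(kl)                                      # kl_loss -> [8.113706  2.963615  1.3068528]
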
@ -66,30 +66,30 @@ class Poisson(Distribution):
>>> # Similar calls can be made to other probability functions
>>> # by replacing `prob` by the name of the function.
>>> ans = p1.prob(value)
>>> print(ans)
[0.3032652 0.0758163 0.01263604]
>>> print(ans.shape)
(3,)
>>> # Evaluate with respect to distribution b.
>>> ans = p1.prob(value, rate_b)
>>> print(ans)
[0.16374607 0.0758163 0.00715008]
>>> print(ans.shape)
(3,)
>>> # `rate` must be passed in during function calls.
>>> ans = p2.prob(value, rate_a)
>>> print(ans)
[0.32928684 0.09878606 0.01975721]
>>> print(ans.shape)
(3,)
>>> # Functions `mean`, `mode`, `sd`, and `var` have the same arguments as follows.
>>> # Args:
>>> # rate (Tensor): the rate of the distribution. Default: self.rate.
>>> # Examples of `mean`, `sd`, `mode`, and `var` are similar.
>>> ans = p1.mean() # return 0.5
>>> print(ans)
0.5
>>> print(ans.shape)
()
>>> ans = p1.mean(rate_b) # return rate_b
>>> print(ans)
[0.2 0.5 0.4]
>>> print(ans.shape)
(3,)
>>> # `rate` must be passed in during function calls.
>>> ans = p2.mean(rate_a)
>>> print(ans)
[0.6]
>>> print(ans.shape)
(1,)
>>> # Examples of `sample`.
>>> # Args:
>>> # shape (tuple): the shape of the sample. Default: ()

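The Poisson mean equals its rate and the pmf is rate**k * exp(-rate) / k!. A NumPy check under an assumed rate of 0.5 for p1 (inferred from p1.mean() printing 0.5):

    import numpy as np
    from math import factorial

    rate = 0.5
    k = np.array([1, 2, 3])
    print(rate ** k * np.exp(-rate) / np.array([factorial(i) for i in k]))
    # prob -> [0.3032652  0.0758163  0.01263604]
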
@ -67,8 +67,8 @@ class TransformedDistribution(Distribution):
>>>
>>> value = Tensor([1.0, 2.0, 3.0], dtype=mindspore.float32)
>>> prob = trans_dist.prob(value)
>>> print(prob)
[0.3989423 0.15687403 0.07272825]
>>> print(prob.shape)
(3,)
>>> sample = trans_dist.sample(shape=(2, 3))
>>> print(sample.shape)
(2, 3)

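The construction of trans_dist is outside this hunk; the printed density matches a standard Normal base pushed through an Exp bijector, i.e. a LogNormal(0, 1), whose density is N(log y; 0, 1) / y. A NumPy check of that reading:

    import numpy as np

    value = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    pdf = np.exp(-0.5 * np.log(value) ** 2) / (value * np.sqrt(2 * np.pi))
    print(pdf)    # prob -> [0.3989423  0.15687403 0.07272825]
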
@ -71,31 +71,31 @@ class Uniform(Distribution):
>>> # Similar calls can be made to other probability functions
>>> # by replacing 'prob' by the name of the function.
>>> ans = u1.prob(value)
>>> print(ans)
[1. 1.]
>>> print(ans.shape)
(2,)
>>> # Evaluate with respect to distribution b.
>>> ans = u1.prob(value, low_b, high_b)
>>> print(ans)
[0.25 0.15384614]
>>> print(ans.shape)
(2,)
>>> # `high` and `low` must be passed in during function calls.
>>> ans = u2.prob(value, low_a, high_a)
>>> print(ans)
[0.5 0.25]
>>> print(ans.shape)
(2,)
>>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
>>> # Args:
>>> # low (Tensor): the lower bound of the distribution. Default: self.low.
>>> # high (Tensor): the higher bound of the distribution. Default: self.high.
>>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
>>> ans = u1.mean() # return 0.5
>>> print(ans)
0.5
>>> print(ans.shape)
()
>>> ans = u1.mean(low_b, high_b) # return (low_b + high_b) / 2
>>> print(ans)
[0.5 1.75]
>>> print(ans.shape)
(2,)
>>> # `high` and `low` must be passed in during function calls.
>>> ans = u2.mean(low_a, high_a)
>>> print(ans)
[1. 2.]
>>> print(ans.shape)
(2,)
>>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same.
>>> # Args:
>>> # dist (str): the type of the distributions. Should be "Uniform" in this case.
@ -105,15 +105,15 @@ class Uniform(Distribution):
>>> # high_a (Tensor): the upper bound of distribution a. Default: self.high.
>>> # Examples of `kl_loss`. `cross_entropy` is similar.
>>> ans = u1.kl_loss('Uniform', low_b, high_b)
>>> print(ans)
[1.3862944 1.8718022]
>>> print(ans.shape)
(2,)
>>> ans = u1.kl_loss('Uniform', low_b, high_b, low_a, high_a)
>>> print(ans)
[0.6931472 0.48550785]
>>> print(ans.shape)
(2,)
>>> # Additional `high` and `low` must be passed in.
>>> ans = u2.kl_loss('Uniform', low_b, high_b, low_a, high_a)
>>> print(ans)
[0.6931472 0.48550785]
>>> print(ans.shape)
(2,)
>>> # Examples of `sample`.
>>> # Args:
>>> # shape (tuple): the shape of the sample. Default: ()

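When the support of distribution a lies inside that of b, the KL divergence between two uniforms is the log of the ratio of the interval widths. A hedged check of the first kl_loss line, with widths inferred from the outputs rather than taken from the hunk:

    import numpy as np

    width_a = 1.0                       # assumed for u1 (its density prints as 1.)
    width_b = np.array([4.0, 6.5])      # assumed width of (low_b, high_b)
    print(np.log(width_b / width_a))    # kl_loss -> [1.3862944 1.8718022]
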