edited docs in distribution and bijector

pull/9247/head
Xun Deng 4 years ago
parent af3c27d354
commit a411334191

@ -35,6 +35,9 @@ class Bijector(Cell):
dtype (mindspore.dtype): The type of the distributions that the Bijector can operate on. Default: None.
param (dict): The parameters used to initialize the Bijector. Default: None.
Supported Platforms:
``Ascend`` ``GPU``
Note:
The `dtype` of a bijector represents the type of the distributions that the bijector could operate on.
When `dtype` is None, there is no enforcement on the type of input value except that the input value

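Not from the patch: a minimal sketch of the `dtype` behaviour described above, assuming the standard `mindspore.nn.probability.bijector` module (aliased `msb`) and a concrete bijector in place of the abstract base class.
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.nn.probability.bijector as msb
>>> # dtype is left as None, so any float Tensor is accepted as input.
>>> bij = msb.PowerTransform(power=0.5)
>>> y = bij.forward(Tensor([1.0, 2.0, 3.0], dtype=mindspore.float32))
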
@ -27,6 +27,9 @@ class Exp(PowerTransform):
Args:
name (str): The name of the Bijector. Default: 'Exp'.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> import mindspore
>>> import mindspore.nn as nn

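The Examples block above is cut off by the hunk at the imports; a plausible, hedged continuation (a sketch, not the docstring's own text) exercising the standard bijector methods:
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.nn.probability.bijector as msb
>>> exp_bijector = msb.Exp()
>>> value = Tensor([1.0, 2.0, 3.0], dtype=mindspore.float32)
>>> ans1 = exp_bijector.forward(value)              # e**x
>>> ans2 = exp_bijector.inverse(value)              # ln(x)
>>> ans3 = exp_bijector.forward_log_jacobian(value)
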
@ -32,6 +32,9 @@ class GumbelCDF(Bijector):
scale (float, list, numpy.ndarray, Tensor): The scale. Default: 1.0.
name (str): The name of the Bijector. Default: 'Gumbel_CDF'.
Supported Platforms:
``Ascend`` ``GPU``
Note:
For `inverse` and `inverse_log_jacobian`, input should be in range of (0, 1).
The dtype of `loc` and `scale` must be float.

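A hedged usage sketch of the (0, 1) constraint noted above, assuming the `msb` alias for `mindspore.nn.probability.bijector` and illustrative `loc`/`scale` values:
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.nn.probability.bijector as msb
>>> gum = msb.GumbelCDF(loc=0.0, scale=1.0)   # loc and scale are floats
>>> cdf_vals = gum.forward(Tensor([1.0, 2.0], dtype=mindspore.float32))   # outputs lie in (0, 1)
>>> x_back = gum.inverse(Tensor([0.1, 0.5], dtype=mindspore.float32))     # inputs must lie in (0, 1)
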
@ -25,6 +25,9 @@ class Invert(Bijector):
bijector (Bijector): Base Bijector.
name (str): The name of the Bijector. Default: 'Invert'.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> import mindspore
>>> import mindspore.nn as nn

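Again cut off at the imports; a minimal, hedged sketch of wrapping a base bijector with Invert:
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.nn.probability.bijector as msb
>>> inv_exp = msb.Invert(msb.Exp())
>>> x = Tensor([1.0, 2.0, 3.0], dtype=mindspore.float32)
>>> ans = inv_exp.forward(x)   # the forward of Invert(Exp) is Exp's inverse, i.e. log(x)
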
@ -36,6 +36,9 @@ class PowerTransform(Bijector):
power (float, list, numpy.ndarray, Tensor): The scale factor. Default: 0.
name (str): The name of the bijector. Default: 'PowerTransform'.
Supported Platforms:
``Ascend`` ``GPU``
Note:
The dtype of `power` must be float.

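A short, hedged sketch (illustrative `power`, `msb` alias assumed):
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.nn.probability.bijector as msb
>>> pt = msb.PowerTransform(power=2.0)   # power is a float
>>> x = Tensor([1.0, 2.0], dtype=mindspore.float32)
>>> y = pt.forward(x)
>>> log_jac = pt.forward_log_jacobian(x)
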
@ -32,6 +32,9 @@ class ScalarAffine(Bijector):
shift (float, list, numpy.ndarray, Tensor): The shift factor. Default: 0.0.
name (str): The name of the bijector. Default: 'ScalarAffine'.
Supported Platforms:
``Ascend`` ``GPU``
Note:
The dtype of `shift` and `scale` must be float.
If `shift` and `scale` are passed in as numpy.ndarray or Tensor, they must have

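A minimal sketch of the affine mapping, with illustrative float `shift` and `scale` (not from the diff):
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.nn.probability.bijector as msb
>>> affine = msb.ScalarAffine(scale=2.0, shift=1.0)
>>> y = affine.forward(Tensor([1.0, 2.0, 3.0], dtype=mindspore.float32))   # 2 * x + 1
>>> x_back = affine.inverse(y)
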
@ -33,6 +33,9 @@ class Softplus(Bijector):
sharpness (float, list, numpy.ndarray, Tensor): The scale factor. Default: 1.0.
name (str): The name of the Bijector. Default: 'Softplus'.
Supported Platforms:
``Ascend`` ``GPU``
Note:
The dtype of `sharpness` must be float.

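A hedged sketch with an illustrative float `sharpness`:
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.nn.probability.bijector as msb
>>> sp = msb.Softplus(sharpness=2.0)
>>> y = sp.forward(Tensor([-1.0, 0.0, 1.0], dtype=mindspore.float32))
>>> x_back = sp.inverse(y)
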
@ -32,6 +32,9 @@ class Bernoulli(Distribution):
dtype (mindspore.dtype): The type of the event samples. Default: mstype.int32.
name (str): The name of the distribution. Default: 'Bernoulli'.
Supported Platforms:
``Ascend`` ``GPU``
Note:
`probs` must be a proper probability (0 < p < 1).
`dist_spec_args` is `probs`.

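A hedged sketch of the `probs` constraint and the `dist_spec_args` convention noted above, assuming the `msd` alias for `mindspore.nn.probability.distribution`:
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.nn.probability.distribution as msd
>>> b1 = msd.Bernoulli(0.5, dtype=mindspore.int32)   # 0 < probs < 1
>>> value = Tensor([1.0, 0.0, 1.0], dtype=mindspore.float32)
>>> ans = b1.prob(value)
>>> # Without probs at construction, pass it per call (it is the dist_spec_args).
>>> b2 = msd.Bernoulli(dtype=mindspore.int32)
>>> ans2 = b2.prob(value, Tensor([0.3], dtype=mindspore.float32))
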
@ -37,6 +37,9 @@ class Beta(Distribution):
dtype (mindspore.dtype): The type of the event samples. Default: mstype.float32.
name (str): The name of the distribution. Default: 'Beta'.
Supported Platforms:
``Ascend`` ``GPU``
Note:
`concentration1` and `concentration0` must be greater than zero.
`dist_spec_args` are `concentration1` and `concentration0`.

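A minimal, hedged sketch (illustrative concentrations, `msd` alias assumed):
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.nn.probability.distribution as msd
>>> beta = msd.Beta([3.0], [4.0], dtype=mindspore.float32)   # concentration1, concentration0 > 0
>>> value = Tensor([0.1, 0.5, 0.8], dtype=mindspore.float32)
>>> ans = beta.prob(value)
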
@ -36,6 +36,9 @@ class Categorical(Distribution):
dtype (mindspore.dtype): The type of the event samples. Default: mstype.int32.
name (str): The name of the distribution. Default: 'Categorical'.
Supported Platforms:
``Ascend`` ``GPU``
Note:
`probs` must have a rank of at least 1, and its values must be proper probabilities that sum to 1.

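A hedged sketch of the `probs` requirement above (rank of at least 1, summing to 1), with illustrative values:
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.nn.probability.distribution as msd
>>> ca = msd.Categorical(probs=[0.2, 0.8], dtype=mindspore.int32)
>>> value = Tensor([1, 0], dtype=mindspore.int32)   # category indices
>>> ans = ca.prob(value)
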
@ -34,6 +34,9 @@ class Cauchy(Distribution):
dtype (mindspore.dtype): The type of the event samples. Default: mstype.float32.
name (str): The name of the distribution. Default: 'Cauchy'.
Supported Platforms:
``Ascend``
Note:
`scale` must be greater than zero.
`dist_spec_args` are `loc` and `scale`.

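A minimal, hedged sketch (illustrative `loc`/`scale`; per the platforms line above this runs on Ascend):
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.nn.probability.distribution as msd
>>> c = msd.Cauchy(3.0, 4.0, dtype=mindspore.float32)   # scale must be > 0
>>> value = Tensor([1.0, 2.0, 3.0], dtype=mindspore.float32)
>>> ans = c.prob(value)
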
@ -33,6 +33,9 @@ class Distribution(Cell):
name (str): The name of the distribution.
param (dict): The parameters used to initialize the distribution.
Supported Platforms:
``Ascend`` ``GPU``
Note:
Derived classes must override operations such as `_mean`, `_prob`,
and `_log_prob`. Required arguments, such as `value` for `_prob`,
@ -711,7 +714,8 @@ class Distribution(Cell):
Note:
Names of supported functions include:
'prob', 'log_prob', 'cdf', 'log_cdf', 'survival_function', 'log_survival',
'var', 'sd', 'mode', 'mean', 'entropy', 'kl_loss', 'cross_entropy', and 'sample'.
'var', 'sd', 'mode', 'mean', 'entropy', 'kl_loss', 'cross_entropy', 'sample',
'get_dist_args', and 'get_dist_type'.
Args:
name (str): The name of the function.

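A hedged sketch of the name-based dispatch described in the Note, including the two newly listed names; it assumes the dispatch goes through the distribution's Cell call itself, i.e. distribution(name, *args), with a Normal as the concrete example:
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.nn.probability.distribution as msd
>>> n = msd.Normal(3.0, 4.0, dtype=mindspore.float32)
>>> value = Tensor([1.0, 2.0], dtype=mindspore.float32)
>>> ans = n('prob', value)            # dispatch by function name
>>> dist_type = n('get_dist_type')    # one of the newly added names
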
@ -33,6 +33,9 @@ class Exponential(Distribution):
dtype (mindspore.dtype): The type of the event samples. Default: mstype.float32.
name (str): The name of the distribution. Default: 'Exponential'.
Supported Platforms:
``Ascend`` ``GPU``
Note:
`rate` must be strictly greater than 0.
`dist_spec_args` is `rate`.

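A minimal, hedged sketch (illustrative rate, `msd` alias assumed):
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.nn.probability.distribution as msd
>>> e = msd.Exponential(0.5, dtype=mindspore.float32)   # rate strictly > 0
>>> value = Tensor([1.0, 2.0, 3.0], dtype=mindspore.float32)
>>> ans = e.prob(value)
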
@ -37,6 +37,9 @@ class Gamma(Distribution):
dtype (mindspore.dtype): The type of the event samples. Default: mstype.float32.
name (str): The name of the distribution. Default: 'Gamma'.
Supported Platforms:
``Ascend`` ``GPU``
Note:
`concentration` and `rate` must be greater than zero.
`dist_spec_args` are `concentration` and `rate`.
@ -69,10 +72,16 @@ class Gamma(Distribution):
>>> # Similar calls can be made to other probability functions
>>> # by replacing 'prob' by the name of the function
>>> # ans = g1.prob(value)
>>> # # Evaluate with respect to the distribution b.
>>> # ans = g1.prob(value, concentration_b, rate_b)
>>> # # `concentration` and `rate` must be passed in during function calls
>>> # ans = g2.prob(value, concentration_a, rate_a)
>>> print(ans)
[0.58610016 0.0429392 0.00176953]
>>> # Evaluate with respect to the distribution b.
>>> ans = g1.prob(value, concentration_b, rate_b)
>>> print(ans)
[0.3678793 0.07468057 0.0049575 ]
>>> # `concentration` and `rate` must be passed in during function calls for g2.
>>> ans = g2.prob(value, concentration_a, rate_a)
>>> print(ans)
[0.54134095 0.14652506 0.02974501]
>>> # Functions `mean`, `sd`, `mode`, `var`, and `entropy` have the same arguments.
>>> # Args:
>>> # concentration (Tensor): the concentration of the distribution. Default: self._concentration.

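The excerpt uses `g1`, `g2`, `value`, `concentration_a`/`_b`, and `rate_a`/`_b` defined outside the hunk; a hedged reconstruction of the typical setup follows (placeholder values only, so it will not reproduce the printed numbers above):
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.nn.probability.distribution as msd
>>> g1 = msd.Gamma([3.0], [4.0], dtype=mindspore.float32)   # parameters fixed at construction
>>> g2 = msd.Gamma(dtype=mindspore.float32)                 # no parameters; pass them per call
>>> value = Tensor([1.0, 2.0, 3.0], dtype=mindspore.float32)
>>> concentration_a = Tensor([2.0], dtype=mindspore.float32)
>>> rate_a = Tensor([3.0], dtype=mindspore.float32)
>>> ans = g2.prob(value, concentration_a, rate_a)
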
@ -35,6 +35,9 @@ class Geometric(Distribution):
dtype (mindspore.dtype): The type of the event samples. Default: mstype.int32.
name (str): The name of the distribution. Default: 'Geometric'.
Supported Platforms:
``Ascend`` ``GPU``
Note:
`probs` must be a proper probability (0 < p < 1).
`dist_spec_args` is `probs`.

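A minimal, hedged sketch (illustrative probability of success):
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.nn.probability.distribution as msd
>>> g = msd.Geometric(0.5, dtype=mindspore.int32)   # 0 < probs < 1
>>> value = Tensor([0.0, 1.0, 2.0], dtype=mindspore.float32)   # number of failures
>>> ans = g.prob(value)
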
@ -35,6 +35,9 @@ class Gumbel(TransformedDistribution):
dtype (mindspore.dtype): type of the distribution. Default: mstype.float32.
name (str): the name of the distribution. Default: 'Gumbel'.
Supported Platforms:
``Ascend`` ``GPU``
Note:
`scale` must be greater than zero.
`dist_spec_args` are `loc` and `scale`.

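A hedged sketch, assuming scalar floats are accepted for `loc` and `scale` as for the other location-scale distributions:
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.nn.probability.distribution as msd
>>> gum = msd.Gumbel(loc=3.0, scale=4.0, dtype=mindspore.float32)   # scale > 0
>>> value = Tensor([1.0, 2.0, 3.0], dtype=mindspore.float32)
>>> ans = gum.prob(value)
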
@ -35,6 +35,9 @@ class LogNormal(msd.TransformedDistribution):
dtype (mindspore.dtype): type of the distribution. Default: mstype.float32.
name (str): the name of the distribution. Default: 'LogNormal'.
Supported Platforms:
``Ascend`` ``GPU``
Note:
`scale` must be greater than zero.
`dist_spec_args` are `loc` and `scale`.

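A minimal, hedged sketch (illustrative `loc`/`scale`):
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.nn.probability.distribution as msd
>>> ln = msd.LogNormal(loc=0.0, scale=1.0, dtype=mindspore.float32)   # scale > 0
>>> value = Tensor([1.0, 2.0, 3.0], dtype=mindspore.float32)
>>> ans = ln.prob(value)
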
@ -34,6 +34,9 @@ class Logistic(Distribution):
dtype (mindspore.dtype): The type of the event samples. Default: mstype.float32.
name (str): The name of the distribution. Default: 'Logistic'.
Supported Platforms:
``Ascend`` ``GPU``
Note:
`scale` must be greater than zero.
`dist_spec_args` are `loc` and `scale`.

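A minimal, hedged sketch (illustrative `loc`/`scale`):
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.nn.probability.distribution as msd
>>> lg = msd.Logistic(3.0, 4.0, dtype=mindspore.float32)   # scale > 0
>>> value = Tensor([1.0, 2.0, 3.0], dtype=mindspore.float32)
>>> ans = lg.prob(value)
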
@ -34,6 +34,9 @@ class Normal(Distribution):
dtype (mindspore.dtype): The type of the event samples. Default: mstype.float32.
name (str): The name of the distribution. Default: 'Normal'.
Supported Platforms:
``Ascend`` ``GPU``
Note:
`sd` must be greater than zero.
`dist_spec_args` are `mean` and `sd`.

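A hedged sketch of `mean` and `sd` as the `dist_spec_args` (illustrative values, `msd` alias assumed):
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.nn.probability.distribution as msd
>>> n = msd.Normal(3.0, 4.0, dtype=mindspore.float32)   # sd > 0
>>> value = Tensor([1.0, 2.0, 3.0], dtype=mindspore.float32)
>>> ans = n.prob(value)
>>> # mean and sd can also be supplied per call since they are the dist_spec_args.
>>> ans2 = n.prob(value, Tensor([0.0], dtype=mindspore.float32), Tensor([1.0], dtype=mindspore.float32))
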
@ -34,6 +34,9 @@ class Poisson(Distribution):
dtype (mindspore.dtype): The type of the event samples. Default: mstype.float32.
name (str): The name of the distribution. Default: 'Poisson'.
Supported Platforms:
``Ascend`` ``GPU``
Note:
`rate` must be strictly greater than 0.
`dist_spec_args` is `rate`.

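A minimal, hedged sketch (illustrative rate):
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.nn.probability.distribution as msd
>>> p = msd.Poisson([0.5], dtype=mindspore.float32)   # rate strictly > 0
>>> value = Tensor([1.0, 2.0, 3.0], dtype=mindspore.float32)
>>> ans = p.prob(value)
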
@ -37,6 +37,9 @@ class TransformedDistribution(Distribution):
will use this seed; otherwise, the underlying distribution's seed will be used.
name (str): The name of the transformed distribution. Default: 'transformed_distribution'.
Supported Platforms:
``Ascend`` ``GPU``
Note:
The arguments used to initialize the original distribution cannot be None.
For example, mynormal = nn.Normal(dtype=dtype.float32) cannot be used to initialize a

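A hedged sketch of the Note above: the wrapped distribution must be fully parameterized (no None arguments) before it is handed to TransformedDistribution:
>>> import mindspore
>>> import mindspore.nn.probability.distribution as msd
>>> import mindspore.nn.probability.bijector as msb
>>> # Normal(0.0, 1.0) is fully specified, so it is a valid base distribution.
>>> trans_dist = msd.TransformedDistribution(msb.Exp(), msd.Normal(0.0, 1.0, dtype=mindspore.float32))
>>> samples = trans_dist.sample((2, 3))
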
@ -13,6 +13,7 @@
# limitations under the License.
# ============================================================================
"""Uniform Distribution"""
import numpy as np
from mindspore.ops import operations as P
from mindspore.ops import composite as C
from mindspore._checkparam import Validator
@ -33,6 +34,9 @@ class Uniform(Distribution):
dtype (mindspore.dtype): The type of the event samples. Default: mstype.float32.
name (str): The name of the distribution. Default: 'Uniform'.
Supported Platforms:
``Ascend`` ``GPU``
Note:
`low` must be strictly less than `high`.
`dist_spec_args` are `high` and `low`.
@ -296,7 +300,8 @@ class Uniform(Distribution):
kl = self.log(high_b - low_b) - self.log(high_a - low_a)
comp = self.logicaland(self.lessequal(
low_b, low_a), self.lessequal(high_a, high_b))
return self.select(comp, kl, self.log(self.zeroslike(kl)))
inf = self.fill(self.dtypeop(kl), self.shape(kl), np.inf)
return self.select(comp, kl, inf)
def _cdf(self, value, low=None, high=None):
r"""

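A hedged sketch of the behavioural change in `_kl_loss` above: when the support of distribution a is not contained in that of b, the KL divergence now evaluates to inf rather than log(0). Values below are illustrative only:
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.nn.probability.distribution as msd
>>> u = msd.Uniform(0.0, 1.0, dtype=mindspore.float32)   # low < high
>>> low_b = Tensor([0.2], dtype=mindspore.float32)
>>> high_b = Tensor([0.8], dtype=mindspore.float32)
>>> kl = u.kl_loss('Uniform', low_b, high_b)   # a's support [0, 1] is not inside b's [0.2, 0.8] -> inf
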