diff --git a/mindspore/nn/probability/distribution/beta.py b/mindspore/nn/probability/distribution/beta.py
index 6d2d31b6ad..b4b7873f12 100644
--- a/mindspore/nn/probability/distribution/beta.py
+++ b/mindspore/nn/probability/distribution/beta.py
@@ -43,84 +43,95 @@ class Beta(Distribution):
         `dtype` must be a float type because Beta distributions are continuous.
 
     Examples:
-        >>> # To initialize a Beta distribution of the concentration1 3.0 and the concentration0 4.0.
+        >>> import mindspore
+        >>> import mindspore.nn as nn
         >>> import mindspore.nn.probability.distribution as msd
-        >>> b = msd.Beta(3.0, 4.0, dtype=mstype.float32)
-        >>>
-        >>> # The following creates two independent Beta distributions.
-        >>> b = msd.Beta([3.0, 3.0], [4.0, 4.0], dtype=mstype.float32)
-        >>>
+        >>> from mindspore import Tensor
+        >>> # To initialize a Beta distribution of the concentration1 3.0 and the concentration0 4.0.
+        >>> b1 = msd.Beta([3.0], [4.0], dtype=mindspore.float32)
         >>> # A Beta distribution can be initilized without arguments.
         >>> # In this case, `concentration1` and `concentration0` must be passed in through arguments.
-        >>> b = msd.Beta(dtype=mstype.float32)
-        >>>
-        >>> # To use a Beta distribution in a network.
-        >>> class net(Cell):
-        ...     def __init__(self):
-        ...         super(net, self).__init__():
-        ...         self.b1 = msd.Beta(1.0, 1.0, dtype=mstype.float32)
-        ...         self.b2 = msd.Beta(dtype=mstype.float32)
-        ...
-        ...     # The following calls are valid in construct.
-        ...     def construct(self, value, concentration1_b, concentration0_b, concentration1_a, concentration0_a):
-        ...
-        ...         # Private interfaces of probability functions corresponding to public interfaces, including
-        ...         # `prob` and `log_prob`, have the same arguments as follows.
-        ...         # Args:
-        ...         #     value (Tensor): the value to be evaluated.
-        ...         #     concentration1 (Tensor): the concentration1 of the distribution. Default: self._concentration1.
-        ...         #     concentration0 (Tensor): the concentration0 of the distribution. Default: self._concentration0.
-        ...
-        ...         # Examples of `prob`.
-        ...         # Similar calls can be made to other probability functions
-        ...         # by replacing 'prob' by the name of the function
-        ...         ans = self.b1.prob(value)
-        ...         # Evaluate with respect to the distribution b.
-        ...         ans = self.b1.prob(value, concentration1_b, concentration0_b)
-        ...         # `concentration1` and `concentration0` must be passed in during function calls
-        ...         ans = self.b2.prob(value, concentration1_a, concentration0_a)
-        ...
-        ...
-        ...         # Functions `mean`, `sd`, `mode`, `var`, and `entropy` have the same arguments.
-        ...         # Args:
-        ...         #     concentration1 (Tensor): the concentration1 of the distribution. Default: self._concentration1.
-        ...         #     concentration0 (Tensor): the concentration0 of the distribution. Default: self._concentration0.
-        ...
-        ...         # Example of `mean`, `sd`, `mode`, `var`, and `entropy` are similar.
-        ...         ans = self.b1.concentration1() # return 1.0
-        ...         ans = self.b1.concentration1(concentration1_b, concentration0_b) # return concentration1_b
-        ...         # `concentration1` and `concentration0` must be passed in during function calls.
-        ...         ans = self.b2.concentration1(concentration1_a, concentration0_a)
-        ...
-        ...
-        ...         # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
-        ...         # Args:
-        ...         #     dist (str): the type of the distributions. Only "Beta" is supported.
-        ...         #     concentration1_b (Tensor): the concentration1 of distribution b.
-        ...         #     concentration0_b (Tensor): the concentration0 of distribution b.
-        ...         #     concentration1_a (Tensor): the concentration1 of distribution a.
-        ...         #         Default: self._concentration1.
-        ...         #     concentration0_a (Tensor): the concentration0 of distribution a.
-        ...         #         Default: self._concentration0.
-        ...
-        ...         # Examples of `kl_loss`. `cross_entropy` is similar.
-        ...         ans = self.b1.kl_loss('Beta', concentration1_b, concentration0_b)
-        ...         ans = self.b1.kl_loss('Beta', concentration1_b, concentration0_b,
-        ...                               concentration1_a, concentration0_a)
-        ...         # Additional `concentration1` and `concentration0` must be passed in.
-        ...         ans = self.b2.kl_loss('Beta', concentration1_b, concentration0_b,
-        ...                               concentration1_a, concentration0_a)
-        ...
-        ...
-        ...         # Examples of `sample`.
-        ...         # Args:
-        ...         #     shape (tuple): the shape of the sample. Default: ()
-        ...         #     concentration1 (Tensor): the concentration1 of the distribution. Default: self._concentration1.
-        ...         #     concentration0 (Tensor): the concentration0 of the distribution. Default: self._concentration0.
-        ...         ans = self.b1.sample()
-        ...         ans = self.b1.sample((2,3))
-        ...         ans = self.b1.sample((2,3), concentration1_b, concentration0_b)
-        ...         ans = self.b2.sample((2,3), concentration1_a, concentration0_a)
+        >>> b2 = msd.Beta(dtype=mindspore.float32)
+        >>> # Here are some tensors used below for testing
+        >>> value = Tensor([0.1, 0.5, 1.5], dtype=mindspore.float32)
+        >>> concentration1_a = Tensor([2.0], dtype=mindspore.float32)
+        >>> concentration0_a = Tensor([2.0, 2.0, 2.0], dtype=mindspore.float32)
+        >>> concentration1_b = Tensor([1.0], dtype=mindspore.float32)
+        >>> concentration0_b = Tensor([1.0, 1.5, 2.0], dtype=mindspore.float32)
+        >>> # Private interfaces of probability functions corresponding to public interfaces, including
+        >>> # `prob` and `log_prob`, have the same arguments as follows.
+        >>> # Args:
+        >>> #     value (Tensor): the value to be evaluated.
+        >>> #     concentration1 (Tensor): the concentration1 of the distribution. Default: self._concentration1.
+        >>> #     concentration0 (Tensor): the concentration0 of the distribution. Default: self._concentration0.
+        >>> # Examples of `prob`.
+        >>> # Similar calls can be made to other probability functions
+        >>> # by replacing 'prob' by the name of the function
+        >>> ans = b1.prob(value)
+        >>> print(ans)
+        [0.43740022 1.8750011 nan]
+        >>> # Evaluate with respect to the distribution b.
+        >>> ans = b1.prob(value, concentration1_b, concentration0_b)
+        >>> print(ans)
+        [0.99999964 1.0606599 nan]
+        >>> # `concentration1` and `concentration0` must be passed in during function calls
+        >>> ans = b2.prob(value, concentration1_a, concentration0_a)
+        >>> print(ans)
+        [0.5400001 1.5000001 nan]
+        >>> # Functions `mean`, `sd`, `mode`, `var`, and `entropy` have the same arguments.
+        >>> # Args:
+        >>> #     concentration1 (Tensor): the concentration1 of the distribution. Default: self._concentration1.
+        >>> #     concentration0 (Tensor): the concentration0 of the distribution. Default: self._concentration0.
+        >>> # Examples of `mean`, `sd`, `mode`, `var`, and `entropy` are similar.
+        >>> ans = b1.mean()
+        >>> print(ans)
+        [0.42857143]
+        >>> ans = b1.mean(concentration1_b, concentration0_b)
+        >>> print(ans)
+        [0.5 0.4 0.33333334]
+        >>> # `concentration1` and `concentration0` must be passed in during function calls.
+        >>> ans = b2.mean(concentration1_a, concentration0_a)
+        >>> print(ans)
+        [0.5 0.5 0.5]
+        >>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
+        >>> # Args:
+        >>> #     dist (str): the type of the distributions. Only "Beta" is supported.
+        >>> #     concentration1_b (Tensor): the concentration1 of distribution b.
+        >>> #     concentration0_b (Tensor): the concentration0 of distribution b.
+        >>> #     concentration1_a (Tensor): the concentration1 of distribution a.
+        >>> #         Default: self._concentration1.
+        >>> #     concentration0_a (Tensor): the concentration0 of distribution a.
+        >>> #         Default: self._concentration0.
+        >>> # Examples of `kl_loss`. `cross_entropy` is similar.
+        >>> ans = b1.kl_loss('Beta', concentration1_b, concentration0_b)
+        >>> print(ans)
+        [0.34434414 0.24721336 0.26786423]
+        >>> ans = b1.kl_loss('Beta', concentration1_b, concentration0_b,
+        ...                  concentration1_a, concentration0_a)
+        >>> print(ans)
+        [0.12509346 0.13629508 0.26527953]
+        >>> # Additional `concentration1` and `concentration0` must be passed in.
+        >>> ans = b2.kl_loss('Beta', concentration1_b, concentration0_b,
+        ...                  concentration1_a, concentration0_a)
+        >>> print(ans)
+        [0.12509346 0.13629508 0.26527953]
+        >>> # Examples of `sample`.
+        >>> # Args:
+        >>> #     shape (tuple): the shape of the sample. Default: ()
+        >>> #     concentration1 (Tensor): the concentration1 of the distribution. Default: self._concentration1.
+        >>> #     concentration0 (Tensor): the concentration0 of the distribution. Default: self._concentration0.
+        >>> ans = b1.sample()
+        >>> print(ans.shape)
+        (1,)
+        >>> ans = b1.sample((2,3))
+        >>> print(ans.shape)
+        (2, 3, 1)
+        >>> ans = b1.sample((2,3), concentration1_b, concentration0_b)
+        >>> print(ans.shape)
+        (2, 3, 3)
+        >>> ans = b2.sample((2,3), concentration1_a, concentration0_a)
+        >>> print(ans.shape)
+        (2, 3, 3)
     """
 
     def __init__(self,
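The doctest outputs in the beta.py hunk above are easy to sanity-check outside MindSpore; a minimal sketch, assuming SciPy is available (SciPy is an assumption of this note, not something the patch touches). `b1` corresponds to Beta(a=3, b=4):

# Cross-check of the Beta doctest outputs (illustrative only; assumes scipy).
from scipy.stats import beta

print(beta.pdf([0.1, 0.5], 3.0, 4.0))  # ~[0.4374 1.875], as in b1.prob(value)
print(beta.mean(3.0, 4.0))             # 0.42857... = 3 / (3 + 4), as in b1.mean()
# 1.5 lies outside the support (0, 1): the doctest prints nan there,
# while SciPy returns 0.0 instead.
print(beta.pdf(1.5, 3.0, 4.0))         # 0.0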
diff --git a/mindspore/nn/probability/distribution/categorical.py b/mindspore/nn/probability/distribution/categorical.py
index 2af21d9f49..6b986ce883 100644
--- a/mindspore/nn/probability/distribution/categorical.py
+++ b/mindspore/nn/probability/distribution/categorical.py
@@ -154,6 +154,7 @@ class Categorical(Distribution):
         self.expand_dim = P.ExpandDims()
         self.fill = P.Fill()
         self.gather = P.GatherNd()
+        self.greater = P.Greater()
         self.issubclass = P.IsSubClass()
         self.less = P.Less()
         self.log = log_generic
@@ -277,16 +278,21 @@ class Categorical(Distribution):
             probs (Tensor): Event probabilities. Default: self.probs.
         """
         value = self._check_value(value, 'value')
-        # cast value to int to find the right integer to compute index
-        if self.issubclass(self.dtype, mstype.float_):
-            value = self.cast(value, self.index_type)
-        else:
-            value = self.cast(value, self.dtype)
-        # cast int to float for the broadcasting below
-        value = self.cast(value, mstype.float32)
+
         probs = self._check_param_type(probs)
         logits = self.log(probs)
 
+        # find the right integer to compute index
+        # here we simulate casting to int while keeping the float dtype
+        value = self.cast(value, self.dtypeop(probs))
+
+        zeros = self.fill(self.dtypeop(value), self.shape(value), 0.0)
+        between_zero_neone = self.logicand(self.less(value, 0.),
+                                           self.greater(value, -1.))
+        value = self.select(between_zero_neone,
+                            zeros,
+                            P.Floor()(value))
+
         # handle the case when value is of shape () and probs is a scalar batch
         drop_dim = False
         if self.shape(value) == () and self.shape(probs)[:-1] == ():
@@ -314,8 +320,6 @@ class Categorical(Distribution):
         out_of_bound = self.squeeze_last_axis(self.logicor(\
                         self.less(value, 0.0), self.less(num_classes-1, value)))
         # deal with the case the there is only one class.
-        zeros = self.fill(mstype.float32, self.shape(out_of_bound), 0.0)
-        out_of_bound = self.logicand(out_of_bound, self.less(zeros, num_classes-1))
         value_clipped = self.clip_by_value(value, 0.0, num_classes - 1)
         value_clipped = self.cast(value_clipped, self.index_type)
         # create index from 0 ... NumOfLabels
@@ -341,12 +345,19 @@ class Categorical(Distribution):
             probs (Tensor): Event probabilities. Default: self.probs.
         """
         value = self._check_value(value, 'value')
-        if self.issubclass(self.dtype, mstype.float_):
-            value = self.cast(value, self.index_type)
-        else:
-            value = self.cast(value, self.dtype)
         probs = self._check_param_type(probs)
 
+        # find the right integer to compute index
+        # here we simulate casting to int while keeping the float dtype
+        value = self.cast(value, self.dtypeop(probs))
+
+        zeros = self.fill(self.dtypeop(value), self.shape(value), 0.0)
+        between_zero_neone = self.logicand(self.less(value, 0.),
+                                           self.greater(value, -1.))
+        value = self.select(between_zero_neone,
+                            zeros,
+                            P.Floor()(value))
+
         # handle the case when value is of shape () and probs is a scalar batch
         drop_dim = False
         if self.shape(value) == () and self.shape(probs)[:-1] == ():
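Both categorical.py hunks replace the old int cast with a floor-plus-select sequence, so `value` keeps its float dtype while behaving like an integer class index. A plain-NumPy sketch of what the new op sequence computes (illustrative only; the real code assembles MindSpore ops, and the rationale below is this note's reading of the change, not text from the patch):

# NumPy model of the value normalization added to _log_prob and _cdf above
# (illustrative only; not the MindSpore op graph itself).
import numpy as np

def normalize_value(value):
    # Floor while keeping the float dtype. floor() and an int cast disagree
    # on negative non-integers; the only such values that an int cast would
    # still map to a valid class (index 0) are those in (-1, 0), so that
    # interval is routed to 0 explicitly.
    value = np.asarray(value, dtype=np.float32)
    between_zero_neone = (value < 0.) & (value > -1.)
    return np.where(between_zero_neone, 0., np.floor(value))

print(normalize_value([-0.5, 0.3, 1.9, -1.5]))  # [ 0.  0.  1. -2.]
# -1.5 floors to -2 rather than truncating to -1, but either way it stays
# negative and is caught by the out-of-bound check later in _log_prob.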
diff --git a/mindspore/nn/probability/distribution/exponential.py b/mindspore/nn/probability/distribution/exponential.py
index b92f030ba5..3582f96160 100644
--- a/mindspore/nn/probability/distribution/exponential.py
+++ b/mindspore/nn/probability/distribution/exponential.py
@@ -40,12 +40,10 @@ class Exponential(Distribution):
 
     Examples:
         >>> import mindspore
-        >>> import mindspore.context as context
        >>> import mindspore.nn as nn
         >>> import mindspore.nn.probability.distribution as msd
         >>> from mindspore import Tensor
-        >>> context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
-        >>> # To initialize a Bernoulli distribution of the probability 0.5.
+        >>> # To initialize an Exponential distribution of the rate 0.5.
         >>> e1 = msd.Exponential(0.5, dtype=mindspore.float32)
         >>> # An Exponential distribution can be initialized without arguments.
         >>> # In this case, `rate` must be passed in through `args` during function calls.
diff --git a/mindspore/nn/probability/distribution/gamma.py b/mindspore/nn/probability/distribution/gamma.py
index 93c3fea834..056fe02bcc 100644
--- a/mindspore/nn/probability/distribution/gamma.py
+++ b/mindspore/nn/probability/distribution/gamma.py
@@ -43,80 +43,86 @@ class Gamma(Distribution):
         `dtype` must be a float type because Gamma distributions are continuous.
 
     Examples:
-        >>> # To initialize a Gamma distribution of the concentration 3.0 and the rate 4.0.
+        >>> import mindspore
+        >>> import mindspore.nn as nn
         >>> import mindspore.nn.probability.distribution as msd
-        >>> g = msd.Gamma(3.0, 4.0, dtype=mstype.float32)
-        >>>
-        >>> # The following creates two independent Gamma distributions.
-        >>> g = msd.Gamma([3.0, 3.0], [4.0, 4.0], dtype=mstype.float32)
-        >>>
+        >>> from mindspore import Tensor
+        >>> # To initialize a Gamma distribution of the concentration 3.0 and the rate 4.0.
+        >>> g1 = msd.Gamma([3.0], [4.0], dtype=mindspore.float32)
         >>> # A Gamma distribution can be initilized without arguments.
         >>> # In this case, `concentration` and `rate` must be passed in through arguments.
-        >>> g = msd.Gamma(dtype=mstype.float32)
+        >>> g2 = msd.Gamma(dtype=mindspore.float32)
+        >>> # Here are some tensors used below for testing
+        >>> value = Tensor([1.0, 2.0, 3.0], dtype=mindspore.float32)
+        >>> concentration_a = Tensor([2.0], dtype=mindspore.float32)
+        >>> rate_a = Tensor([2.0, 2.0, 2.0], dtype=mindspore.float32)
+        >>> concentration_b = Tensor([1.0], dtype=mindspore.float32)
+        >>> rate_b = Tensor([1.0, 1.5, 2.0], dtype=mindspore.float32)
         >>>
-        >>> # To use a Gamma distribution in a network.
-        >>> class net(Cell):
-        ...     def __init__(self):
-        ...         super(net, self).__init__():
-        ...         self.g1 = msd.Gamma(1.0, 1.0, dtype=mstype.float32)
-        ...         self.g2 = msd.Gamma(dtype=mstype.float32)
-        ...
-        ...     # The following calls are valid in construct.
-        ...     def construct(self, value, concentration_b, rate_b, concentration_a, rate_a):
-        ...
-        ...         # Private interfaces of probability functions corresponding to public interfaces, including
-        ...         # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
-        ...         # Args:
-        ...         #     value (Tensor): the value to be evaluated.
-        ...         #     concentration (Tensor): the concentration of the distribution. Default: self._concentration.
-        ...         #     rate (Tensor): the rate of the distribution. Default: self._rate.
-        ...
-        ...         # Examples of `prob`.
-        ...         # Similar calls can be made to other probability functions
-        ...         # by replacing 'prob' by the name of the function
-        ...         ans = self.g1.prob(value)
-        ...         # Evaluate with respect to the distribution b.
-        ...         ans = self.g1.prob(value, concentration_b, rate_b)
-        ...         # `concentration` and `rate` must be passed in during function calls
-        ...         ans = self.g2.prob(value, concentration_a, rate_a)
-        ...
-        ...
-        ...         # Functions `mean`, `sd`, `mode`, `var`, and `entropy` have the same arguments.
-        ...         # Args:
-        ...         #     concentration (Tensor): the concentration of the distribution. Default: self._concentration.
-        ...         #     rate (Tensor): the rate of the distribution. Default: self._rate.
-        ...
-        ...         # Example of `mean`, `sd`, `mode`, `var`, and `entropy` are similar.
-        ...         ans = self.g1.concentration() # return 1.0
-        ...         ans = self.g1.concentration(concentration_b, rate_b) # return concentration_b
-        ...         # `concentration` and `rate` must be passed in during function calls.
-        ...         ans = self.g2.concentration(concentration_a, rate_a)
-        ...
-        ...
-        ...         # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
-        ...         # Args:
-        ...         #     dist (str): the type of the distributions. Only "Gamma" is supported.
-        ...         #     concentration_b (Tensor): the concentration of distribution b.
-        ...         #     rate_b (Tensor): the rate of distribution b.
-        ...         #     concentration_a (Tensor): the concentration of distribution a. Default: self._concentration.
-        ...         #     rate_a (Tensor): the rate of distribution a. Default: self._rate.
-        ...
-        ...         # Examples of `kl_loss`. `cross_entropy` is similar.
-        ...         ans = self.g1.kl_loss('Gamma', concentration_b, rate_b)
-        ...         ans = self.g1.kl_loss('Gamma', concentration_b, rate_b, concentration_a, rate_a)
-        ...         # Additional `concentration` and `rate` must be passed in.
-        ...         ans = self.g2.kl_loss('Gamma', concentration_b, rate_b, concentration_a, rate_a)
-        ...
-        ...
-        ...         # Examples of `sample`.
-        ...         # Args:
-        ...         #     shape (tuple): the shape of the sample. Default: ()
-        ...         #     concentration (Tensor): the concentration of the distribution. Default: self._concentration.
-        ...         #     rate (Tensor): the rate of the distribution. Default: self._rate.
-        ...         ans = self.g1.sample()
-        ...         ans = self.g1.sample((2,3))
-        ...         ans = self.g1.sample((2,3), concentration_b, rate_b)
-        ...         ans = self.g2.sample((2,3), concentration_a, rate_a)
+        >>> # Private interfaces of probability functions corresponding to public interfaces, including
+        >>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
+        >>> # Args:
+        >>> #     value (Tensor): the value to be evaluated.
+        >>> #     concentration (Tensor): the concentration of the distribution. Default: self._concentration.
+        >>> #     rate (Tensor): the rate of the distribution. Default: self._rate.
+        >>> # Examples of `prob`.
+        >>> # Similar calls can be made to other probability functions
+        >>> # by replacing 'prob' by the name of the function
+        >>> # ans = g1.prob(value)
+        >>> # # Evaluate with respect to the distribution b.
+        >>> # ans = g1.prob(value, concentration_b, rate_b)
+        >>> # # `concentration` and `rate` must be passed in during function calls
+        >>> # ans = g2.prob(value, concentration_a, rate_a)
+        >>> # Functions `mean`, `sd`, `mode`, `var`, and `entropy` have the same arguments.
+        >>> # Args:
+        >>> #     concentration (Tensor): the concentration of the distribution. Default: self._concentration.
+        >>> #     rate (Tensor): the rate of the distribution. Default: self._rate.
+        >>> # Examples of `mean`, `sd`, `mode`, `var`, and `entropy` are similar.
+        >>> ans = g1.mean()
+        >>> print(ans)
+        [0.75]
+        >>> ans = g1.mean(concentration_b, rate_b)
+        >>> print(ans)
+        [1. 0.6666667 0.5 ]
+        >>> # `concentration` and `rate` must be passed in during function calls.
+        >>> ans = g2.mean(concentration_a, rate_a)
+        >>> print(ans)
+        [1. 1. 1.]
+        >>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
+        >>> # Args:
+        >>> #     dist (str): the type of the distributions. Only "Gamma" is supported.
+        >>> #     concentration_b (Tensor): the concentration of distribution b.
+        >>> #     rate_b (Tensor): the rate of distribution b.
+        >>> #     concentration_a (Tensor): the concentration of distribution a. Default: self._concentration.
+        >>> #     rate_a (Tensor): the rate of distribution a. Default: self._rate.
+        >>> # Examples of `kl_loss`. `cross_entropy` is similar.
+        >>> ans = g1.kl_loss('Gamma', concentration_b, rate_b)
+        >>> print(ans)
+        [0.28871584 0.2582507 0.34556866]
+        >>> ans = g1.kl_loss('Gamma', concentration_b, rate_b, concentration_a, rate_a)
+        >>> print(ans)
+        [0.11593175 0.21046662 0.42278457]
+        >>> # Additional `concentration` and `rate` must be passed in.
+        >>> ans = g2.kl_loss('Gamma', concentration_b, rate_b, concentration_a, rate_a)
+        >>> print(ans)
+        [0.11593175 0.21046662 0.42278457]
+        >>> # Examples of `sample`.
+        >>> # Args:
+        >>> #     shape (tuple): the shape of the sample. Default: ()
+        >>> #     concentration (Tensor): the concentration of the distribution. Default: self._concentration.
+        >>> #     rate (Tensor): the rate of the distribution. Default: self._rate.
+        >>> ans = g1.sample()
+        >>> print(ans.shape)
+        (1,)
+        >>> ans = g1.sample((2,3))
+        >>> print(ans.shape)
+        (2, 3, 1)
+        >>> ans = g1.sample((2,3), concentration_b, rate_b)
+        >>> print(ans.shape)
+        (2, 3, 3)
+        >>> ans = g2.sample((2,3), concentration_a, rate_a)
+        >>> print(ans.shape)
+        (2, 3, 3)
     """
 
     def __init__(self,
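The Gamma numbers above follow from the closed forms mean = concentration / rate and the Gamma-to-Gamma KL divergence. A sketch reproducing g1.mean() and g1.kl_loss (illustrative only; NumPy and SciPy are assumptions of this note):

# Cross-check of the Gamma doctest outputs (illustrative only).
import numpy as np
from scipy.special import digamma, gammaln

def kl_gamma(a1, r1, a2, r2):
    # KL(Gamma(a1, r1) || Gamma(a2, r2)) for shape a and rate r.
    return ((a1 - a2) * digamma(a1) - gammaln(a1) + gammaln(a2)
            + a2 * (np.log(r1) - np.log(r2)) + a1 * (r2 - r1) / r1)

print(3.0 / 4.0)  # 0.75, as in g1.mean()
print(kl_gamma(3.0, 4.0, 1.0, np.array([1.0, 1.5, 2.0])))
# ~[0.2887158 0.2582507 0.3455687], as in g1.kl_loss('Gamma', ...)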
diff --git a/mindspore/nn/probability/distribution/geometric.py b/mindspore/nn/probability/distribution/geometric.py
index b86f3f4db2..ffc8ce5a9f 100644
--- a/mindspore/nn/probability/distribution/geometric.py
+++ b/mindspore/nn/probability/distribution/geometric.py
@@ -44,9 +44,9 @@ class Geometric(Distribution):
         >>> import mindspore.nn as nn
         >>> import mindspore.nn.probability.distribution as msd
         >>> from mindspore import Tensor
-        >>> # To initialize a Bernoulli distribution of the probability 0.5.
+        >>> # To initialize a Geometric distribution of the probability 0.5.
         >>> g1 = msd.Geometric(0.5, dtype=mindspore.int32)
-        >>> # A Bernoulli distribution can be initialized without arguments.
+        >>> # A Geometric distribution can be initialized without arguments.
         >>> # In this case, `probs` must be passed in through arguments during function calls.
         >>> g2 = msd.Geometric(dtype=mindspore.int32)
         >>>
diff --git a/mindspore/nn/probability/distribution/gumbel.py b/mindspore/nn/probability/distribution/gumbel.py
index f6e8257e91..2cb134ad89 100644
--- a/mindspore/nn/probability/distribution/gumbel.py
+++ b/mindspore/nn/probability/distribution/gumbel.py
@@ -47,7 +47,7 @@ class Gumbel(TransformedDistribution):
         >>> import mindspore.nn as nn
         >>> import mindspore.nn.probability.distribution as msd
         >>> from mindspore import Tensor
-        >>> context.set_context(mode=1, device_target="GPU")
+        >>> context.set_context(mode=1)
         >>> # To initialize a Gumbel distribution of `loc` 3.0 and `scale` 4.0.
         >>> gumbel = msd.Gumbel(3.0, 4.0, dtype=mindspore.float32)
         >>> # Private interfaces of probability functions corresponding to public interfaces, including
@@ -236,8 +236,8 @@ class Gumbel(TransformedDistribution):
         scale_b = self._check_value(scale_b, 'scale_b')
         loc_b = self.cast(loc_b, self.parameter_type)
         scale_b = self.cast(scale_b, self.parameter_type)
-        return self.log(scale_b) - self.log(self.scale) +\
-               np.euler_gamma * (self.scale / scale_b - 1.) +\
+        return self.log(scale_b / self.scale) +\
+               np.euler_gamma * (self.scale / scale_b - 1.) + (self.loc - loc_b) / scale_b +\
                self.expm1((loc_b - self.loc) / scale_b + self.lgamma(self.scale / scale_b + 1.))
 
     def _sample(self, shape=()):
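The gumbel.py change is a genuine bug fix rather than a refactor: the old `_kl_loss` dropped the `(self.loc - loc_b) / scale_b` term, so it was only correct when the two locations coincided. The corrected closed form can be checked by Monte Carlo; a sketch, assuming NumPy and SciPy (neither is touched by the patch):

# Monte Carlo check of the corrected Gumbel KL divergence (illustrative only).
import numpy as np
from scipy.special import gammaln

def kl_gumbel(loc, scale, loc_b, scale_b):
    # Mirrors the fixed return expression in Gumbel._kl_loss.
    return (np.log(scale_b / scale)
            + np.euler_gamma * (scale / scale_b - 1.)
            + (loc - loc_b) / scale_b
            + np.expm1((loc_b - loc) / scale_b + gammaln(scale / scale_b + 1.)))

rng = np.random.default_rng(0)
loc, scale, loc_b, scale_b = 3.0, 4.0, 1.0, 2.0
x = rng.gumbel(loc, scale, size=1_000_000)
z, z_b = (x - loc) / scale, (x - loc_b) / scale_b
log_p = -z - np.exp(-z) - np.log(scale)       # log pdf of Gumbel(loc, scale) at x
log_q = -z_b - np.exp(-z_b) - np.log(scale_b)
print(np.mean(log_p - log_q))                 # sample estimate of KL(p || q)
print(kl_gumbel(loc, scale, loc_b, scale_b))  # analytic value; should agree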
""" - loc, scale = self._check_param_type(loc, scale) - return scale * self.const(np.pi) / self.sqrt(self.const(3.0)) + _, scale = self._check_param_type(loc, scale) + return scale * self.consttensor(self.sd_const, self.dtypeop(scale)) def _entropy(self, loc=None, scale=None): r""" diff --git a/mindspore/nn/probability/distribution/poisson.py b/mindspore/nn/probability/distribution/poisson.py index d726f197e0..592bd78c26 100644 --- a/mindspore/nn/probability/distribution/poisson.py +++ b/mindspore/nn/probability/distribution/poisson.py @@ -39,62 +39,70 @@ class Poisson(Distribution): `dist_spec_args` is `rate`. Examples: - >>> # To initialize an Poisson distribution of the rate 0.5. + >>> import mindspore + >>> import mindspore.nn as nn >>> import mindspore.nn.probability.distribution as msd - >>> p = msd.Poisson(0.5, dtype=mstype.float32) - >>> - >>> # The following creates two independent Poisson distributions. - >>> p = msd.Poisson([0.5, 0.5], dtype=mstype.float32) - >>> + >>> from mindspore import Tensor + >>> # To initialize an Poisson distribution of the rate 0.5. + >>> p1 = msd.Poisson(0.5, dtype=mindspore.float32) >>> # An Poisson distribution can be initilized without arguments. >>> # In this case, `rate` must be passed in through `args` during function calls. - >>> p = msd.Poisson(dtype=mstype.float32) + >>> p2 = msd.Poisson(dtype=mindspore.float32) + >>> + >>> # Here are some tensors used below for testing + >>> value = Tensor([1, 2, 3], dtype=mindspore.int32) + >>> rate_a = Tensor([0.6], dtype=mindspore.float32) + >>> rate_b = Tensor([0.2, 0.5, 0.4], dtype=mindspore.float32) >>> - >>> # To use an Poisson distribution in a network. - >>> class net(Cell): - ... def __init__(self): - ... super(net, self).__init__(): - ... self.p1 = msd.Poisson(0.5, dtype=mstype.float32) - ... self.p2 = msd.Poisson(dtype=mstype.float32) - ... - ... # All the following calls in construct are valid. - ... def construct(self, value, rate_b, rate_a): - ... - ... # Private interfaces of probability functions corresponding to public interfaces, including - ... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows. - ... # Args: - ... # value (Tensor): the value to be evaluated. - ... # rate (Tensor): the rate of the distribution. Default: self.rate. - ... - ... # Examples of `prob`. - ... # Similar calls can be made to other probability functions - ... # by replacing `prob` by the name of the function. - ... ans = self.p1.prob(value) - ... # Evaluate with respect to distribution b. - ... ans = self.p1.prob(value, rate_b) - ... # `rate` must be passed in during function calls. - ... ans = self.p2.prob(value, rate_a) - ... - ... - ... # Functions `mean`, `mode`, `sd`, and 'var' have the same arguments as follows. - ... # Args: - ... # rate (Tensor): the rate of the distribution. Default: self.rate. - ... - ... # Examples of `mean`, `sd`, `mode`, `var`, and `entropy` are similar. - ... ans = self.p1.mean() # return 2 - ... ans = self.p1.mean(rate_b) # return 1 / rate_b - ... # `rate` must be passed in during function calls. - ... ans = self.p2.mean(rate_a) - ... - ... - ... # Examples of `sample`. - ... # Args: - ... # shape (tuple): the shape of the sample. Default: () - ... # probs1 (Tensor): the rate of the distribution. Default: self.rate. - ... ans = self.p1.sample() - ... ans = self.p1.sample((2,3)) - ... ans = self.p1.sample((2,3), rate_b) - ... 
diff --git a/mindspore/nn/probability/distribution/poisson.py b/mindspore/nn/probability/distribution/poisson.py
index d726f197e0..592bd78c26 100644
--- a/mindspore/nn/probability/distribution/poisson.py
+++ b/mindspore/nn/probability/distribution/poisson.py
@@ -39,62 +39,70 @@ class Poisson(Distribution):
         `dist_spec_args` is `rate`.
 
     Examples:
-        >>> # To initialize an Poisson distribution of the rate 0.5.
+        >>> import mindspore
+        >>> import mindspore.nn as nn
         >>> import mindspore.nn.probability.distribution as msd
-        >>> p = msd.Poisson(0.5, dtype=mstype.float32)
-        >>>
-        >>> # The following creates two independent Poisson distributions.
-        >>> p = msd.Poisson([0.5, 0.5], dtype=mstype.float32)
-        >>>
+        >>> from mindspore import Tensor
+        >>> # To initialize a Poisson distribution of the rate 0.5.
+        >>> p1 = msd.Poisson(0.5, dtype=mindspore.float32)
         >>> # An Poisson distribution can be initilized without arguments.
         >>> # In this case, `rate` must be passed in through `args` during function calls.
-        >>> p = msd.Poisson(dtype=mstype.float32)
+        >>> p2 = msd.Poisson(dtype=mindspore.float32)
+        >>>
+        >>> # Here are some tensors used below for testing
+        >>> value = Tensor([1, 2, 3], dtype=mindspore.int32)
+        >>> rate_a = Tensor([0.6], dtype=mindspore.float32)
+        >>> rate_b = Tensor([0.2, 0.5, 0.4], dtype=mindspore.float32)
         >>>
-        >>> # To use an Poisson distribution in a network.
-        >>> class net(Cell):
-        ...     def __init__(self):
-        ...         super(net, self).__init__():
-        ...         self.p1 = msd.Poisson(0.5, dtype=mstype.float32)
-        ...         self.p2 = msd.Poisson(dtype=mstype.float32)
-        ...
-        ...     # All the following calls in construct are valid.
-        ...     def construct(self, value, rate_b, rate_a):
-        ...
-        ...         # Private interfaces of probability functions corresponding to public interfaces, including
-        ...         # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows.
-        ...         # Args:
-        ...         #     value (Tensor): the value to be evaluated.
-        ...         #     rate (Tensor): the rate of the distribution. Default: self.rate.
-        ...
-        ...         # Examples of `prob`.
-        ...         # Similar calls can be made to other probability functions
-        ...         # by replacing `prob` by the name of the function.
-        ...         ans = self.p1.prob(value)
-        ...         # Evaluate with respect to distribution b.
-        ...         ans = self.p1.prob(value, rate_b)
-        ...         # `rate` must be passed in during function calls.
-        ...         ans = self.p2.prob(value, rate_a)
-        ...
-        ...
-        ...         # Functions `mean`, `mode`, `sd`, and 'var' have the same arguments as follows.
-        ...         # Args:
-        ...         #     rate (Tensor): the rate of the distribution. Default: self.rate.
-        ...
-        ...         # Examples of `mean`, `sd`, `mode`, `var`, and `entropy` are similar.
-        ...         ans = self.p1.mean() # return 2
-        ...         ans = self.p1.mean(rate_b) # return 1 / rate_b
-        ...         # `rate` must be passed in during function calls.
-        ...         ans = self.p2.mean(rate_a)
-        ...
-        ...
-        ...         # Examples of `sample`.
-        ...         # Args:
-        ...         #     shape (tuple): the shape of the sample. Default: ()
-        ...         #     probs1 (Tensor): the rate of the distribution. Default: self.rate.
-        ...         ans = self.p1.sample()
-        ...         ans = self.p1.sample((2,3))
-        ...         ans = self.p1.sample((2,3), rate_b)
-        ...         ans = self.p2.sample((2,3), rate_a)
+        >>> # Private interfaces of probability functions corresponding to public interfaces, including
+        >>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows.
+        >>> # Args:
+        >>> #     value (Tensor): the value to be evaluated.
+        >>> #     rate (Tensor): the rate of the distribution. Default: self.rate.
+        >>> # Examples of `prob`.
+        >>> # Similar calls can be made to other probability functions
+        >>> # by replacing `prob` by the name of the function.
+        >>> ans = p1.prob(value)
+        >>> print(ans)
+        [0.3032652 0.0758163 0.01263604]
+        >>> # Evaluate with respect to distribution b.
+        >>> ans = p1.prob(value, rate_b)
+        >>> print(ans)
+        [0.16374607 0.0758163 0.00715008]
+        >>> # `rate` must be passed in during function calls.
+        >>> ans = p2.prob(value, rate_a)
+        >>> print(ans)
+        [0.32928684 0.09878606 0.01975721]
+        >>> # Functions `mean`, `mode`, `sd`, and 'var' have the same arguments as follows.
+        >>> # Args:
+        >>> #     rate (Tensor): the rate of the distribution. Default: self.rate.
+        >>> # Examples of `mean`, `sd`, `mode`, `var`, and `entropy` are similar.
+        >>> ans = p1.mean()  # return the rate
+        >>> print(ans)
+        0.5
+        >>> ans = p1.mean(rate_b)  # return rate_b
+        >>> print(ans)
+        [0.2 0.5 0.4]
+        >>> # `rate` must be passed in during function calls.
+        >>> ans = p2.mean(rate_a)
+        >>> print(ans)
+        [0.6]
+        >>> # Examples of `sample`.
+        >>> # Args:
+        >>> #     shape (tuple): the shape of the sample. Default: ()
+        >>> #     probs1 (Tensor): the rate of the distribution. Default: self.rate.
+        >>> ans = p1.sample()
+        >>> print(ans.shape)
+        ()
+        >>> ans = p1.sample((2,3))
+        >>> print(ans.shape)
+        (2, 3)
+        >>> ans = p1.sample((2,3), rate_b)
+        >>> print(ans.shape)
+        (2, 3, 3)
+        >>> ans = p2.sample((2,3), rate_a)
+        >>> print(ans.shape)
+        (2, 3, 1)
     """
 
     def __init__(self,
diff --git a/mindspore/nn/probability/distribution/uniform.py b/mindspore/nn/probability/distribution/uniform.py
index 192110a708..c3d036eb04 100644
--- a/mindspore/nn/probability/distribution/uniform.py
+++ b/mindspore/nn/probability/distribution/uniform.py
@@ -44,7 +44,7 @@ class Uniform(Distribution):
         >>> import mindspore.nn as nn
         >>> import mindspore.nn.probability.distribution as msd
         >>> from mindspore import Tensor
-        >>> context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+        >>> context.set_context(mode=context.GRAPH_MODE)
         >>> # To initialize a Uniform distribution of the lower bound 0.0 and the higher bound 1.0.
         >>> u1 = msd.Uniform(0.0, 1.0, dtype=mindspore.float32)
         >>> # A Uniform distribution can be initialized without arguments.
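Finally, the doctest outputs in the poisson.py hunk above can be cross-checked the same way; a short sketch, assuming SciPy (MindSpore's `rate` maps to SciPy's `mu` shape parameter):

# Cross-check of the Poisson doctest outputs (illustrative only).
from scipy.stats import poisson

print(poisson.pmf([1, 2, 3], 0.5))  # ~[0.30327 0.07582 0.01264], as in p1.prob(value)
print(poisson.mean(0.5))            # 0.5 -- the Poisson mean equals the rate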