From 8107cb828341e9ad416b3f38a22565d24387151c Mon Sep 17 00:00:00 2001 From: hedongdong Date: Thu, 28 Jan 2021 19:53:05 +0800 Subject: [PATCH] [Docs] update formulas for math and array operators --- mindspore/ops/composite/clip_ops.py | 11 ++- mindspore/ops/operations/array_ops.py | 136 +++++++++++++++++++++----- mindspore/ops/operations/math_ops.py | 66 +++++++++---- 3 files changed, 168 insertions(+), 45 deletions(-) diff --git a/mindspore/ops/composite/clip_ops.py b/mindspore/ops/composite/clip_ops.py index 14fa798456..fa8221b62e 100644 --- a/mindspore/ops/composite/clip_ops.py +++ b/mindspore/ops/composite/clip_ops.py @@ -32,12 +32,21 @@ def _check_shape(input_shape, out_shape): def clip_by_value(x, clip_value_min, clip_value_max): - """ + r""" Clips tensor values to a specified min and max. Limits the value of :math:`x` to a range, whose lower limit is 'clip_value_min' and upper limit is 'clip_value_max'. + .. math:: + + out_i= \left\{ + \begin{array}{align} + clip\_value_{max} & \text{ if } x_i\ge clip\_value_{max} \\ + x_i & \text{ if } clip\_value_{min} \lt x_i \lt clip\_value_{max} \\ + clip\_value_{min} & \text{ if } x_i \le clip\_value_{min} \\ + \end{array}\right. + Note: 'clip_value_min' needs to be less than or equal to 'clip_value_max'. diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py index 66e98c5390..b513720e7d 100644 --- a/mindspore/ops/operations/array_ops.py +++ b/mindspore/ops/operations/array_ops.py @@ -2096,14 +2096,13 @@ class Concat(PrimitiveWithInfer): Connect input tensors along with the given axis. - Note: - The input data is a tuple of tensors. These tensors have the same rank `R`. Set the given axis as `m`, and - :math:`0 \le m < R`. Set the number of input tensors as `N`. For the :math:`i`-th tensor :math:`t_i`, it has - the shape of :math:`(x_1, x_2, ..., x_{mi}, ..., x_R)`. :math:`x_{mi}` is the :math:`m`-th dimension of the - :math:`i`-th tensor. 
Then, the shape of the output tensor is + The input data is a tuple of tensors. These tensors have the same rank `R`. Set the given axis as `m`, and + :math:`0 \le m < R`. Set the number of input tensors as `N`. For the :math:`i`-th tensor :math:`t_i`, it has + the shape of :math:`(x_1, x_2, ..., x_{mi}, ..., x_R)`. :math:`x_{mi}` is the :math:`m`-th dimension of the + :math:`i`-th tensor. Then, the shape of the output tensor is - .. math:: - (x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R) + .. math:: + (x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R) Args: axis (int): The specified axis. Default: 0. @@ -2980,14 +2979,24 @@ class Eye(PrimitiveWithInfer): class ScatterNd(PrimitiveWithInfer): - """ + r""" Scatters a tensor into a new tensor depending on the specified indices. - Creates an empty tensor, and set values by scattering the update tensor depending on indices. + Creates an empty tensor with the given `shape`, and set values by scattering the update tensor depending on indices. + + The empty tensor has rank P and `indices` has rank Q where `Q >= 2`. + + `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)` where `N <= P`. + + The last dimension of `indices` (with length `N` ) indicates slices along the `N` th dimension of the empty tensor. + + `updates` is a tensor of rank `Q-1+P-N`. Its shape is: :math:`(i_0, i_1, ..., i_{Q-2}, shape_N, ..., shape_{P-1})`. Inputs: - **indices** (Tensor) - The index of scattering in the new tensor with int32 data type. - - **update** (Tensor) - The source Tensor to be scattered. + The rank of indices must be at least 2 and `indices_shape[-1] <= len(shape)`. + - **updates** (Tensor) - The source Tensor to be scattered. + It has shape `indices_shape[:-1] + shape[indices_shape[-1]:]`. - **shape** (tuple[int]) - Define the shape of the output tensor, has the same type as indices. 
Outputs: @@ -2999,9 +3008,9 @@ class ScatterNd(PrimitiveWithInfer): Examples: >>> op = ops.ScatterNd() >>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32) - >>> update = Tensor(np.array([3.2, 1.1]), mindspore.float32) + >>> updates = Tensor(np.array([3.2, 1.1]), mindspore.float32) >>> shape = (3, 3) - >>> output = op(indices, update, shape) + >>> output = op(indices, updates, shape) >>> print(output) [[0. 3.2 0. ] [0. 1.1 0. ] @@ -3167,11 +3176,17 @@ class TensorScatterUpdate(PrimitiveWithInfer): class ScatterUpdate(_ScatterOp_Dynamic): - """ + r""" Updates tensor values by using input indices and value. Using given values to update tensor value, along with the input indices. + .. math:: + \begin{array}{l} + \text {for each i, ..., j in indices.shape:} \\ + input\_x[indices[i, ..., j], :] = updates[i, ..., j, :] + \end{array} + Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent. If they have different data types, lower priority data type will be converted to relatively highest priority data type. @@ -3214,11 +3229,20 @@ class ScatterUpdate(_ScatterOp_Dynamic): class ScatterNdUpdate(_ScatterNdOp): - """ + r""" Updates tensor values by using input indices and value. Using given values to update tensor value, along with the input indices. + `input_x` has rank P and `indices` has rank Q where `Q >= 2`. + + `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)` where `N <= P`. + + The last dimension of `indices` (with length `N` ) indicates slices along the `N` th dimension of `input_x`. + + `updates` is a tensor of rank `Q-1+P-N`. Its shape is: + :math:`(i_0, i_1, ..., i_{Q-2}, x\_shape_N, ..., x\_shape_{P-1})`. + Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent. If they have different data types, lower priority data type will be converted to relatively highest priority data type. 
@@ -3230,7 +3254,9 @@ class ScatterNdUpdate(_ScatterNdOp): Inputs: - **input_x** (Parameter) - The target tensor, with data type of Parameter. - **indices** (Tensor) - The index of input tensor, with int32 data type. - - **update** (Tensor) - The tensor to be updated to the input tensor, has the same type as input. + The rank of indices must be at least 2 and `indices_shape[-1] <= len(x_shape)`. + - **updates** (Tensor) - The tensor to be updated to the input tensor, has the same type as input. + the shape is `indices_shape[:-1] + x_shape[indices_shape[-1]:]`. Outputs: Tensor, has the same shape and type as `input_x`. @@ -3242,9 +3268,9 @@ class ScatterNdUpdate(_ScatterNdOp): >>> np_x = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]) >>> input_x = mindspore.Parameter(Tensor(np_x, mindspore.float32), name="x") >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32) - >>> update = Tensor(np.array([1.0, 2.2]), mindspore.float32) + >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32) >>> op = ops.ScatterNdUpdate() - >>> output = op(input_x, indices, update) + >>> output = op(input_x, indices, updates) >>> print(output) [[ 1. 0.3 3.6] [ 0.4 2.2 -3.2]] @@ -3264,12 +3290,18 @@ class ScatterNdUpdate(_ScatterNdOp): class ScatterMax(_ScatterOp): - """ + r""" Updates the value of the input tensor through the maximum operation. Using given values to update tensor value through the max operation, along with the input indices. This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value. + .. math:: + \begin{array}{l} + \text {for each i, ..., j in indices.shape:} \\ + input\_x[indices[i, ..., j], :] = max(input\_x[indices[i, ..., j], :], updates[i, ..., j, :]) + \end{array} + Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent. If they have different data types, lower priority data type will be converted to relatively highest priority data type. 
@@ -3293,9 +3325,9 @@ class ScatterMax(_ScatterOp): Examples: >>> input_x = Parameter(Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32), name="input_x") >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32) - >>> update = Tensor(np.ones([2, 2, 3]) * 88, mindspore.float32) + >>> updates = Tensor(np.ones([2, 2, 3]) * 88, mindspore.float32) >>> scatter_max = ops.ScatterMax() - >>> output = scatter_max(input_x, indices, update) + >>> output = scatter_max(input_x, indices, updates) >>> print(output) [[88. 88. 88.] [88. 88. 88.]] @@ -3309,12 +3341,18 @@ class ScatterMax(_ScatterOp): class ScatterMin(_ScatterOp): - """ + r""" Updates the value of the input tensor through the minimum operation. Using given values to update tensor value through the min operation, along with the input indices. This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value. + .. math:: + \begin{array}{l} + \text {for each i, ..., j in indices.shape:} \\ + input\_x[indices[i, ..., j], :] = min(input\_x[indices[i, ..., j], :], updates[i, ..., j, :]) + \end{array} + Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent. If they have different data types, lower priority data type will be converted to relatively highest priority data type. @@ -3348,12 +3386,18 @@ class ScatterMin(_ScatterOp): class ScatterAdd(_ScatterOp_Dynamic): - """ + r""" Updates the value of the input tensor through the addition operation. Using given values to update tensor value through the add operation, along with the input indices. This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value. + .. 
math:: + \begin{array}{l} + \text {for each i, ..., j in indices.shape:} \\ + input\_x[indices[i, ..., j], :] \mathrel{+}= updates[i, ..., j, :] + \end{array} + Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent. If they have different data types, lower priority data type will be converted to relatively highest priority data type. @@ -3393,12 +3437,18 @@ class ScatterAdd(_ScatterOp_Dynamic): class ScatterSub(_ScatterOp): - """ + r""" Updates the value of the input tensor through the subtraction operation. Using given values to update tensor value through the subtraction operation, along with the input indices. This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value. + .. math:: + \begin{array}{l} + \text {for each i, ..., j in indices.shape:} \\ + input\_x[indices[i, ..., j], :] \mathrel{-}= updates[i, ..., j, :] + \end{array} + Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent. If they have different data types, lower priority data type will be converted to relatively highest priority data type. @@ -3433,12 +3483,18 @@ class ScatterSub(_ScatterOp): class ScatterMul(_ScatterOp): - """ + r""" Updates the value of the input tensor through the multiply operation. Using given values to update tensor value through the mul operation, along with the input indices. This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value. + .. math:: + \begin{array}{l} + \text {for each i, ..., j in indices.shape:} \\ + input\_x[indices[i, ..., j], :] \mathrel{*}= updates[i, ..., j, :] + \end{array} + Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent. If they have different data types, lower priority data type will be converted to relatively highest priority data type. 
@@ -3472,12 +3528,18 @@ class ScatterMul(_ScatterOp): class ScatterDiv(_ScatterOp): - """ + r""" Updates the value of the input tensor through the divide operation. Using given values to update tensor value through the div operation, along with the input indices. This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value. + .. math:: + \begin{array}{l} + \text {for each i, ..., j in indices.shape:} \\ + input\_x[indices[i, ..., j], :] \mathrel{/}= updates[i, ..., j, :] + \end{array} + Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent. If they have different data types, lower priority data type will be converted to relatively highest priority data type. @@ -3511,12 +3573,21 @@ class ScatterDiv(_ScatterOp): class ScatterNdAdd(_ScatterNdOp): - """ + r""" Applies sparse addition to individual values or slices in a tensor. Using given values to update tensor value through the add operation, along with the input indices. This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value. + `input_x` has rank P and `indices` has rank Q where `Q >= 2`. + + `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)` where `N <= P`. + + The last dimension of `indices` (with length `N` ) indicates slices along the `N` th dimension of `input_x`. + + `updates` is a tensor of rank `Q-1+P-N`. Its shape is: + :math:`(i_0, i_1, ..., i_{Q-2}, x\_shape_N, ..., x\_shape_{P-1})`. + Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent. If they have different data types, lower priority data type will be converted to relatively highest priority data type. @@ -3528,6 +3599,7 @@ class ScatterNdAdd(_ScatterNdOp): Inputs: - **input_x** (Parameter) - The target parameter. - **indices** (Tensor) - The index to do add operation whose data type must be mindspore.int32. 
+ The rank of indices must be at least 2 and `indices_shape[-1] <= len(x_shape)`. - **updates** (Tensor) - The tensor doing the add operation with `input_x`, the data type is same as `input_x`, the shape is `indices_shape[:-1] + x_shape[indices_shape[-1]:]`. @@ -3549,12 +3621,21 @@ class ScatterNdSub(_ScatterNdOp): - """ + r""" Applies sparse subtraction to individual values or slices in a tensor. Using given values to update tensor value through the subtraction operation, along with the input indices. This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value. + `input_x` has rank P and `indices` has rank Q where `Q >= 2`. + + `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)` where `N <= P`. + + The last dimension of `indices` (with length `N` ) indicates slices along the `N` th dimension of `input_x`. + + `updates` is a tensor of rank `Q-1+P-N`. Its shape is: + :math:`(i_0, i_1, ..., i_{Q-2}, x\_shape_N, ..., x\_shape_{P-1})`. + Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent. If they have different data types, lower priority data type will be converted to relatively highest priority data type. @@ -3566,6 +3647,7 @@ class ScatterNdSub(_ScatterNdOp): Inputs: - **input_x** (Parameter) - The target parameter. - **indices** (Tensor) - The index to do add operation whose data type must be mindspore.int32. + The rank of indices must be at least 2 and `indices_shape[-1] <= len(x_shape)`. - **updates** (Tensor) - The tensor that performs the subtraction operation with `input_x`, the data type is the same as `input_x`, the shape is `indices_shape[:-1] + x_shape[indices_shape[-1]:]`. 
diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py index fd6ad3d118..51c117a9ba 100644 --- a/mindspore/ops/operations/math_ops.py +++ b/mindspore/ops/operations/math_ops.py @@ -115,7 +115,7 @@ class _BitwiseBinaryOp(_MathBinaryOp): class TensorAdd(_MathBinaryOp): - """ + r""" Adds two input tensors element-wise. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent. @@ -125,6 +125,10 @@ class TensorAdd(_MathBinaryOp): When the inputs are one tensor and one scalar, the scalar could only be a constant. + .. math:: + + out_{i} = x_{i} + y_{i} + Inputs: - **input_x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool, or a tensor whose data type is number or bool. @@ -690,7 +694,7 @@ class CumProd(PrimitiveWithInfer): class MatMul(PrimitiveWithCheck): - """ + r""" Multiplies matrix `a` and matrix `b`. The rank of input tensors must equal to `2`. @@ -1257,6 +1261,10 @@ class Mul(_MathBinaryOp): When the inputs are one tensor and one scalar, the scalar could only be a constant. + .. math:: + + out_{i} = x_{i} * y_{i} + Inputs: - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or a bool or a tensor whose data type is number or bool. @@ -1544,9 +1552,13 @@ class Pow(_MathBinaryOp): class Exp(PrimitiveWithInfer): - """ + r""" Returns exponential of a tensor element-wise. + .. math:: + + out_i = e^{x_i} + Inputs: - **input_x** (Tensor) - The input tensor. The data type mast be float16 or float32. @@ -1586,9 +1598,13 @@ class Exp(PrimitiveWithInfer): class Expm1(PrimitiveWithInfer): - """ + r""" Returns exponential then minus 1 of a tensor element-wise. + .. math:: + + out_i = e^{x_i} - 1 + Inputs: - **input_x** (Tensor) - The input tensor. With float16 or float32 data type. @@ -1748,7 +1764,7 @@ class Erf(PrimitiveWithInfer): .. 
math:: - \text{erf}(x) = \frac{2}{\sqrt{\pi}}$\int$_{0}^{x}\exp(-t**2)dt + erf(x)=\frac{2} {\sqrt{\pi}} \int\limits_0^{x} e^{-t^{2}} dt Inputs: - **input_x** (Tensor) - The input tensor. The data type must be float16 or float32. @@ -1784,6 +1800,10 @@ class Erfc(PrimitiveWithInfer): r""" Computes the complementary error function of `input_x` element-wise. + .. math:: + + erfc(x) = 1 - \frac{2} {\sqrt{\pi}} \int\limits_0^{x} e^{-t^{2}} dt + Inputs: - **input_x** (Tensor) - The input tensor. The data type must be float16 or float32. @@ -1944,7 +1964,7 @@ class RealDiv(_MathBinaryOp): class Div(_MathBinaryOp): - """ + r""" Computes the quotient of dividing the first input tensor by the second input tensor element-wise. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent. @@ -1954,6 +1974,10 @@ class Div(_MathBinaryOp): When the inputs are one tensor and one scalar, the scalar could only be a constant. + .. math:: + + out_{i} = \frac{x_i}{y_i} + Inputs: - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or a bool or a tensor whose data type is number or bool. @@ -2233,9 +2257,13 @@ class Mod(_MathBinaryOp): class Floor(PrimitiveWithInfer): - """ + r""" Rounds a tensor down to the closest integer element-wise. + .. math:: + + out_i = \lfloor x_i \rfloor + Inputs: - **input_x** (Tensor) - The input tensor. Its element data type must be float. @@ -2300,12 +2328,12 @@ class FloorMod(_MathBinaryOp): class Ceil(PrimitiveWithInfer): - """ + r""" Rounds a tensor up to the closest integer element-wise. .. math:: - out_i = [input_i] = [input_i] + 1 + out_i = \lceil x_i \rceil Inputs: - **input_x** (Tensor) - The input tensor. It's element data type must be float16 or float32. @@ -2484,7 +2512,7 @@ class Cosh(PrimitiveWithInfer): class Asinh(PrimitiveWithInfer): - """ + r""" Computes inverse hyperbolic sine of the input element-wise. .. 
math:: @@ -3312,12 +3340,12 @@ class Cos(PrimitiveWithInfer): class ACos(PrimitiveWithInfer): - """ + r""" Computes arccosine of input tensors element-wise. .. math:: - out_i = cos^{-1}(input_i) + out_i = cos^{-1}(x_i) Inputs: - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. @@ -3382,9 +3410,13 @@ class Sin(PrimitiveWithInfer): class Asin(PrimitiveWithInfer): - """ + r""" Computes arcsine of input tensors element-wise. + .. math:: + + out_i = sin^{-1}(x_i) + Inputs: - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. @@ -3480,12 +3512,12 @@ class NMSWithMask(PrimitiveWithInfer): class Abs(PrimitiveWithInfer): - """ + r""" Returns absolute value of a tensor element-wise. .. math:: - out_i = |input_i| + out_i = |x_i| Inputs: - **input_x** (Tensor) - The input tensor. The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. @@ -3633,12 +3665,12 @@ class Tan(PrimitiveWithInfer): class Atan(PrimitiveWithInfer): - """ + r""" Computes the trigonometric inverse tangent of the input element-wise. .. math:: - out_i = tan^{-1}(input_i) + out_i = tan^{-1}(x_i) Inputs: - **input_x** (Tensor): The input tensor. The data type should be one of the following types: float16, float32.