Fix ops doc for some ops

Noel, 4 years ago, committed by GitHub
parent 770395cb93
commit da71173bc9

@@ -203,7 +203,7 @@ $$out = x - \\frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$
UNUSED constexpr char SqrtDoc[] = R"DOC(
Sqrt Activation Operator.
.. math:: out=\\sqrt{x}=x^{1/2}
$$out=\\sqrt{x}=x^{1/2}$$
**Note**:
input value must be greater than or equal to zero.
@@ -229,14 +229,14 @@ $$out = |x|$$
UNUSED constexpr char CeilDoc[] = R"DOC(
Ceil Operator. Computes ceil of x element-wise.
$$out = \\left \\lceil x \\right \\rceil$$
$$out = \\lceil x \\rceil$$
)DOC";
UNUSED constexpr char FloorDoc[] = R"DOC(
Floor Activation Operator. Computes floor of x element-wise.
$$out = \\left \\lfloor x \\right \\rfloor$$
$$out = \\lfloor x \\rfloor$$
)DOC";
@@ -273,7 +273,7 @@ $$out = cosh(x)$$
UNUSED constexpr char RoundDoc[] = R"DOC(
The OP rounds the values in the input to the nearest integer value.
.. code-block:: python
.. code-block:: text
input:
x.shape = [4]
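The docstring's text example is truncated in this hunk; as a runnable counterpart, a minimal sketch assuming `paddle.round` is the 2.x front end for this OP (halfway cases round away from zero):

.. code-block:: python

    import paddle

    x = paddle.to_tensor([-0.5, -0.2, 0.6, 1.5])
    print(paddle.round(x))  # [-1., -0., 1., 2.]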
@@ -592,7 +592,7 @@ class STanhOpMaker : public framework::OpProtoAndCheckerMaker {
void Make() override {
AddInput("X",
"Input of STanh operator."
" A LoDTensor or Tensor with type float32, float64.");
" A Tensor with type float32, float64.");
AddOutput("Out", "Output of STanh operator. A Tensor with type float32.");
AddAttr<float>("scale_a", "The scale parameter of a for the input. ")
.SetDefault(0.67f);
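STanh computes a scaled tanh, out = b * tanh(a * x). The scale_a default of 0.67 appears in this hunk; the scale_b default of 1.7159 is an assumption taken from the Paddle docs, since it falls outside this excerpt. A minimal sketch of the equivalent computation:

.. code-block:: python

    import paddle

    x = paddle.to_tensor([1.0, 2.0, 3.0])
    a, b = 0.67, 1.7159  # scale_a from this diff; scale_b assumed
    out = b * paddle.tanh(a * x)  # element-wise equivalent of the STanh op
    print(out)  # approximately [1.00, 1.50, 1.66]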

@@ -82,7 +82,7 @@ class CosSimOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X",
"The 1st input of cos_sim op, LoDTensor with shape ``[N_1, N_2, "
"The 1st input of cos_sim op, Tensor with shape ``[N_1, N_2, "
"..., N_k]``, the data type is float32.");
AddInput("Y",
"The 2nd input of cos_sim op, Tensor with shape ``[N_1 or 1, N_2, "
@@ -110,9 +110,6 @@ of input Y could be just 1 (different from input X), which will be
broadcasted to match the shape of input X before computing their cosine
similarity.
Both the input X and Y can carry the LoD (Level of Details) information,
or not. But the output only shares the LoD information with input X.
)DOC");
}
};
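To illustrate the broadcasting behavior described above, a hedged sketch using `paddle.nn.functional.cosine_similarity` as a stand-in for the cos_sim op (an assumption; the op itself is exposed as fluid.layers.cos_sim):

.. code-block:: python

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([4, 8], dtype='float32')
    y = paddle.rand([1, 8], dtype='float32')  # first dim is 1, broadcast against X

    # cosine similarity along the trailing dimension
    sim = F.cosine_similarity(x, y.expand([4, 8]), axis=1)
    print(sim.shape)  # [4]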

@@ -40,15 +40,11 @@ class ElementwiseMaxOpMaker : public ElementwiseOpMaker {
std::string GetEquation() const override { return "Out = max(X, Y)"; }
void AddInputX() override {
AddInput(
"X",
"(Variable), The first tensor holding the elements to be compared.");
AddInput("X", "The first tensor holding the elements to be compared.");
}
void AddInputY() override {
AddInput(
"Y",
"(Variable), The second tensor holding the elements to be compared.");
AddInput("Y", "The second tensor holding the elements to be compared.");
}
std::string GetOpFuntionality() const override {

@@ -40,15 +40,11 @@ class ElementwiseMinOpMaker : public ElementwiseOpMaker {
std::string GetEquation() const override { return "Out = min(X, Y)"; }
void AddInputX() override {
AddInput(
"X",
"(Variable), The first tensor holding the elements to be compared.");
AddInput("X", "The first tensor holding the elements to be compared.");
}
void AddInputY() override {
AddInput(
"Y",
"(Variable), The second tensor holding the elements to be compared.");
AddInput("Y", "The second tensor holding the elements to be compared.");
}
std::string GetOpFuntionality() const override {

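The two makers above describe a matching pair of element-wise comparisons. A minimal sketch, assuming the paddle 2.x front ends `paddle.maximum` and `paddle.minimum` dispatch to these ops:

.. code-block:: python

    import paddle

    x = paddle.to_tensor([[1., 5.], [3., 2.]])
    y = paddle.to_tensor([[4., 0.], [3., 7.]])
    print(paddle.maximum(x, y))  # [[4., 5.], [3., 7.]]
    print(paddle.minimum(x, y))  # [[1., 0.], [3., 2.]]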
@@ -1583,19 +1583,16 @@ def create_array(dtype):
@templatedoc()
def less_than(x, y, force_cpu=None, cond=None, name=None):
"""
:alias_main: paddle.less_than
:alias: paddle.less_than,paddle.tensor.less_than,paddle.tensor.logic.less_than
:old_api: paddle.fluid.layers.less_than
${comment}
Args:
x(${x_type}): ${x_comment}.
y(${y_type}): ${y_comment}.
x(Tensor): ${x_comment}.
y(Tensor): ${y_comment}.
force_cpu(${force_cpu_type}): ${force_cpu_comment}.
cond(Variable, optional): Optional output which can be any created Variable
cond(Tensor, optional): Optional output which can be any created Tensor
that meets the requirements to store the result of *less_than*.
if cond is None, a new Varibale will be created to store the result.
if cond is None, a new Tensor will be created to store the result.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
@@ -1604,25 +1601,13 @@ def less_than(x, y, force_cpu=None, cond=None, name=None):
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# Graph Organizing
x = fluid.layers.data(name='x', shape=[2], dtype='float64')
y = fluid.layers.data(name='y', shape=[2], dtype='float64')
result = fluid.layers.less_than(x=x, y=y)
# The comment lists another available method.
# result = fluid.layers.fill_constant(shape=[2], dtype='float64', value=0)
# fluid.layers.less_than(x=x, y=y, cond=result)
# Create an executor using CPU as example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[1, 2], [3, 4]]).astype(np.float64)
y_i = np.array([[2, 2], [1, 3]]).astype(np.float64)
result_value, = exe.run(fluid.default_main_program(), feed={'x':x_i, 'y':y_i}, fetch_list=[result])
print(result_value) # [[True, False], [False, False]]
import paddle
x = paddle.to_tensor([1, 2, 3, 4], dtype='float32')
y = paddle.to_tensor([2, 2, 1, 3], dtype='float32')
result = paddle.less_than(x, y)
print(result) # [True, False, False, False]
"""
check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
"less_than")

@@ -55,10 +55,11 @@ _two_bang_pattern_ = re.compile(r"!!([^!]+)!!")
def escape_math(text):
return _two_bang_pattern_.sub(
r'$$\1$$',
_single_dollar_pattern_.sub(r':math:`\1`',
_two_dollar_pattern_.sub(r"!!\1!!", text)))
#return _two_bang_pattern_.sub(
# r'$$\1$$',
# _single_dollar_pattern_.sub(r':math:\n`\1`',
# _two_dollar_pattern_.sub(r"!!\1!!", text)))
return _two_dollar_pattern_.sub(r':math:`\1`', text)
def _generate_doc_string_(op_proto,

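The new escape_math body only rewrites $$...$$ spans into inline :math: roles. Since the definition of `_two_dollar_pattern_` falls outside this hunk, the sketch below assumes a plausible one:

.. code-block:: python

    import re

    # assumed definition; the real pattern is defined outside this hunk
    _two_dollar_pattern_ = re.compile(r"\$\$([^\$]+)\$\$")

    def escape_math(text):
        return _two_dollar_pattern_.sub(r':math:`\1`', text)

    print(escape_math("$$out = \\sqrt{x}$$"))  # :math:`out = \sqrt{x}`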
@@ -377,9 +377,7 @@ def edit_distance(input,
So the edit distance between A and B is 3.
The input is a LoDTensor or Tensor.
If it is a LoDTensor, The separation is specified by the LoD information.
If it is a Tensor, The input_length and label_length should be supported.
The input is a Tensor; the input_length and label_length should be provided.
The `batch_size` of labels should be the same as `input`.
@@ -388,59 +386,36 @@ def edit_distance(input,
the edit distance value will be divided by the length of label.
Parameters:
input(Variable): The input variable which is a tensor or LoDTensor, its rank should be equal to 2 and its data type should be int64.
label(Variable): The label variable which is a tensor or LoDTensor, its rank should be equal to 2 and its data type should be int64.
input(Tensor): The input tensor, its rank should be equal to 2 and its data type should be int64.
label(Tensor): The label tensor, its rank should be equal to 2 and its data type should be int64.
normalized(bool, default True): Indicates whether to normalize the edit distance.
ignored_tokens(list<int>, default None): Tokens that will be removed before
calculating edit distance.
input_length(Variable): The length for each sequence in `input` if it's of Tensor type, it should have shape `(batch_size, )` and its data type should be int64.
label_length(Variable): The length for each sequence in `label` if it's of Tensor type, it should have shape `(batch_size, )` and its data type should be int64.
input_length(Tensor): The length for each sequence in `input` if it's of Tensor type, it should have shape `(batch_size, )` and its data type should be int64.
label_length(Tensor): The length for each sequence in `label` if it's of Tensor type, it should have shape `(batch_size, )` and its data type should be int64.
NOTE: To avoid unexpected results, the value of every element in input_length and label_length should be equal to the value of the second dimension of input and label. For example, given the input [[1,2,3,4],[5,6,7,8],[9,10,11,12]] with shape [3,4], the input_length should be [4,4,4].
NOTE: This API is different from fluid.metrics.EditDistance.
Returns:
Tuple:
distance(Variable): edit distance result, its data type is float32, and its shape is (batch_size, 1).
sequence_num(Variable): sequence number, its data type is float32, and its shape is (1,).
distance(Tensor): edit distance result, its data type is float32, and its shape is (batch_size, 1).
sequence_num(Tensor): sequence number, its data type is float32, and its shape is (1,).
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# using LoDTensor
x_lod = fluid.data(name='x_lod', shape=[None,1], dtype='int64', lod_level=1)
y_lod = fluid.data(name='y_lod', shape=[None,1], dtype='int64', lod_level=1)
distance_lod, seq_num_lod = fluid.layers.edit_distance(input=x_lod, label=y_lod)
# using Tensor
input_data = np.array([[1,2,3],[4,5,6],[4,4,4],[1,1,1]]).astype('int64')
label_data = np.array([[1,3,4,1],[4,5,8,1],[7,7,7,1],[1,1,1,1]]).astype('int64')
input_len = np.array([3,3,3,3]).astype('int64')
label_len = np.array([4,4,4,4]).astype('int64')
input_t = fluid.data(name='input', shape=[None,3], dtype='int64')
label_t = fluid.data(name='label', shape=[None,4], dtype='int64')
input_len_t = fluid.data(name='input_length', shape=[None], dtype='int64')
label_len_t = fluid.data(name='label_length', shape=[None], dtype='int64')
import paddle
import paddle.nn.functional as F
distance, sequence_num = fluid.layers.edit_distance(input=input_t, label=label_t, input_length=input_len_t, label_length=label_len_t,normalized=False)
input = paddle.to_tensor([[1,2,3],[4,5,6],[4,4,4],[1,1,1]], dtype='int64')
label = paddle.to_tensor([[1,3,4,1],[4,5,8,1],[7,7,7,1],[1,1,1,1]], dtype='int64')
input_len = paddle.to_tensor([3,3,3,3], dtype='int64')
label_len = paddle.to_tensor([4,4,4,4], dtype='int64')
# print(input_data.shape, label_data.shape)
# ((4,3), (4,4))
distance, sequence_num = F.loss.edit_distance(input=input, label=label, input_length=input_len, label_length=label_len, normalized=False)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
dis, seq_num = exe.run(fluid.default_main_program(),
feed={"input":input_data,
"label":label_data,
"input_length": input_len,
"label_length": label_len},
fetch_list=[distance,sequence_num])
# print(dis)
# print(distance)
# [[3.]
# [2.]
# [4.]
@@ -451,7 +426,7 @@ def edit_distance(input,
# [1. ]
# [0.25]
#
# print(seq_num)
# print(sequence_num)
# [4]
"""
@@ -1434,18 +1409,15 @@ def sigmoid_cross_entropy_with_logits(x,
name=None,
normalize=False):
"""
:alias_main: paddle.nn.functional.sigmoid_cross_entropy_with_logits
:alias: paddle.nn.functional.sigmoid_cross_entropy_with_logits,paddle.nn.functional.loss.sigmoid_cross_entropy_with_logits
:old_api: paddle.fluid.layers.sigmoid_cross_entropy_with_logits
${comment}
Args:
x(Variable): a 2-D tensor with shape N x D, where N is the batch size and
x(Tensor): a 2-D tensor with shape N x D, where N is the batch size and
D is the number of classes. This input is a tensor of logits computed
by the previous operator. Logits are unscaled log probabilities given
as log(p/(1-p)). The data type should be float32 or float64.
label (Variable): a 2-D tensor of the same type and shape as X.
label (Tensor): a 2-D tensor of the same type and shape as X.
This input is a tensor of probabilistic labels for each logit.
ignore_index(int): Specifies a target value that is ignored and
does not contribute to the input gradient.
@@ -1456,22 +1428,19 @@ def sigmoid_cross_entropy_with_logits(x,
targets != ignore_index.
Returns:
out(${out_type}): ${out_comment}
out(Tensor): ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(
name='data', shape=[10], dtype='float32')
label = fluid.data(
name='data', shape=[10], dtype='float32')
loss = fluid.layers.sigmoid_cross_entropy_with_logits(
x=input,
label=label,
ignore_index=-1,
normalize=True) # or False
# loss = fluid.layers.reduce_sum(loss) # summation of loss
import paddle
input = paddle.rand(shape=[10], dtype='float32')
label = paddle.rand(shape=[10], dtype='float32')
loss = paddle.fluid.layers.sigmoid_cross_entropy_with_logits(input, label,
ignore_index=-1, normalize=True)
print(loss)
"""
check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'],
'sigmoid_cross_entropy_with_logits')
@@ -1619,47 +1588,44 @@ def huber_loss(input, label, delta):
@templatedoc()
def kldiv_loss(x, target, reduction='mean', name=None):
"""
:alias_main: paddle.nn.functional.kldiv_loss
:alias: paddle.nn.functional.kldiv_loss,paddle.nn.functional.loss.kldiv_loss
:old_api: paddle.fluid.layers.kldiv_loss
${comment}
Args:
x (Variable): ${x_comment}
target (Variable): ${target_comment}
reduction (Variable): ${reduction_comment}
x (Tensor): ${x_comment}
target (Tensor): ${target_comment}
reduction (Tensor): ${reduction_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually there is no need to set this, and it is
None by default.
Returns:
Variable(Tensor): The KL divergence loss. The data type is same as input tensor
Tensor: The KL divergence loss. The data type is same as input tensor
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
# 'batchmean' reduction, loss shape will be [N]
x = fluid.data(name='x', shape=[None,4,2,2], dtype='float32') # shape=[-1, 4, 2, 2]
target = fluid.layers.data(name='target', shape=[4,2,2], dtype='float32')
loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='batchmean') # shape=[-1]
x = paddle.rand(shape=[3,4,2,2], dtype='float32')
target = paddle.rand(shape=[3,4,2,2], dtype='float32')
# 'batchmean' reduction, loss shape will be [1]
loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='batchmean')
print(loss.shape) # shape=[1]
# 'mean' reduction, loss shape will be [1]
x = fluid.data(name='x', shape=[None,4,2,2], dtype='float32') # shape=[-1, 4, 2, 2]
target = fluid.layers.data(name='target', shape=[4,2,2], dtype='float32')
loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='mean') # shape=[1]
loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='mean')
print(loss.shape) # shape=[1]
# 'sum' reduction, loss shape will be [1]
x = fluid.data(name='x', shape=[None,4,2,2], dtype='float32') # shape=[-1, 4, 2, 2]
target = fluid.layers.data(name='target', shape=[4,2,2], dtype='float32')
loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='sum') # shape=[1]
loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='sum')
print(loss.shape) # shape=[1]
# 'none' reduction, loss shape is same with X shape
x = fluid.data(name='x', shape=[None,4,2,2], dtype='float32') # shape=[-1, 4, 2, 2]
target = fluid.layers.data(name='target', shape=[4,2,2], dtype='float32')
loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='none') # shape=[-1, 4, 2, 2]
loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='none')
print(loss.shape) # shape=[3, 4, 2, 2]
"""
helper = LayerHelper('kldiv_loss', **locals())

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -1271,28 +1271,26 @@ def has_nan(x):
def isfinite(x):
"""
:alias_main: paddle.isfinite
:alias: paddle.isfinite,paddle.tensor.isfinite,paddle.tensor.logic.isfinite
:old_api: paddle.fluid.layers.isfinite
Test whether any element of x is infinity or NaN. If all the elements are finite,
returns True, else False.
Args:
x(variable): The Tensor/LoDTensor to be checked.
x(Tensor): The Tensor to be checked.
Returns:
Variable: The tensor variable storing the output, contains a bool value.
Tensor: The tensor storing the output, contains a bool value.
Examples:
.. code-block:: python
import paddle.fluid as fluid
var = fluid.layers.data(name="data",
shape=(4, 6),
dtype="float32")
out = fluid.layers.isfinite(var)
import paddle
x = paddle.rand(shape=[4, 6], dtype='float32')
y = paddle.fluid.layers.isfinite(x)
print(y)
"""
check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
"isfinite")

@@ -120,11 +120,10 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean',
import paddle
paddle.disable_static()
input = paddle.to_tensor([0.5, 0.6, 0.7], 'float32')
label = paddle.to_tensor([1.0, 0.0, 1.0], 'float32')
output = paddle.nn.functional.binary_cross_entropy(input, label)
print(output.numpy()) # [0.65537095]
print(output) # [0.65537095]
"""
if reduction not in ['sum', 'mean', 'none']:
@@ -200,16 +199,16 @@ def binary_cross_entropy_with_logits(logit,
.. math::
Out = -Labels * \\log(\\sigma(Logit)) - (1 - Labels) * \\log(1 - \\sigma(Logit))
We know that :math:`\\sigma(Logit) = \\frac{1}{1 + \\e^{-Logit}}`. By substituting this we get:
We know that :math:`\\sigma(Logit) = \\frac{1}{1 + e^{-Logit}}`. By substituting this we get:
.. math::
Out = Logit - Logit * Labels + \\log(1 + \\e^{-Logit})
Out = Logit - Logit * Labels + \\log(1 + e^{-Logit})
For stability and to prevent overflow of :math:`\\e^{-Logit}` when Logit < 0,
For stability and to prevent overflow of :math:`e^{-Logit}` when Logit < 0,
we reformulate the loss as follows:
.. math::
Out = \\max(Logit, 0) - Logit * Labels + \\log(1 + \\e^{-\|Logit\|})
Out = \\max(Logit, 0) - Logit * Labels + \\log(1 + e^{-\|Logit\|})
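A quick numerical check that the reformulated loss matches the direct expression on both signs of the logit (a numpy sketch, independent of Paddle):

.. code-block:: python

    import numpy as np

    def bce_direct(z, y):
        p = 1.0 / (1.0 + np.exp(-z))  # sigmoid
        return -y * np.log(p) - (1.0 - y) * np.log(1.0 - p)

    def bce_stable(z, y):
        # max(z, 0) - z*y + log(1 + exp(-|z|)) never exponentiates a large positive value
        return np.maximum(z, 0.0) - z * y + np.log1p(np.exp(-np.abs(z)))

    z = np.array([-3.0, -0.5, 0.0, 2.0])
    y = np.array([0.0, 1.0, 1.0, 0.0])
    print(np.allclose(bce_direct(z, y), bce_stable(z, y)))  # True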
Then, if ``weight`` or ``pos_weight`` is not None, this operator multiplies the
weight tensor by the loss `Out`. The ``weight`` tensor will attach different
@@ -254,11 +253,11 @@ def binary_cross_entropy_with_logits(logit,
.. code-block:: python
import paddle
paddle.disable_static()
logit = paddle.to_tensor([5.0, 1.0, 3.0])
label = paddle.to_tensor([1.0, 0.0, 1.0])
output = paddle.nn.functional.binary_cross_entropy_with_logits(logit, label)
print(output.numpy()) # [0.45618808]
print(output) # [0.45618808]
"""
if reduction not in ['sum', 'mean', 'none']:
@@ -577,13 +576,12 @@ def margin_ranking_loss(input,
.. code-block:: python
import paddle
paddle.disable_static()
input = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32')
other = paddle.to_tensor([[2, 1], [2, 4]], dtype='float32')
label = paddle.to_tensor([[1, -1], [-1, -1]], dtype='float32')
loss = paddle.nn.functional.margin_ranking_loss(input, other, label)
print(loss.numpy()) # [0.75]
print(loss) # [0.75]
"""
if reduction not in ['sum', 'mean', 'none']:
raise ValueError(
@@ -651,22 +649,22 @@ def l1_loss(input, label, reduction='mean', name=None):
If `reduction` set to ``'none'``, the loss is:
.. math::
Out = \lvert input - label\rvert
Out = \\lvert input - label \\rvert
If `reduction` set to ``'mean'``, the loss is:
.. math::
Out = MEAN(\lvert input - label\rvert)
Out = MEAN(\\lvert input - label \\rvert)
If `reduction` set to ``'sum'``, the loss is:
.. math::
Out = SUM(\lvert input - label\rvert)
Out = SUM(\\lvert input - label\\rvert)
Parameters:
input (Tensor): The input tensor. The shapes is [N, *], where N is batch size and `*` means any number of additional dimensions. It's data type should be float32, float64, int32, int64.
label (Tensor): label. The shapes is [N, *], same shape as ``input`` . It's data type should be float32, float64, int32, int64.
input (Tensor): The input tensor. The shape is [N, `*`], where N is batch size and `*` means any number of additional dimensions. Its data type should be float32, float64, int32, int64.
label (Tensor): label. The shape is [N, `*`], same shape as ``input``. Its data type should be float32, float64, int32, int64.
reduction (str, optional): Indicates the reduction to apply to the loss;
the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
If `reduction` is ``'none'``, the unreduced loss is returned;
@@ -674,12 +672,15 @@
If `reduction` is ``'sum'``, the reduced sum loss is returned.
Default is ``'mean'``.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, the L1 Loss of Tensor ``input`` and ``label``.
If `reduction` is ``'none'``, the shape of output loss is [N, *], the same as ``input`` .
If `reduction` is ``'mean'`` or ``'sum'``, the shape of output loss is [1].
Examples:
.. code-block:: python
import paddle
paddle.disable_static()

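The example in this hunk is cut off; a minimal sketch of the three reductions, assuming `paddle.nn.functional.l1_loss` (printed values rounded):

.. code-block:: python

    import paddle
    import paddle.nn.functional as F

    input = paddle.to_tensor([[1.5, 0.8], [0.2, 1.3]])
    label = paddle.to_tensor([[1.7, 1.0], [0.2, 0.5]])

    print(F.l1_loss(input, label))                    # mean: [0.3]
    print(F.l1_loss(input, label, reduction='sum'))   # [1.2]
    print(F.l1_loss(input, label, reduction='none'))  # [[0.2, 0.2], [0., 0.8]]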
@@ -40,15 +40,15 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
.. math::
y = \frac{x}{ \max\left( \lvert \lvert x \rvert \rvert_p, epsilon\right) }
y = \\frac{x}{ \\max\\left( \\lvert \\lvert x \\rvert \\rvert_p, epsilon\\right) }
.. math::
\lvert \lvert x \rvert \rvert_p = \left(\sum_i {\lvert x_i\rvert^p} \right)^{1/p}
\\lvert \\lvert x \\rvert \\rvert_p = \\left( \\sum_i {\\lvert x_i \\rvert^p} \\right)^{1/p}
where, :math:`\sum_i{\lvert x_i\rvert^p}` is calculated along the ``axis`` dimension.
where, :math:`\\sum_i{\\lvert x_i \\rvert^p}` is calculated along the ``axis`` dimension.
Args:
Parameters:
x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64.
p (float|int, optional): The exponent value in the norm formulation. Default: 2
axis (int, optional): The axis on which to apply normalization. If `axis < 0`, the dimension to normalization is `x.ndim + axis`. -1 is the last dimension.

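A minimal sketch of the formula above, assuming `paddle.nn.functional.normalize`; the epsilon in the denominator keeps all-zero rows finite:

.. code-block:: python

    import paddle
    import paddle.nn.functional as F

    x = paddle.to_tensor([[3.0, 4.0], [0.0, 0.0]])
    print(F.normalize(x, p=2, axis=1))
    # [[0.6, 0.8],
    #  [0., 0.]]  the zero row divides by epsilon, giving zeros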
@@ -838,9 +838,13 @@ class MarginRankingLoss(fluid.dygraph.Layer):
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Shape:
input: N-D Tensor, the shape is [N, *], N is batch size and `*` means any number of additional dimensions., available dtype is float32, float64.
input: N-D Tensor, the shape is [N, \*], N is batch size and `\*` means any number of additional dimensions, available dtype is float32, float64.
other: N-D Tensor, `other` have the same shape and dtype as `input`.
label: N-D Tensor, label have the same shape and dtype as `input`.
output: If :attr:`reduction` is ``'mean'`` or ``'sum'``, the output shape is :math:`[1]`; otherwise the shape is the same as `input`. The dtype is the same as the input tensor.
Returns:
@@ -851,14 +855,13 @@ class MarginRankingLoss(fluid.dygraph.Layer):
.. code-block:: python
import paddle
paddle.disable_static()
input = paddle.to_tensor([[1, 2], [3, 4]], dtype="float32")
other = paddle.to_tensor([[2, 1], [2, 4]], dtype="float32")
label = paddle.to_tensor([[1, -1], [-1, -1]], dtype="float32")
margin_rank_loss = paddle.nn.MarginRankingLoss()
loss = margin_rank_loss(input, other, label)
print(loss.numpy()) # [0.75]
print(loss) # [0.75]
"""
def __init__(self, margin=0.0, reduction='mean', name=None):

@@ -54,9 +54,6 @@ __all__ = [
def equal_all(x, y, name=None):
"""
:alias_main: paddle.equal_all
:alias: paddle.equal_all,paddle.tensor.equal_all,paddle.tensor.logic.equal_all
This OP returns the truth value of :math:`x == y`. True if two inputs have the same elements, False otherwise.
**NOTICE**: The output of this OP has no gradient.
@@ -75,14 +72,13 @@ def equal_all(x, y, name=None):
import paddle
paddle.disable_static()
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 2, 3])
z = paddle.to_tensor([1, 4, 3])
result1 = paddle.equal_all(x, y)
print(result1.numpy()) # result1 = [True ]
print(result1) # result1 = [True ]
result2 = paddle.equal_all(x, z)
print(result2.numpy()) # result2 = [False ]
print(result2) # result2 = [False ]
"""
helper = LayerHelper("equal_all", **locals())
@@ -122,8 +118,6 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
import paddle
paddle.disable_static()
x = paddle.to_tensor([10000., 1e-07])
y = paddle.to_tensor([10000.1, 1e-08])
result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
@@ -189,10 +183,9 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
@templatedoc()
def equal(x, y, name=None):
"""
:alias_main: paddle.equal
:alias: paddle.equal,paddle.tensor.equal,paddle.tensor.logic.equal
This layer returns the truth value of :math:`x == y` elementwise.
**NOTICE**: The output of this OP has no gradient.
Args:
@@ -210,11 +203,10 @@ def equal(x, y, name=None):
import paddle
paddle.disable_static()
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.equal(x, y)
print(result1.numpy()) # result1 = [True False False]
print(result1) # result1 = [True False False]
"""
if in_dygraph_mode():
return core.ops.equal(x, y)
@@ -236,10 +228,8 @@ def equal(x, y, name=None):
@templatedoc()
def greater_equal(x, y, name=None):
"""
:alias_main: paddle.greater_equal
:alias: paddle.greater_equal,paddle.tensor.greater_equal,paddle.tensor.logic.greater_equal
This OP returns the truth value of :math:`x >= y` elementwise, which is equivalent to the overloaded operator `>=`.
**NOTICE**: The output of this OP has no gradient.
Args:
@@ -252,13 +242,13 @@ def greater_equal(x, y, name=None):
Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.greater_equal(x, y)
print(result1.numpy()) # result1 = [True False True]
print(result1) # result1 = [True False True]
"""
if in_dygraph_mode():
return core.ops.greater_equal(x, y)
@@ -282,10 +272,8 @@ def greater_equal(x, y, name=None):
@templatedoc()
def greater_than(x, y, name=None):
"""
:alias_main: paddle.greater_than
:alias: paddle.greater_than,paddle.tensor.greater_than,paddle.tensor.logic.greater_than
This OP returns the truth value of :math:`x > y` elementwise, which is equivalent to the overloaded operator `>`.
**NOTICE**: The output of this OP has no gradient.
Args:
@@ -298,13 +286,13 @@ def greater_than(x, y, name=None):
Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.greater_than(x, y)
print(result1.numpy()) # result1 = [False False True]
print(result1) # result1 = [False False True]
"""
if in_dygraph_mode():
return core.ops.greater_than(x, y)
@@ -328,10 +316,8 @@ def greater_than(x, y, name=None):
@templatedoc()
def less_equal(x, y, name=None):
"""
:alias_main: paddle.less_equal
:alias: paddle.less_equal,paddle.tensor.less_equal,paddle.tensor.logic.less_equal
This OP returns the truth value of :math:`x <= y` elementwise, which is equivalent to the overloaded operator `<=`.
**NOTICE**: The output of this OP has no gradient.
Args:
@@ -345,13 +331,13 @@ def less_equal(x, y, name=None):
Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.less_equal(x, y)
print(result1.numpy()) # result1 = [True True False]
print(result1) # result1 = [True True False]
"""
if in_dygraph_mode():
return core.ops.less_equal(x, y)
@@ -373,10 +359,8 @@ def less_equal(x, y, name=None):
@templatedoc()
def less_than(x, y, name=None):
"""
:alias_main: paddle.less_than
:alias: paddle.less_than,paddle.tensor.less_than,paddle.tensor.logic.less_than
This OP returns the truth value of :math:`x < y` elementwise, which is equivalent to the overloaded operator `<`.
**NOTICE**: The output of this OP has no gradient.
Args:
@@ -390,13 +374,13 @@ def less_than(x, y, name=None):
Examples:
.. code-block:: python
import paddle
paddle.disable_static()
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.less_than(x, y)
print(result1.numpy()) # result1 = [False True False]
print(result1) # result1 = [False True False]
"""
if in_dygraph_mode():
return core.ops.less_than(x, y)
@@ -418,10 +402,8 @@ def less_than(x, y, name=None):
@templatedoc()
def not_equal(x, y, name=None):
"""
:alias_main: paddle.not_equal
:alias: paddle.not_equal,paddle.tensor.not_equal,paddle.tensor.logic.not_equal
This OP returns the truth value of :math:`x != y` elementwise, which is equivalent to the overloaded operator `!=`.
**NOTICE**: The output of this OP has no gradient.
Args:
@@ -438,11 +420,10 @@ def not_equal(x, y, name=None):
import paddle
paddle.disable_static()
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.not_equal(x, y)
print(result1.numpy()) # result1 = [False True True]
print(result1) # result1 = [False True True]
"""
if in_dygraph_mode():
return core.ops.not_equal(x, y)

File diff suppressed because it is too large

File diff suppressed because it is too large