Fix ops doc for some ops

Noel committed da71173bc9 (parent 770395cb93) via GitHub, 4 years ago.

@@ -203,7 +203,7 @@ $$out = x - \\frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$
 UNUSED constexpr char SqrtDoc[] = R"DOC(
 Sqrt Activation Operator.
-.. math:: out=\\sqrt{x}=x^{1/2}
+$$out=\\sqrt{x}=x^{1/2}$$
 **Note**:
 input value must be greater than or equal to zero.
@@ -229,14 +229,14 @@ $$out = |x|$$
 UNUSED constexpr char CeilDoc[] = R"DOC(
 Ceil Operator. Computes ceil of x element-wise.
-$$out = \\left \\lceil x \\right \\rceil$$
+$$out = \\lceil x \\rceil$$
 )DOC";
 UNUSED constexpr char FloorDoc[] = R"DOC(
 Floor Activation Operator. Computes floor of x element-wise.
-$$out = \\left \\lfloor x \\right \\rfloor$$
+$$out = \\lfloor x \\rfloor$$
 )DOC";
@@ -273,7 +273,7 @@ $$out = cosh(x)$$
 UNUSED constexpr char RoundDoc[] = R"DOC(
 The OP rounds the values in the input to the nearest integer value.
-.. code-block:: python
+.. code-block:: text
 input:
 x.shape = [4]
@@ -592,7 +592,7 @@ class STanhOpMaker : public framework::OpProtoAndCheckerMaker {
 void Make() override {
 AddInput("X",
 "Input of STanh operator."
-" A LoDTensor or Tensor with type float32, float64.");
+" A Tensor with type float32, float64.");
 AddOutput("Out", "Output of STanh operator. A Tensor with type float32.");
 AddAttr<float>("scale_a", "The scale parameter of a for the input. ")
 .SetDefault(0.67f);
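For reference, a minimal NumPy sketch (not part of this commit) of the element-wise semantics the activation doc strings above describe; the values are illustrative only:

    import numpy as np

    x = np.array([0.3, 1.7, -1.2])
    print(np.sqrt(np.array([0.0, 4.0, 2.25])))  # sqrt: input must be >= 0, per the note above
    print(np.ceil(x))   # [ 1.  2. -1.]
    print(np.floor(x))  # [ 0.  1. -2.]
    print(np.round(x))  # [ 0.  2. -1.]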

@@ -82,7 +82,7 @@ class CosSimOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
 void Make() override {
 AddInput("X",
-"The 1st input of cos_sim op, LoDTensor with shape ``[N_1, N_2, "
+"The 1st input of cos_sim op, Tensor with shape ``[N_1, N_2, "
 "..., N_k]``, the data type is float32.");
 AddInput("Y",
 "The 2nd input of cos_sim op, Tensor with shape ``[N_1 or 1, N_2, "
@@ -110,9 +110,6 @@ of input Y could be just 1 (different from input X), which will be
 broadcasted to match the shape of input X before computing their cosine
 similarity.
-Both the input X and Y can carry the LoD (Level of Details) information,
-or not. But the output only shares the LoD information with input X.
 )DOC");
 }
 };
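A small NumPy sketch (not part of this commit) of the broadcasting the remaining doc text describes: the first dimension of Y may be 1 and is broadcast against X before the per-row cosine similarity is computed:

    import numpy as np

    X = np.random.rand(4, 5).astype('float32')
    Y = np.random.rand(1, 5).astype('float32')  # first dim 1, broadcast to match X
    cos = (X * Y).sum(axis=1) / (np.linalg.norm(X, axis=1) * np.linalg.norm(Y, axis=1))
    print(cos.shape)  # one similarity value per row of X: (4,)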

@@ -40,15 +40,11 @@ class ElementwiseMaxOpMaker : public ElementwiseOpMaker {
 std::string GetEquation() const override { return "Out = max(X, Y)"; }
 void AddInputX() override {
-AddInput(
-"X",
-"(Variable), The first tensor holding the elements to be compared.");
+AddInput("X", "The first tensor holding the elements to be compared.");
 }
 void AddInputY() override {
-AddInput(
-"Y",
-"(Variable), The second tensor holding the elements to be compared.");
+AddInput("Y", "The second tensor holding the elements to be compared.");
 }
 std::string GetOpFuntionality() const override {

@@ -40,15 +40,11 @@ class ElementwiseMinOpMaker : public ElementwiseOpMaker {
 std::string GetEquation() const override { return "Out = min(X, Y)"; }
 void AddInputX() override {
-AddInput(
-"X",
-"(Variable), The first tensor holding the elements to be compared.");
+AddInput("X", "The first tensor holding the elements to be compared.");
 }
 void AddInputY() override {
-AddInput(
-"Y",
-"(Variable), The second tensor holding the elements to be compared.");
+AddInput("Y", "The second tensor holding the elements to be compared.");
 }
 std::string GetOpFuntionality() const override {
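The equations documented above ("Out = max(X, Y)" and "Out = min(X, Y)") are plain element-wise comparisons; a NumPy sketch, not part of this commit:

    import numpy as np

    x = np.array([[1.0, 5.0], [3.0, 2.0]])
    y = np.array([[4.0, 1.0], [3.0, 7.0]])
    print(np.maximum(x, y))  # [[4. 5.] [3. 7.]]
    print(np.minimum(x, y))  # [[1. 1.] [3. 2.]]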

@@ -1583,19 +1583,16 @@ def create_array(dtype):
 @templatedoc()
 def less_than(x, y, force_cpu=None, cond=None, name=None):
 """
-:alias_main: paddle.less_than
-:alias: paddle.less_than,paddle.tensor.less_than,paddle.tensor.logic.less_than
-:old_api: paddle.fluid.layers.less_than
 ${comment}
 Args:
-x(${x_type}): ${x_comment}.
+x(Tensor): ${x_comment}.
-y(${y_type}): ${y_comment}.
+y(Tensor): ${y_comment}.
 force_cpu(${force_cpu_type}): ${force_cpu_comment}.
-cond(Variable, optional): Optional output which can be any created Variable
+cond(Tensor, optional): Optional output which can be any created Tensor
 that meets the requirements to store the result of *less_than*.
-if cond is None, a new Varibale will be created to store the result.
+if cond is None, a new Tensor will be created to store the result.
 name(str, optional): The default value is None. Normally there is no need for
 user to set this property. For more information, please refer to :ref:`api_guide_Name`.
 Returns:
@@ -1604,25 +1601,13 @@ def less_than(x, y, force_cpu=None, cond=None, name=None):
 Examples:
 .. code-block:: python
-import paddle.fluid as fluid
-import numpy as np
-# Graph Organizing
-x = fluid.layers.data(name='x', shape=[2], dtype='float64')
-y = fluid.layers.data(name='y', shape=[2], dtype='float64')
-result = fluid.layers.less_than(x=x, y=y)
-# The comment lists another available method.
-# result = fluid.layers.fill_constant(shape=[2], dtype='float64', value=0)
-# fluid.layers.less_than(x=x, y=y, cond=result)
-# Create an executor using CPU as example
-exe = fluid.Executor(fluid.CPUPlace())
-# Execute
-x_i = np.array([[1, 2], [3, 4]]).astype(np.float64)
-y_i = np.array([[2, 2], [1, 3]]).astype(np.float64)
-result_value, = exe.run(fluid.default_main_program(), feed={'x':x_i, 'y':y_i}, fetch_list=[result])
-print(result_value) # [[True, False], [False, False]]
+import paddle
+x = paddle.to_tensor([1, 2, 3, 4], dtype='float32')
+y = paddle.to_tensor([2, 2, 1, 3], dtype='float32')
+result = paddle.less_than(x, y)
+print(result) # [True, False, False, False]
 """
 check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
 "less_than")

@@ -55,10 +55,11 @@ _two_bang_pattern_ = re.compile(r"!!([^!]+)!!")
 def escape_math(text):
-return _two_bang_pattern_.sub(
-r'$$\1$$',
-_single_dollar_pattern_.sub(r':math:`\1`',
-_two_dollar_pattern_.sub(r"!!\1!!", text)))
+#return _two_bang_pattern_.sub(
+# r'$$\1$$',
+# _single_dollar_pattern_.sub(r':math:\n`\1`',
+# _two_dollar_pattern_.sub(r"!!\1!!", text)))
+return _two_dollar_pattern_.sub(r':math:`\1`', text)
 def _generate_doc_string_(op_proto,
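After this change escape_math only rewrites double-dollar spans into :math: roles and leaves single-dollar text alone. A standalone illustration (not part of this commit; the pattern below is an assumed stand-in for _two_dollar_pattern_, whose real definition lives earlier in layer_function_generator.py):

    import re

    two_dollar = re.compile(r"\$\$([^$]+)\$\$")  # assumed equivalent of _two_dollar_pattern_
    doc = "The loss is $$|x - y|$$ per element, and $5 stays untouched."
    print(two_dollar.sub(r':math:`\1`', doc))
    # The loss is :math:`|x - y|` per element, and $5 stays untouched.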

@@ -377,9 +377,7 @@ def edit_distance(input,
 So the edit distance between A and B is 3.
-The input is a LoDTensor or Tensor.
-If it is a LoDTensor, The separation is specified by the LoD information.
-If it is a Tensor, The input_length and label_length should be supported.
+The input is a Tensor, the input_length and label_length should be supported.
 The `batch_size` of labels should be same as `input`.
@@ -388,59 +386,36 @@ def edit_distance(input,
 the edit distance value will be divided by the length of label.
 Parameters:
-input(Variable): The input variable which is a tensor or LoDTensor, its rank should be equal to 2 and its data type should be int64.
-label(Variable): The label variable which is a tensor or LoDTensor, its rank should be equal to 2 and its data type should be int64.
+input(Tensor): The input tensor, its rank should be equal to 2 and its data type should be int64.
+label(Tensor): The label tensor, its rank should be equal to 2 and its data type should be int64.
 normalized(bool, default True): Indicated whether to normalize the edit distance.
 ignored_tokens(list<int>, default None): Tokens that will be removed before
 calculating edit distance.
-input_length(Variable): The length for each sequence in `input` if it's of Tensor type, it should have shape `(batch_size, )` and its data type should be int64.
-label_length(Variable): The length for each sequence in `label` if it's of Tensor type, it should have shape `(batch_size, )` and its data type should be int64.
+input_length(Tensor): The length for each sequence in `input` if it's of Tensor type, it should have shape `(batch_size, )` and its data type should be int64.
+label_length(Tensor): The length for each sequence in `label` if it's of Tensor type, it should have shape `(batch_size, )` and its data type should be int64.
 NOTE: To be avoid unexpected result, the value of every elements in input_length and label_length should be equal to the value of the second dimension of input and label. For example, The input: [[1,2,3,4],[5,6,7,8],[9,10,11,12]], the shape of input is [3,4] and the input_length should be [4,4,4]
 NOTE: This Api is different from fluid.metrics.EditDistance
 Returns:
 Tuple:
-distance(Variable): edit distance result, its data type is float32, and its shape is (batch_size, 1).
-sequence_num(Variable): sequence number, its data type is float32, and its shape is (1,).
+distance(Tensor): edit distance result, its data type is float32, and its shape is (batch_size, 1).
+sequence_num(Tensor): sequence number, its data type is float32, and its shape is (1,).
 Examples:
 .. code-block:: python
-import paddle.fluid as fluid
-import numpy as np
-# using LoDTensor
-x_lod = fluid.data(name='x_lod', shape=[None,1], dtype='int64', lod_level=1)
-y_lod = fluid.data(name='y_lod', shape=[None,1], dtype='int64', lod_level=1)
-distance_lod, seq_num_lod = fluid.layers.edit_distance(input=x_lod, label=y_lod)
-# using Tensor
-input_data = np.array([[1,2,3],[4,5,6],[4,4,4],[1,1,1]]).astype('int64')
-label_data = np.array([[1,3,4,1],[4,5,8,1],[7,7,7,1],[1,1,1,1]]).astype('int64')
-input_len = np.array([3,3,3,3]).astype('int64')
-label_len = np.array([4,4,4,4]).astype('int64')
-input_t = fluid.data(name='input', shape=[None,3], dtype='int64')
-label_t = fluid.data(name='label', shape=[None,4], dtype='int64')
-input_len_t = fluid.data(name='input_length', shape=[None], dtype='int64')
-label_len_t = fluid.data(name='label_length', shape=[None], dtype='int64')
-distance, sequence_num = fluid.layers.edit_distance(input=input_t, label=label_t, input_length=input_len_t, label_length=label_len_t,normalized=False)
-# print(input_data.shape, label_data.shape)
-# ((4,3), (4,4))
-place = fluid.CPUPlace()
-exe = fluid.Executor(place)
-exe.run(fluid.default_startup_program())
-dis, seq_num = exe.run(fluid.default_main_program(),
-feed={"input":input_data,
-"label":label_data,
-"input_length": input_len,
-"label_length": label_len},
-fetch_list=[distance,sequence_num])
-# print(dis)
+import paddle
+import paddle.nn.functional as F
+input = paddle.to_tensor([[1,2,3],[4,5,6],[4,4,4],[1,1,1]], dtype='int64')
+label = paddle.to_tensor([[1,3,4,1],[4,5,8,1],[7,7,7,1],[1,1,1,1]], dtype='int64')
+input_len = paddle.to_tensor([3,3,3,3], dtype='int64')
+label_len = paddle.to_tensor([4,4,4,4], dtype='int64')
+distance, sequence_num = F.loss.edit_distance(input=input, label=label, input_length=input_len, label_length=label_len, normalized=False)
+# print(distance)
 # [[3.]
 # [2.]
 # [4.]
@@ -451,7 +426,7 @@ def edit_distance(input,
 # [1. ]
 # [0.25]
 #
-# print(seq_num)
+# print(sequence_num)
 # [4]
 """
@@ -1434,18 +1409,15 @@ def sigmoid_cross_entropy_with_logits(x,
 name=None,
 normalize=False):
 """
-:alias_main: paddle.nn.functional.sigmoid_cross_entropy_with_logits
-:alias: paddle.nn.functional.sigmoid_cross_entropy_with_logits,paddle.nn.functional.loss.sigmoid_cross_entropy_with_logits
-:old_api: paddle.fluid.layers.sigmoid_cross_entropy_with_logits
 ${comment}
 Args:
-x(Variable): a 2-D tensor with shape N x D, where N is the batch size and
+x(Tensor): a 2-D tensor with shape N x D, where N is the batch size and
 D is the number of classes. This input is a tensor of logits computed
 by the previous operator. Logits are unscaled log probabilities given
 as log(p/(1-p)) The data type should be float32 or float64.
-label (Variable): a 2-D tensor of the same type and shape as X.
+label (Tensor): a 2-D tensor of the same type and shape as X.
 This input is a tensor of probabalistic labels for each logit.
 ignore_index(int): Specifies a target value that is ignored and
 does not contribute to the input gradient.
@@ -1456,22 +1428,19 @@ def sigmoid_cross_entropy_with_logits(x,
 targets != ignore_index.
 Returns:
-out(${out_type}): ${out_comment}
+out(Tensor): ${out_comment}
 Examples:
 .. code-block:: python
-import paddle.fluid as fluid
-input = fluid.data(
-name='data', shape=[10], dtype='float32')
-label = fluid.data(
-name='data', shape=[10], dtype='float32')
-loss = fluid.layers.sigmoid_cross_entropy_with_logits(
-x=input,
-label=label,
-ignore_index=-1,
-normalize=True) # or False
-# loss = fluid.layers.reduce_sum(loss) # summation of loss
+import paddle
+input = paddle.rand(shape=[10], dtype='float32')
+label = paddle.rand(shape=[10], dtype='float32')
+loss = paddle.fluid.layers.sigmoid_cross_entropy_with_logits(input, label,
+ignore_index=-1, normalize=True)
+print(loss)
 """
 check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'],
 'sigmoid_cross_entropy_with_logits')
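For reference, a NumPy sketch (not part of this commit) of the per-element loss this docstring describes, with ignore_index positions contributing zero; the helper name below is illustrative only:

    import numpy as np

    def sigmoid_bce(x, label, ignore_index=-1):
        p = 1.0 / (1.0 + np.exp(-x))                       # sigmoid of the logits
        loss = -label * np.log(p) - (1 - label) * np.log(1 - p)
        return np.where(label == ignore_index, 0.0, loss)  # ignored targets add no loss

    x = np.array([2.0, -1.0, 0.5])
    label = np.array([1.0, 0.0, 1.0])
    print(sigmoid_bce(x, label))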
@@ -1619,47 +1588,44 @@ def huber_loss(input, label, delta):
 @templatedoc()
 def kldiv_loss(x, target, reduction='mean', name=None):
 """
-:alias_main: paddle.nn.functional.kldiv_loss
-:alias: paddle.nn.functional.kldiv_loss,paddle.nn.functional.loss.kldiv_loss
-:old_api: paddle.fluid.layers.kldiv_loss
 ${comment}
 Args:
-x (Variable): ${x_comment}
-target (Variable): ${target_comment}
-reduction (Variable): ${reduction_comment}
+x (Tensor): ${x_comment}
+target (Tensor): ${target_comment}
+reduction (Tensor): ${reduction_comment}
 name(str, optional): For detailed information, please refer
 to :ref:`api_guide_Name`. Usually name is no need to set and
 None by default.
 Returns:
-Variable(Tensor): The KL divergence loss. The data type is same as input tensor
+Tensor: The KL divergence loss. The data type is same as input tensor
 Examples:
 .. code-block:: python
+import paddle
 import paddle.fluid as fluid
-# 'batchmean' reduction, loss shape will be [N]
-x = fluid.data(name='x', shape=[None,4,2,2], dtype='float32') # shape=[-1, 4, 2, 2]
-target = fluid.layers.data(name='target', shape=[4,2,2], dtype='float32')
-loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='batchmean') # shape=[-1]
+x = paddle.rand(shape=[3,4,2,2], dtype='float32')
+target = paddle.rand(shape=[3,4,2,2], dtype='float32')
+# 'batchmean' reduction, loss shape will be [1]
+loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='batchmean')
+print(loss.shape) # shape=[1]
 # 'mean' reduction, loss shape will be [1]
-x = fluid.data(name='x', shape=[None,4,2,2], dtype='float32') # shape=[-1, 4, 2, 2]
-target = fluid.layers.data(name='target', shape=[4,2,2], dtype='float32')
-loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='mean') # shape=[1]
+loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='mean')
+print(loss.shape) # shape=[1]
 # 'sum' reduction, loss shape will be [1]
-x = fluid.data(name='x', shape=[None,4,2,2], dtype='float32') # shape=[-1, 4, 2, 2]
-target = fluid.layers.data(name='target', shape=[4,2,2], dtype='float32')
-loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='sum') # shape=[1]
+loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='sum')
+print(loss.shape) # shape=[1]
 # 'none' reduction, loss shape is same with X shape
-x = fluid.data(name='x', shape=[None,4,2,2], dtype='float32') # shape=[-1, 4, 2, 2]
-target = fluid.layers.data(name='target', shape=[4,2,2], dtype='float32')
-loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='none') # shape=[-1, 4, 2, 2]
+loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='none')
+print(loss.shape) # shape=[3, 4, 2, 2]
 """
 helper = LayerHelper('kldiv_loss', **locals())
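For orientation only: assuming the usual KL-divergence form target * (log(target) - x) with x given as log-probabilities (the templated ${comment} carries the authoritative definition), the reductions behave as in this NumPy sketch, which is not part of this commit:

    import numpy as np

    x = np.log(np.array([[0.2, 0.3, 0.5]]))  # log-probabilities, shape [1, 3]
    target = np.array([[0.3, 0.3, 0.4]])
    elem = target * (np.log(target) - x)     # reduction='none' keeps this shape
    print(elem.sum())                        # reduction='sum'
    print(elem.mean())                       # reduction='mean'
    print(elem.sum() / x.shape[0])           # reduction='batchmean': sum divided by batch size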

File diff suppressed because it is too large.

File diff suppressed because it is too large.

@@ -1271,28 +1271,26 @@ def has_nan(x):
 def isfinite(x):
 """
-:alias_main: paddle.isfinite
-:alias: paddle.isfinite,paddle.tensor.isfinite,paddle.tensor.logic.isfinite
-:old_api: paddle.fluid.layers.isfinite
 Test if any of x contains an infinity/NAN number. If all the elements are finite,
 returns true, else false.
 Args:
-x(variable): The Tensor/LoDTensor to be checked.
+x(Tensor): The Tensor to be checked.
 Returns:
-Variable: The tensor variable storing the output, contains a bool value.
+Tensor: The tensor storing the output, contains a bool value.
 Examples:
 .. code-block:: python
-import paddle.fluid as fluid
-var = fluid.layers.data(name="data",
-shape=(4, 6),
-dtype="float32")
-out = fluid.layers.isfinite(var)
+import paddle
+x = paddle.rand(shape=[4, 6], dtype='float32')
+y = paddle.fluid.layers.isfinite(x)
+print(y)
 """
 check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
 "isfinite")

@@ -120,11 +120,10 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean',
 import paddle
-paddle.disable_static()
 input = paddle.to_tensor([0.5, 0.6, 0.7], 'float32')
 label = paddle.to_tensor([1.0, 0.0, 1.0], 'float32')
 output = paddle.nn.functional.binary_cross_entropy(input, label)
-print(output.numpy()) # [0.65537095]
+print(output) # [0.65537095]
 """
 if reduction not in ['sum', 'mean', 'none']:
@@ -200,16 +199,16 @@ def binary_cross_entropy_with_logits(logit,
 .. math::
 Out = -Labels * \\log(\\sigma(Logit)) - (1 - Labels) * \\log(1 - \\sigma(Logit))
-We know that :math:`\\sigma(Logit) = \\frac{1}{1 + \\e^{-Logit}}`. By substituting this we get:
+We know that :math:`\\sigma(Logit) = \\frac{1}{1 + e^{-Logit}}`. By substituting this we get:
 .. math::
-Out = Logit - Logit * Labels + \\log(1 + \\e^{-Logit})
+Out = Logit - Logit * Labels + \\log(1 + e^{-Logit})
-For stability and to prevent overflow of :math:`\\e^{-Logit}` when Logit < 0,
+For stability and to prevent overflow of :math:`e^{-Logit}` when Logit < 0,
 we reformulate the loss as follows:
 .. math::
-Out = \\max(Logit, 0) - Logit * Labels + \\log(1 + \\e^{-\|Logit\|})
+Out = \\max(Logit, 0) - Logit * Labels + \\log(1 + e^{-\|Logit\|})
 Then, if ``weight`` or ``pos_weight`` is not None, this operator multiply the
 weight tensor on the loss `Out`. The ``weight`` tensor will attach different
@@ -254,11 +253,11 @@ def binary_cross_entropy_with_logits(logit,
 .. code-block:: python
 import paddle
-paddle.disable_static()
 logit = paddle.to_tensor([5.0, 1.0, 3.0])
 label = paddle.to_tensor([1.0, 0.0, 1.0])
 output = paddle.nn.functional.binary_cross_entropy_with_logits(logit, label)
-print(output.numpy()) # [0.45618808]
+print(output) # [0.45618808]
 """
 if reduction not in ['sum', 'mean', 'none']:
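The corrected formulas above can be checked numerically; a NumPy sketch (not part of this commit) showing that the stable reformulation matches the direct expression:

    import numpy as np

    logit = np.array([5.0, 1.0, -3.0])
    label = np.array([1.0, 0.0, 1.0])

    sig = 1.0 / (1.0 + np.exp(-logit))
    direct = -label * np.log(sig) - (1 - label) * np.log(1 - sig)
    stable = np.maximum(logit, 0) - logit * label + np.log1p(np.exp(-np.abs(logit)))
    print(np.allclose(direct, stable))  # True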
@@ -577,13 +576,12 @@ def margin_ranking_loss(input,
 .. code-block:: python
 import paddle
-paddle.disable_static()
 input = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32')
 other = paddle.to_tensor([[2, 1], [2, 4]], dtype='float32')
 label = paddle.to_tensor([[1, -1], [-1, -1]], dtype='float32')
 loss = paddle.nn.functional.margin_ranking_loss(input, other, label)
-print(loss.numpy()) # [0.75]
+print(loss) # [0.75]
 """
 if reduction not in ['sum', 'mean', 'none']:
 raise ValueError(
@@ -651,22 +649,22 @@ def l1_loss(input, label, reduction='mean', name=None):
 If `reduction` set to ``'none'``, the loss is:
 .. math::
-Out = \lvert input - label\rvert
+Out = \\lvert input - label \\rvert
 If `reduction` set to ``'mean'``, the loss is:
 .. math::
-Out = MEAN(\lvert input - label\rvert)
+Out = MEAN(\\lvert input - label \\rvert)
 If `reduction` set to ``'sum'``, the loss is:
 .. math::
-Out = SUM(\lvert input - label\rvert)
+Out = SUM(\\lvert input - label\\rvert)
 Parameters:
-input (Tensor): The input tensor. The shapes is [N, *], where N is batch size and `*` means any number of additional dimensions. It's data type should be float32, float64, int32, int64.
-label (Tensor): label. The shapes is [N, *], same shape as ``input`` . It's data type should be float32, float64, int32, int64.
+input (Tensor): The input tensor. The shapes is [N, `*`], where N is batch size and `*` means any number of additional dimensions. It's data type should be float32, float64, int32, int64.
+label (Tensor): label. The shapes is [N, `*`], same shape as ``input`` . It's data type should be float32, float64, int32, int64.
 reduction (str, optional): Indicate the reduction to apply to the loss,
 the candicates are ``'none'`` | ``'mean'`` | ``'sum'``.
 If `reduction` is ``'none'``, the unreduced loss is returned;
@@ -674,12 +672,15 @@ def l1_loss(input, label, reduction='mean', name=None):
 If `reduction` is ``'sum'``, the reduced sum loss is returned.
 Default is ``'mean'``.
 name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
 Returns:
 Tensor, the L1 Loss of Tensor ``input`` and ``label``.
 If `reduction` is ``'none'``, the shape of output loss is [N, *], the same as ``input`` .
 If `reduction` is ``'mean'`` or ``'sum'``, the shape of output loss is [1].
 Examples:
 .. code-block:: python
 import paddle
 paddle.disable_static()
@@ -40,15 +40,15 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
 .. math::
-y = \frac{x}{ \max\left( \lvert \lvert x \rvert \rvert_p, epsilon\right) }
+y = \\frac{x}{ \\max\\left( \\lvert \\lvert x \\rvert \\rvert_p, epsilon\\right) }
 .. math::
-\lvert \lvert x \rvert \rvert_p = \left(\sum_i {\lvert x_i\rvert^p} \right)^{1/p}
+\\lvert \\lvert x \\rvert \\rvert_p = \\left( \\sum_i {\\lvert x_i \\rvert^p} \\right)^{1/p}
-where, :math:`\sum_i{\lvert x_i\rvert^p}` is calculated along the ``axis`` dimension.
+where, :math:`\\sum_i{\\lvert x_i \\rvert^p}` is calculated along the ``axis`` dimension.
-Args:
+Parameters:
 x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64.
 p (float|int, optional): The exponent value in the norm formulation. Default: 2
 axis (int, optional): The axis on which to apply normalization. If `axis < 0`, the dimension to normalization is `x.ndim + axis`. -1 is the last dimension.
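A NumPy sketch (not part of this commit) of the formula above: the p-norm is taken along axis and clamped from below by epsilon before dividing:

    import numpy as np

    x = np.array([[3.0, 4.0], [0.0, 0.0]])
    p, axis, epsilon = 2, 1, 1e-12
    norm = np.maximum((np.abs(x) ** p).sum(axis=axis, keepdims=True) ** (1.0 / p), epsilon)
    print(x / norm)  # [[0.6 0.8] [0.  0. ]]; the zero row stays at zero thanks to epsilon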

@@ -838,9 +838,13 @@ class MarginRankingLoss(fluid.dygraph.Layer):
 name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
 Shape:
-input: N-D Tensor, the shape is [N, *], N is batch size and `*` means any number of additional dimensions., available dtype is float32, float64.
+input: N-D Tensor, the shape is [N, \*], N is batch size and `\*` means any number of additional dimensions, available dtype is float32, float64.
 other: N-D Tensor, `other` have the same shape and dtype as `input`.
 label: N-D Tensor, label have the same shape and dtype as `input`.
 output: If :attr:`reduction` is ``'mean'`` or ``'sum'`` , the out shape is :math:`[1]`, otherwise the shape is the same as `input` .The same dtype as input tensor.
 Returns:
@@ -851,14 +855,13 @@ class MarginRankingLoss(fluid.dygraph.Layer):
 .. code-block:: python
 import paddle
-paddle.disable_static()
 input = paddle.to_tensor([[1, 2], [3, 4]]), dtype="float32")
 other = paddle.to_tensor([[2, 1], [2, 4]]), dtype="float32")
 label = paddle.to_tensor([[1, -1], [-1, -1]], dtype="float32")
 margin_rank_loss = paddle.nn.MarginRankingLoss()
 loss = margin_rank_loss(input, other, label)
-print(loss.numpy()) # [0.75]
+print(loss) # [0.75]
 """
 def __init__(self, margin=0.0, reduction='mean', name=None):
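For reference, the hinge form behind this layer (assuming the conventional definition max(0, -label * (input - other) + margin)) reproduces the [0.75] in the example above; a NumPy sketch, not part of this commit:

    import numpy as np

    input = np.array([[1.0, 2.0], [3.0, 4.0]])
    other = np.array([[2.0, 1.0], [2.0, 4.0]])
    label = np.array([[1.0, -1.0], [-1.0, -1.0]])
    loss = np.maximum(0.0, -label * (input - other) + 0.0)  # margin = 0.0 by default
    print(loss.mean())  # 0.75 with the default 'mean' reduction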

@@ -54,9 +54,6 @@ __all__ = [
 def equal_all(x, y, name=None):
 """
-:alias_main: paddle.equal_all
-:alias: paddle.equal_all,paddle.tensor.equal_all,paddle.tensor.logic.equal_all
 This OP returns the truth value of :math:`x == y`. True if two inputs have the same elements, False otherwise.
 **NOTICE**: The output of this OP has no gradient.
@@ -75,14 +72,13 @@ def equal_all(x, y, name=None):
 import paddle
-paddle.disable_static()
 x = paddle.to_tensor([1, 2, 3])
 y = paddle.to_tensor([1, 2, 3])
 z = paddle.to_tensor([1, 4, 3])
 result1 = paddle.equal_all(x, y)
-print(result1.numpy()) # result1 = [True ]
+print(result1) # result1 = [True ]
 result2 = paddle.equal_all(x, z)
-print(result2.numpy()) # result2 = [False ]
+print(result2) # result2 = [False ]
 """
 helper = LayerHelper("equal_all", **locals())
@@ -122,8 +118,6 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
 import paddle
-paddle.disable_static()
 x = paddle.to_tensor([10000., 1e-07])
 y = paddle.to_tensor([10000.1, 1e-08])
 result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
@@ -189,10 +183,9 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
 @templatedoc()
 def equal(x, y, name=None):
 """
-:alias_main: paddle.equal
-:alias: paddle.equal,paddle.tensor.equal,paddle.tensor.logic.equal
 This layer returns the truth value of :math:`x == y` elementwise.
 **NOTICE**: The output of this OP has no gradient.
 Args:
@@ -210,11 +203,10 @@ def equal(x, y, name=None):
 import paddle
-paddle.disable_static()
 x = paddle.to_tensor([1, 2, 3])
 y = paddle.to_tensor([1, 3, 2])
 result1 = paddle.equal(x, y)
-print(result1.numpy()) # result1 = [True False False]
+print(result1) # result1 = [True False False]
 """
 if in_dygraph_mode():
 return core.ops.equal(x, y)
@@ -236,10 +228,8 @@ def equal(x, y, name=None):
 @templatedoc()
 def greater_equal(x, y, name=None):
 """
-:alias_main: paddle.greater_equal
-:alias: paddle.greater_equal,paddle.tensor.greater_equal,paddle.tensor.logic.greater_equal
 This OP returns the truth value of :math:`x >= y` elementwise, which is equivalent function to the overloaded operator `>=`.
 **NOTICE**: The output of this OP has no gradient.
 Args:
@@ -252,13 +242,13 @@ def greater_equal(x, y, name=None):
 Examples:
 .. code-block:: python
 import paddle
-paddle.disable_static()
 x = paddle.to_tensor([1, 2, 3])
 y = paddle.to_tensor([1, 3, 2])
 result1 = paddle.greater_equal(x, y)
-print(result1.numpy()) # result1 = [True False True]
+print(result1) # result1 = [True False True]
 """
 if in_dygraph_mode():
 return core.ops.greater_equal(x, y)
@@ -282,10 +272,8 @@ def greater_equal(x, y, name=None):
 @templatedoc()
 def greater_than(x, y, name=None):
 """
-:alias_main: paddle.greater_than
-:alias: paddle.greater_than,paddle.tensor.greater_than,paddle.tensor.logic.greater_than
 This OP returns the truth value of :math:`x > y` elementwise, which is equivalent function to the overloaded operator `>`.
 **NOTICE**: The output of this OP has no gradient.
 Args:
@@ -298,13 +286,13 @@ def greater_than(x, y, name=None):
 Examples:
 .. code-block:: python
 import paddle
-paddle.disable_static()
 x = paddle.to_tensor([1, 2, 3])
 y = paddle.to_tensor([1, 3, 2])
 result1 = paddle.greater_than(x, y)
-print(result1.numpy()) # result1 = [False False True]
+print(result1) # result1 = [False False True]
 """
 if in_dygraph_mode():
 return core.ops.greater_than(x, y)
@@ -328,10 +316,8 @@ def greater_than(x, y, name=None):
 @templatedoc()
 def less_equal(x, y, name=None):
 """
-:alias_main: paddle.less_equal
-:alias: paddle.less_equal,paddle.tensor.less_equal,paddle.tensor.logic.less_equal
 This OP returns the truth value of :math:`x <= y` elementwise, which is equivalent function to the overloaded operator `<=`.
 **NOTICE**: The output of this OP has no gradient.
 Args:
@@ -345,13 +331,13 @@ def less_equal(x, y, name=None):
 Examples:
 .. code-block:: python
 import paddle
-paddle.disable_static()
 x = paddle.to_tensor([1, 2, 3])
 y = paddle.to_tensor([1, 3, 2])
 result1 = paddle.less_equal(x, y)
-print(result1.numpy()) # result1 = [True True False]
+print(result1) # result1 = [True True False]
 """
 if in_dygraph_mode():
 return core.ops.less_equal(x, y)
@@ -373,10 +359,8 @@ def less_equal(x, y, name=None):
 @templatedoc()
 def less_than(x, y, name=None):
 """
-:alias_main: paddle.less_than
-:alias: paddle.less_than,paddle.tensor.less_than,paddle.tensor.logic.less_than
 This OP returns the truth value of :math:`x < y` elementwise, which is equivalent function to the overloaded operator `<`.
 **NOTICE**: The output of this OP has no gradient.
 Args:
@@ -390,13 +374,13 @@ def less_than(x, y, name=None):
 Examples:
 .. code-block:: python
 import paddle
-paddle.disable_static()
 x = paddle.to_tensor([1, 2, 3])
 y = paddle.to_tensor([1, 3, 2])
 result1 = paddle.less_than(x, y)
-print(result1.numpy()) # result1 = [False True False]
+print(result1) # result1 = [False True False]
 """
 if in_dygraph_mode():
 return core.ops.less_than(x, y)
@@ -418,10 +402,8 @@ def less_than(x, y, name=None):
 @templatedoc()
 def not_equal(x, y, name=None):
 """
-:alias_main: paddle.not_equal
-:alias: paddle.not_equal,paddle.tensor.not_equal,paddle.tensor.logic.not_equal
 This OP returns the truth value of :math:`x != y` elementwise, which is equivalent function to the overloaded operator `!=`.
 **NOTICE**: The output of this OP has no gradient.
 Args:
@@ -438,11 +420,10 @@ def not_equal(x, y, name=None):
 import paddle
-paddle.disable_static()
 x = paddle.to_tensor([1, 2, 3])
 y = paddle.to_tensor([1, 3, 2])
 result1 = paddle.not_equal(x, y)
-print(result1.numpy()) # result1 = [False True True]
+print(result1) # result1 = [False True True]
 """
 if in_dygraph_mode():
 return core.ops.not_equal(x, y)

File diff suppressed because it is too large.

@@ -39,9 +39,6 @@ from paddle.common_ops_import import *
 def argsort(x, axis=-1, descending=False, name=None):
 """
-:alias_main: paddle.argsort
-:alias: paddle.argsort,paddle.tensor.argsort,paddle.tensor.search.argsort
 This OP sorts the input along the given axis, and returns the corresponding index tensor for the sorted output values. The default sort algorithm is ascending, if you want the sort algorithm to be descending, you must set the :attr:`descending` as True.
 Args:
@@ -67,7 +64,6 @@ def argsort(x, axis=-1, descending=False, name=None):
 import paddle
-paddle.disable_static()
 x = paddle.to_tensor([[[5,8,9,5],
 [0,0,1,7],
 [6,9,2,4]],
@@ -78,21 +74,21 @@ def argsort(x, axis=-1, descending=False, name=None):
 out1 = paddle.argsort(x=x, axis=-1)
 out2 = paddle.argsort(x=x, axis=0)
 out3 = paddle.argsort(x=x, axis=1)
-print(out1.numpy())
+print(out1)
 #[[[0 3 1 2]
 # [0 1 2 3]
 # [2 3 0 1]]
 # [[1 3 2 0]
 # [0 1 2 3]
 # [2 0 3 1]]]
-print(out2.numpy())
+print(out2)
 #[[[0 1 1 1]
 # [0 0 0 0]
 # [1 1 1 0]]
 # [[1 0 0 0]
 # [1 1 1 1]
 # [0 0 0 1]]]
-print(out3.numpy())
+print(out3)
 #[[[1 1 1 2]
 # [0 0 2 0]
 # [2 2 0 1]]
@@ -149,17 +145,16 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):
 import paddle
-paddle.disable_static()
 x = paddle.to_tensor([[5,8,9,5],
 [0,0,1,7],
 [6,9,2,4]])
 out1 = paddle.argmax(x)
-print(out1.numpy()) # 2
+print(out1) # 2
 out2 = paddle.argmax(x, axis=1)
-print(out2.numpy())
+print(out2)
 # [2 3 1]
 out3 = paddle.argmax(x, axis=-1)
-print(out3.numpy())
+print(out3)
 # [2 3 1]
 """
 if axis is not None and not isinstance(axis, int):
@@ -227,17 +222,16 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
 import paddle
-paddle.disable_static()
 x = paddle.to_tensor([[5,8,9,5],
 [0,0,1,7],
 [6,9,2,4]])
 out1 = paddle.argmin(x)
-print(out1.numpy()) # 4
+print(out1) # 4
 out2 = paddle.argmin(x, axis=1)
-print(out2.numpy())
+print(out2)
 # [0 0 2]
 out3 = paddle.argmin(x, axis=-1)
-print(out3.numpy())
+print(out3)
 # [0 0 2]
 """
 if axis is not None and not isinstance(axis, int):
@@ -357,22 +351,20 @@ def nonzero(x, as_tuple=False):
 .. code-block:: python
 import paddle
 x1 = paddle.to_tensor([[1.0, 0.0, 0.0],
 [0.0, 2.0, 0.0],
 [0.0, 0.0, 3.0]])
 x2 = paddle.to_tensor([0.0, 1.0, 0.0, 3.0])
-x3 = paddle.to_tensor([0.0, 0.0, 0.0])
 out_z1 = paddle.nonzero(x1)
-print(out_z1.numpy())
+print(out_z1)
 #[[0 0]
 # [1 1]
 # [2 2]]
 out_z1_tuple = paddle.nonzero(x1, as_tuple=True)
 for out in out_z1_tuple:
-print(out.numpy())
+print(out)
 #[[0]
 # [1]
 # [2]]
@@ -380,21 +372,15 @@ def nonzero(x, as_tuple=False):
 # [1]
 # [2]]
 out_z2 = paddle.nonzero(x2)
-print(out_z2.numpy())
+print(out_z2)
 #[[1]
 # [3]]
 out_z2_tuple = paddle.nonzero(x2, as_tuple=True)
 for out in out_z2_tuple:
-print(out.numpy())
+print(out)
 #[[1]
 # [3]]
-out_z3 = paddle.nonzero(x3)
-print(out_z3.numpy())
-#[]
-out_z3_tuple = paddle.nonzero(x3, as_tuple=True)
-for out in out_z3_tuple:
-print(out.numpy())
-#[]
 """
 list_out = []
 shape = x.shape
@@ -419,8 +405,6 @@ def nonzero(x, as_tuple=False):
 def sort(x, axis=-1, descending=False, name=None):
 """
-:alias_main: paddle.sort
-:alias: paddle.sort,paddle.tensor.sort,paddle.tensor.search.sort
 This OP sorts the input along the given axis, and returns the sorted output tensor. The default sort algorithm is ascending, if you want the sort algorithm to be descending, you must set the :attr:`descending` as True.
@@ -439,10 +423,11 @@ def sort(x, axis=-1, descending=False, name=None):
 Returns:
 Tensor: sorted tensor(with the same shape and data type as ``x``).
 Examples:
 .. code-block:: python
 import paddle
-paddle.disable_static()
 x = paddle.to_tensor([[[5,8,9,5],
 [0,0,1,7],
 [6,9,2,4]],
@@ -453,21 +438,21 @@ def sort(x, axis=-1, descending=False, name=None):
 out1 = paddle.sort(x=x, axis=-1)
 out2 = paddle.sort(x=x, axis=0)
 out3 = paddle.sort(x=x, axis=1)
-print(out1.numpy())
+print(out1)
 #[[[5. 5. 8. 9.]
 # [0. 0. 1. 7.]
 # [2. 4. 6. 9.]]
 # [[2. 2. 4. 5.]
 # [4. 7. 7. 9.]
 # [0. 1. 6. 7.]]]
-print(out2.numpy())
+print(out2)
 #[[[5. 2. 4. 2.]
 # [0. 0. 1. 7.]
 # [1. 7. 0. 4.]]
 # [[5. 8. 9. 5.]
 # [4. 7. 7. 9.]
 # [6. 9. 2. 6.]]]
-print(out3.numpy())
+print(out3)
 #[[[0. 0. 1. 4.]
 # [5. 8. 2. 5.]
 # [6. 9. 9. 7.]]
@@ -610,7 +595,7 @@ def index_sample(x, index):
 [500, 600, 700, 800],
 [900, 1000, 1100, 1200]], dtype='int32')
 out_z1 = paddle.index_sample(x, index)
-print(out_z1.numpy())
+print(out_z1)
 #[[1. 2. 3.]
 # [6. 7. 8.]
 # [9. 9. 9.]]
@@ -619,17 +604,17 @@ def index_sample(x, index):
 # get the value of the element of the corresponding index in other tensors
 top_value, top_index = paddle.topk(x, k=2)
 out_z2 = paddle.index_sample(target, top_index)
-print(top_value.numpy())
+print(top_value)
 #[[ 4. 3.]
 # [ 8. 7.]
 # [12. 11.]]
-print(top_index.numpy())
+print(top_index)
 #[[3 2]
 # [3 2]
 # [3 2]]
-print(out_z2.numpy())
+print(out_z2)
 #[[ 400 300]
 # [ 800 700]
 # [1200 1100]]
@@ -673,7 +658,6 @@ def masked_select(x, mask, name=None):
 import paddle
 x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0],
 [5.0, 6.0, 7.0, 8.0],
 [9.0, 10.0, 11.0, 12.0]])
@@ -726,33 +710,31 @@ def topk(x, k, axis=None, largest=True, sorted=True, name=None):
 import paddle
-paddle.disable_static()
 tensor_1 = paddle.to_tensor([1, 4, 5, 7])
 value_1, indices_1 = paddle.topk(tensor_1, k=1)
-print(value_1.numpy())
+print(value_1)
 # [7]
-print(indices_1.numpy())
+print(indices_1)
 # [3]
 tensor_2 = paddle.to_tensor([[1, 4, 5, 7], [2, 6, 2, 5]])
 value_2, indices_2 = paddle.topk(tensor_2, k=1)
-print(value_2.numpy())
+print(value_2)
 # [[7]
 # [6]]
-print(indices_2.numpy())
+print(indices_2)
 # [[3]
 # [1]]
 value_3, indices_3 = paddle.topk(tensor_2, k=1, axis=-1)
-print(value_3.numpy())
+print(value_3)
 # [[7]
 # [6]]
-print(indices_3.numpy())
+print(indices_3)
 # [[3]
 # [1]]
 value_4, indices_4 = paddle.topk(tensor_2, k=1, axis=0)
-print(value_4.numpy())
+print(value_4)
 # [[2 6 5 7]]
-print(indices_4.numpy())
+print(indices_4)
 # [[1 1 0 0]]
 """
