test=develop (#24522)

release/2.0-alpha
swtkiwi 5 years ago committed by GitHub
parent a9520db24e
commit f5c6dd6def

@ -454,6 +454,10 @@ def rpn_target_assign(bbox_pred,
def sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25):
"""
:alias_main: paddle.nn.functional.sigmoid_focal_loss
:alias: paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss
:old_api: paddle.fluid.layers.sigmoid_focal_loss
**Sigmoid Focal Loss Operator.**
`Focal Loss <https://arxiv.org/abs/1708.02002>`_ is used to address the foreground-background
@ -550,6 +554,10 @@ def detection_output(loc,
nms_eta=1.0,
return_index=False):
"""
:alias_main: paddle.nn.functional.detection_output
:alias: paddle.nn.functional.detection_output,paddle.nn.functional.vision.detection_output
:old_api: paddle.fluid.layers.detection_output
Given the regression locations, classification confidences and prior boxes,
calculate the detection outputs by performing the following steps:
@ -679,6 +687,10 @@ def detection_output(loc,
@templatedoc()
def iou_similarity(x, y, box_normalized=True, name=None):
"""
:alias_main: paddle.nn.functional.iou_similarity
:alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity
:old_api: paddle.fluid.layers.iou_similarity
${comment}
Args:
@ -735,6 +747,10 @@ def box_coder(prior_box,
name=None,
axis=0):
"""
:alias_main: paddle.nn.functional.box_coder
:alias: paddle.nn.functional.box_coder,paddle.nn.functional.vision.box_coder
:old_api: paddle.fluid.layers.box_coder
**Box Coder Layer**
Encode/Decode the target bounding box with the priorbox information.
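As a brief usage sketch (not part of the diff; the shapes and the encode mode are illustrative assumptions against the fluid 1.x signature above):
.. code-block:: python

    import paddle.fluid as fluid

    # Illustrative shapes: 80 prior boxes and 20 target boxes, both [*, 4].
    prior = fluid.data(name='prior_box', shape=[80, 4], dtype='float32')
    target = fluid.data(name='target_box', shape=[20, 4],
                        dtype='float32', lod_level=1)
    # Encoding yields per-pair offsets of shape [20, 80, 4].
    encoded = fluid.layers.box_coder(
        prior_box=prior,
        prior_box_var=[0.1, 0.1, 0.2, 0.2],
        target_box=target,
        code_type='encode_center_size')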
@ -922,6 +938,10 @@ def yolov3_loss(x,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolov3_loss
:alias: paddle.nn.functional.yolov3_loss,paddle.nn.functional.vision.yolov3_loss
:old_api: paddle.fluid.layers.yolov3_loss
${comment}
Args:
@ -1045,6 +1065,10 @@ def yolo_box(x,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolo_box
:alias: paddle.nn.functional.yolo_box,paddle.nn.functional.vision.yolo_box
:old_api: paddle.fluid.layers.yolo_box
${comment}
Args:
@ -1220,6 +1244,10 @@ def bipartite_match(dist_matrix,
dist_threshold=None,
name=None):
"""
:alias_main: paddle.nn.functional.bipartite_match
:alias: paddle.nn.functional.bipartite_match,paddle.nn.functional.vision.bipartite_match
:old_api: paddle.fluid.layers.bipartite_match
This operator implements a greedy bipartite matching algorithm, which is
used to obtain the matching with the maximum distance based on the input
distance matrix. For an input 2D matrix, the bipartite matching algorithm can
@ -1310,6 +1338,10 @@ def target_assign(input,
mismatch_value=None,
name=None):
"""
:alias_main: paddle.nn.functional.target_assign
:alias: paddle.nn.functional.target_assign,paddle.nn.functional.extension.target_assign
:old_api: paddle.fluid.layers.target_assign
Given the target bounding boxes or labels, this operator assigns
classification and regression targets to each prediction, as well as
weights to each prediction. The weights are used to specify which predictions would
@ -1424,6 +1456,10 @@ def ssd_loss(location,
normalize=True,
sample_size=None):
"""
:alias_main: paddle.nn.functional.ssd_loss
:alias: paddle.nn.functional.ssd_loss,paddle.nn.functional.loss.ssd_loss
:old_api: paddle.fluid.layers.ssd_loss
**Multi-box loss layer for object detection algorithm of SSD**
This layer computes the detection loss for SSD given the location offset
@ -1667,6 +1703,10 @@ def prior_box(input,
name=None,
min_max_aspect_ratios_order=False):
"""
:alias_main: paddle.nn.functional.prior_box
:alias: paddle.nn.functional.prior_box,paddle.nn.functional.vision.prior_box
:old_api: paddle.fluid.layers.prior_box
This op generates prior boxes for the SSD (Single Shot MultiBox Detector) algorithm.
Each position of the input produces N prior boxes, where N is determined by
the count of min_sizes, max_sizes and aspect_ratios. The size of the
@ -1824,6 +1864,10 @@ def density_prior_box(input,
flatten_to_2d=False,
name=None):
"""
:alias_main: paddle.nn.functional.density_prior_box
:alias: paddle.nn.functional.density_prior_box,paddle.nn.functional.vision.density_prior_box
:old_api: paddle.fluid.layers.density_prior_box
This op generates density prior boxes for the SSD (Single Shot MultiBox Detector)
algorithm. Each position of the input produces N prior boxes, where N is
@ -2012,6 +2056,8 @@ def multi_box_head(inputs,
name=None,
min_max_aspect_ratios_order=False):
"""
:api_attr: Static Graph
Based on the SSD (Single Shot MultiBox Detector) algorithm, generate prior boxes,
regression locations and classification confidences on multiple input feature
maps, then output the concatenated results. The details of this algorithm,
@ -2287,6 +2333,10 @@ def anchor_generator(input,
offset=0.5,
name=None):
"""
:alias_main: paddle.nn.functional.anchor_generator
:alias: paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator
:old_api: paddle.fluid.layers.anchor_generator
**Anchor generator operator**
Generate anchors for Faster RCNN algorithm.
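A minimal call sketch, assuming an RPN-style [N, C, H, W] feature map (names and sizes are illustrative):
.. code-block:: python

    import paddle.fluid as fluid

    conv = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
    # Returns the anchors and their variances, each [H, W, num_anchors, 4].
    anchor, var = fluid.layers.anchor_generator(
        input=conv,
        anchor_sizes=[64, 128, 256, 512],
        aspect_ratios=[0.5, 1.0, 2.0],
        variance=[0.1, 0.1, 0.2, 0.2],
        stride=[16.0, 16.0],
        offset=0.5)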
@ -2488,6 +2538,10 @@ def generate_proposal_labels(rpn_rois,
is_cls_agnostic=False,
is_cascade_rcnn=False):
"""
:alias_main: paddle.nn.functional.generate_proposal_labels
:alias: paddle.nn.functional.generate_proposal_labels,paddle.nn.functional.vision.generate_proposal_labels
:old_api: paddle.fluid.layers.generate_proposal_labels
**Generate Proposal Labels of Faster-RCNN**
Given the GenerateProposalOp output bounding boxes and the ground truth, this operator can
@ -2602,6 +2656,10 @@ def generate_proposal_labels(rpn_rois,
def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
labels_int32, num_classes, resolution):
"""
:alias_main: paddle.nn.functional.generate_mask_labels
:alias: paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels
:old_api: paddle.fluid.layers.generate_mask_labels
**Generate Mask Labels for Mask-RCNN**
Given the RoIs and the corresponding labels, this operator can
@ -2757,6 +2815,10 @@ def generate_proposals(scores,
name=None,
return_rois_num=False):
"""
:alias_main: paddle.nn.functional.generate_proposals
:alias: paddle.nn.functional.generate_proposals,paddle.nn.functional.vision.generate_proposals
:old_api: paddle.fluid.layers.generate_proposals
**Generate proposal Faster-RCNN**
This operation proposes RoIs according to each box with their
@ -2867,6 +2929,10 @@ def generate_proposals(scores,
def box_clip(input, im_info, name=None):
"""
:alias_main: paddle.nn.functional.box_clip
:alias: paddle.nn.functional.box_clip,paddle.nn.functional.vision.box_clip
:old_api: paddle.fluid.layers.box_clip
Clip the boxes to the size given by im_info.
For each input box, the formula is given as follows:
@ -3092,6 +3158,10 @@ def multiclass_nms(bboxes,
background_label=0,
name=None):
"""
:alias_main: paddle.nn.functional.multiclass_nms
:alias: paddle.nn.functional.multiclass_nms,paddle.nn.functional.extension.multiclass_nms
:old_api: paddle.fluid.layers.multiclass_nms
**Multiclass NMS**
This operator performs multi-class non-maximum suppression (NMS) on
@ -3369,6 +3439,10 @@ def distribute_fpn_proposals(fpn_rois,
refer_scale,
name=None):
"""
:alias_main: paddle.nn.functional.distribute_fpn_proposals
:alias: paddle.nn.functional.distribute_fpn_proposals,paddle.nn.functional.vision.distribute_fpn_proposals
:old_api: paddle.fluid.layers.distribute_fpn_proposals
**This op only takes LoDTensor as input.** In Feature Pyramid Networks
(FPN) models, it is necessary to distribute all proposals among the different FPN
levels according to the scale of the proposals, the referring scale, and the
@ -3454,6 +3528,10 @@ def box_decoder_and_assign(prior_box,
box_clip,
name=None):
"""
:alias_main: paddle.nn.functional.box_decoder_and_assign
:alias: paddle.nn.functional.box_decoder_and_assign,paddle.nn.functional.vision.box_decoder_and_assign
:old_api: paddle.fluid.layers.box_decoder_and_assign
${comment}
Args:
prior_box(${prior_box_type}): ${prior_box_comment}
@ -3525,6 +3603,10 @@ def collect_fpn_proposals(multi_rois,
post_nms_top_n,
name=None):
"""
:alias_main: paddle.nn.functional.collect_fpn_proposals
:alias: paddle.nn.functional.collect_fpn_proposals,paddle.nn.functional.vision.collect_fpn_proposals
:old_api: paddle.fluid.layers.collect_fpn_proposals
**This OP only supports LoDTensor as input**. Concatenate multi-level RoIs
(Regions of Interest) and select N RoIs with respect to multi_scores.
This operation performs the following steps:

@ -557,6 +557,8 @@ def py_reader(capacity,
name=None,
use_double_buffer=True):
"""
:api_attr: Static Graph
Create a Python reader for data feeding in Python.
This operator returns a Reader Variable.
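A minimal static-graph sketch (the MNIST-like shapes are illustrative assumptions):
.. code-block:: python

    import paddle.fluid as fluid

    reader = fluid.layers.py_reader(capacity=64,
                                    shapes=[(-1, 1, 28, 28), (-1, 1)],
                                    dtypes=['float32', 'int64'])
    # Unpack the reader into input variables for the rest of the network.
    image, label = fluid.layers.read_file(reader)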
@ -724,6 +726,8 @@ def create_py_reader_by_data(capacity,
name=None,
use_double_buffer=True):
"""
:api_attr: Static Graph
The OP creates a Python reader for data feeding in Python, it is similar
to :ref:`api_fluid_layers_py_reader` except that it can read data from
the list of feed variables.
@ -861,6 +865,8 @@ def double_buffer(reader, place=None, name=None):
def read_file(reader):
"""
:api_attr: Static Graph
Execute the given reader and get data via it.
A reader is also a Variable. It can be a raw reader generated by

@ -52,6 +52,10 @@ def _decay_step_counter(begin=0):
def noam_decay(d_model, warmup_steps, learning_rate=1.0):
"""
:alias_main: paddle.nn.functional.noam_decay
:alias: paddle.nn.functional.noam_decay,paddle.nn.functional.learning_rate.noam_decay
:old_api: paddle.fluid.layers.noam_decay
Noam decay method. The numpy implementation of noam decay is as follows.
.. code-block:: python
@ -111,6 +115,10 @@ def noam_decay(d_model, warmup_steps, learning_rate=1.0):
def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
"""
:alias_main: paddle.nn.functional.exponential_decay
:alias: paddle.nn.functional.exponential_decay,paddle.nn.functional.learning_rate.exponential_decay
:old_api: paddle.fluid.layers.exponential_decay
Applies exponential decay to the learning rate.
When training a model, it is often recommended to lower the learning rate as the
@ -167,7 +175,12 @@ def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
def natural_exp_decay(learning_rate, decay_steps, decay_rate, staircase=False):
"""Applies natural exponential decay to the initial learning rate.
"""
:alias_main: paddle.nn.functional.natural_exp_decay
:alias: paddle.nn.functional.natural_exp_decay,paddle.nn.functional.learning_rate.natural_exp_decay
:old_api: paddle.fluid.layers.natural_exp_decay
Applies natural exponential decay to the initial learning rate.
When training a model, it is often recommended to lower the learning rate as the
training progresses. By using this function, the learning rate will be decayed by
@ -224,6 +237,10 @@ def natural_exp_decay(learning_rate, decay_steps, decay_rate, staircase=False):
def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False):
"""
:alias_main: paddle.nn.functional.inverse_time_decay
:alias: paddle.nn.functional.inverse_time_decay,paddle.nn.functional.learning_rate.inverse_time_decay
:old_api: paddle.fluid.layers.inverse_time_decay
Applies inverse time decay to the initial learning rate.
When training a model, it is often recommended to lower the learning rate as the
@ -285,6 +302,10 @@ def polynomial_decay(learning_rate,
power=1.0,
cycle=False):
"""
:alias_main: paddle.nn.functional.polynomial_decay
:alias: paddle.nn.functional.polynomial_decay,paddle.nn.functional.learning_rate.polynomial_decay
:old_api: paddle.fluid.layers.polynomial_decay
Applies polynomial decay to the initial learning rate.
.. code-block:: text
@ -349,7 +370,12 @@ def polynomial_decay(learning_rate,
def piecewise_decay(boundaries, values):
"""Applies piecewise decay to the initial learning rate.
"""
:alias_main: paddle.nn.functional.piecewise_decay
:alias: paddle.nn.functional.piecewise_decay,paddle.nn.functional.learning_rate.piecewise_decay
:old_api: paddle.fluid.layers.piecewise_decay
Applies piecewise decay to the initial learning rate.
The algorithm can be described as the code below.
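For instance, a sketch that holds the rate at 1.0 for the first 10000 steps, 0.5 until step 20000, and 0.1 afterwards (the numbers are illustrative):
.. code-block:: python

    import paddle.fluid as fluid

    boundaries = [10000, 20000]
    values = [1.0, 0.5, 0.1]  # one more value than boundaries
    lr = fluid.layers.piecewise_decay(boundaries=boundaries, values=values)
    optimizer = fluid.optimizer.SGD(learning_rate=lr)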
@ -424,6 +450,10 @@ def piecewise_decay(boundaries, values):
def cosine_decay(learning_rate, step_each_epoch, epochs):
"""
:alias_main: paddle.nn.functional.cosine_decay
:alias: paddle.nn.functional.cosine_decay,paddle.nn.functional.learning_rate.cosine_decay
:old_api: paddle.fluid.layers.cosine_decay
Applies cosine decay to the learning rate.
When training a model, it is often recommended to lower the learning rate as the
@ -469,6 +499,10 @@ def cosine_decay(learning_rate, step_each_epoch, epochs):
def linear_lr_warmup(learning_rate, warmup_steps, start_lr, end_lr):
"""
:alias_main: paddle.nn.functional.linear_lr_warmup
:alias: paddle.nn.functional.linear_lr_warmup,paddle.nn.functional.learning_rate.linear_lr_warmup
:old_api: paddle.fluid.layers.linear_lr_warmup
This operator uses the linear learning rate warm-up strategy to adjust the learning rate preliminarily before the normal learning rate scheduling.
For more information, please refer to `Bag of Tricks for Image Classification with Convolutional Neural Networks <https://arxiv.org/abs/1812.01187>`_
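A sketch of warming the rate up from 0 to the base rate over the first 500 steps (the numbers are illustrative):
.. code-block:: python

    import paddle.fluid as fluid

    # Linearly increase the learning rate from 0.0 to 0.1 during the
    # first 500 steps, then follow the given base rate.
    lr = fluid.layers.linear_lr_warmup(learning_rate=0.1,
                                       warmup_steps=500,
                                       start_lr=0.0,
                                       end_lr=0.1)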

@ -80,6 +80,10 @@ def softshrink(x, alpha=None):
softshrink.__doc__ = """
:alias_main: paddle.nn.functional.softshrink
:alias: paddle.nn.functional.softshrink,paddle.nn.functional.activation.softshrink
:old_api: paddle.fluid.layers.softshrink
:strong:`Softshrink Activation Operator`
.. math::
@ -123,6 +127,10 @@ def hard_shrink(x, threshold=None):
hard_shrink.__doc__ = _hard_shrink_.__doc__ + """
:alias_main: paddle.nn.functional.hard_shrink
:alias: paddle.nn.functional.hard_shrink,paddle.nn.functional.activation.hard_shrink
:old_api: paddle.fluid.layers.hard_shrink
Examples:
>>> import paddle.fluid as fluid
@ -146,6 +154,10 @@ def cumsum(x, axis=None, exclusive=None, reverse=None):
cumsum.__doc__ = """
:alias_main: paddle.cumsum
:alias: paddle.cumsum,paddle.tensor.cumsum,paddle.tensor.math.cumsum
:old_api: paddle.fluid.layers.cumsum
The cumulative sum of the elements along a given axis. By default, the first element of the result is the same as the first element of the input. If exclusive is true, the first element of the result is 0.
Args:
@ -184,6 +196,10 @@ def thresholded_relu(x, threshold=None):
thresholded_relu.__doc__ = """
:alias_main: paddle.nn.functional.thresholded_relu
:alias: paddle.nn.functional.thresholded_relu,paddle.nn.functional.activation.thresholded_relu
:old_api: paddle.fluid.layers.thresholded_relu
:strong:`Thresholded ReLU Activation Operator`
Equation:
@ -266,6 +282,10 @@ def gelu(x, approximate=False):
gelu.__doc__ = """
:alias_main: paddle.nn.functional.gelu
:alias: paddle.nn.functional.gelu,paddle.nn.functional.activation.gelu
:old_api: paddle.fluid.layers.gelu
:strong:`GeLU Activation Operator`
For more details, see [Gaussian Error Linear Units](https://arxiv.org/abs/1606.08415).
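A minimal static-graph sketch of applying the activation (the shape is illustrative):
.. code-block:: python

    import paddle.fluid as fluid

    x = fluid.data(name='x', shape=[None, 3], dtype='float32')
    y = fluid.layers.gelu(x)                         # exact formulation
    y_tanh = fluid.layers.gelu(x, approximate=True)  # tanh approximation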
@ -350,6 +370,10 @@ def erf(x):
erf.__doc__ = """
:alias_main: paddle.erf
:alias: paddle.erf,paddle.tensor.erf,paddle.tensor.math.erf,paddle.nn.functional.erf,paddle.nn.functional.activation.erf
:old_api: paddle.fluid.layers.erf
:strong:`Erf Operator`
For more details, see [Error function](https://en.wikipedia.org/wiki/Error_function).
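A small dygraph sketch (the input values are illustrative):
.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(
            np.array([[-0.5, 0.0, 0.5]], dtype='float32'))
        y = fluid.layers.erf(x)  # elementwise Gauss error function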

@ -57,6 +57,8 @@ __all__ = [
class RNNCell(object):
"""
:api_attr: Static Graph
RNNCell is the abstract base class representing the calculations that
map the input and state to the output and new state. It is suitable for,
and mostly used in, RNNs.
@ -221,6 +223,8 @@ class RNNCell(object):
class GRUCell(RNNCell):
"""
:api_attr: Static Graph
Gated Recurrent Unit cell. It is a wrapper for
`fluid.contrib.layers.rnn_impl.BasicGRUUnit` to make it adapt to RNNCell.
@ -317,6 +321,8 @@ class GRUCell(RNNCell):
class LSTMCell(RNNCell):
"""
:api_attr: Static Graph
Long-Short Term Memory cell. It is a wrapper for
`fluid.contrib.layers.rnn_impl.BasicLSTMUnit` to make it adapt to RNNCell.
@ -431,6 +437,8 @@ def rnn(cell,
is_reverse=False,
**kwargs):
"""
:api_attr: Static Graph
rnn creates a recurrent neural network specified by RNNCell `cell`,
which performs :code:`cell.call()` repeatedly until it reaches the maximum
length of `inputs`.
@ -575,6 +583,8 @@ def rnn(cell,
class Decoder(object):
"""
:api_attr: Static Graph
Decoder is the base class for any decoder instance used in `dynamic_decode`.
It provides interface for output generation for one time step, which can be
used to generate sequences.
@ -686,6 +696,8 @@ class Decoder(object):
class BeamSearchDecoder(Decoder):
"""
:api_attr: Static Graph
Decoder with beam search decoding strategy. It wraps a cell to get probabilities,
and follows a beam search step to calculate scores and select candidate
token ids for each decoding step.
@ -1153,6 +1165,8 @@ def dynamic_decode(decoder,
return_length=False,
**kwargs):
"""
:api_attr: Static Graph
Dynamic decoding performs :code:`decoder.step()` repeatedly until the returned
Tensor indicating finished status contains all True values or the number of
decoding steps reaches :attr:`max_step_num`.
@ -1975,6 +1989,8 @@ def dynamic_lstm(input,
dtype='float32',
name=None):
"""
:api_attr: Static Graph
**Note**:
1. This OP only supports LoDTensor as inputs. If you need to deal with Tensor, please use :ref:`api_fluid_layers_lstm` .
2. In order to improve efficiency, users must first map the input of dimension [T, hidden_size] to input of [T, 4 * hidden_size], and then pass it to this OP.
@ -2145,6 +2161,8 @@ def lstm(input,
default_initializer=None,
seed=-1):
"""
:api_attr: Static Graph
**Note**:
This OP only supports running on GPU devices.
@ -2330,6 +2348,8 @@ def dynamic_lstmp(input,
cell_clip=None,
proj_clip=None):
"""
:api_attr: Static Graph
**Note**:
1. In order to improve efficiency, users must first map the input of dimension [T, hidden_size] to input of [T, 4 * hidden_size], and then pass it to this OP.
@ -2539,6 +2559,8 @@ def dynamic_gru(input,
h_0=None,
origin_mode=False):
"""
:api_attr: Static Graph
**Note: The input type of this must be LoDTensor. If the input type to be
processed is Tensor, use** :ref:`api_fluid_layers_StaticRNN` .
@ -2691,6 +2713,8 @@ def gru_unit(input,
gate_activation='sigmoid',
origin_mode=False):
"""
:api_attr: Static Graph
Gated Recurrent Unit (GRU) RNN cell. This operator performs GRU calculations for
one time step and it supports these two modes:
@ -2847,6 +2871,10 @@ def beam_search(pre_ids,
name=None,
return_parent_idx=False):
"""
:alias_main: paddle.nn.beam_search
:alias: paddle.nn.beam_search,paddle.nn.decode.beam_search
:old_api: paddle.fluid.layers.beam_search
Beam search is a classical algorithm for selecting candidate words in a
machine translation task.
@ -2988,6 +3016,10 @@ def beam_search(pre_ids,
def beam_search_decode(ids, scores, beam_size, end_id, name=None):
"""
:alias_main: paddle.nn.beam_search_decode
:alias: paddle.nn.beam_search_decode,paddle.nn.decode.beam_search_decode
:old_api: paddle.fluid.layers.beam_search_decode
This operator is used after beam search has completed. It constructs the
full predicted sequences for each sample by walking back along the search
paths stored in lod of ``ids`` . The result sequences are stored in a
@ -3067,6 +3099,8 @@ def lstm_unit(x_t,
bias_attr=None,
name=None):
"""
:api_attr: Static Graph
Long-Short Term Memory (LSTM) RNN cell. This operator performs LSTM calculations for
one time step, whose implementation is based on calculations described in `RECURRENT
NEURAL NETWORK REGULARIZATION <http://arxiv.org/abs/1409.2329>`_ .

@ -52,6 +52,8 @@ def sequence_conv(input,
act=None,
name=None):
"""
:api_attr: Static Graph
**Notes: The Op only receives LoDTensor as input. If your input is Tensor, please use conv2d Op.(fluid.layers.** :ref:`api_fluid_layers_conv2d` ).
This operator receives input sequences with variable length and other convolutional
@ -174,6 +176,8 @@ def sequence_conv(input,
def sequence_softmax(input, use_cudnn=False, name=None):
"""
:api_attr: Static Graph
**Note**:
**The input type of the OP must be LoDTensor. For Tensor, use:** :ref:`api_fluid_layers_softmax`
@ -256,6 +260,8 @@ def sequence_softmax(input, use_cudnn=False, name=None):
def sequence_pool(input, pool_type, is_test=False, pad_value=0.0):
"""
:api_attr: Static Graph
**Notes: The Op only receives LoDTensor as input. If your input is Tensor, please use pool2d Op.(fluid.layers.** :ref:`api_fluid_layers_pool2d` ).
This operator only supports LoDTensor as input. It will apply specified pooling
@ -368,6 +374,8 @@ def sequence_pool(input, pool_type, is_test=False, pad_value=0.0):
@templatedoc()
def sequence_concat(input, name=None):
"""
:api_attr: Static Graph
**Notes: The Op only receives LoDTensor as input. If your input is Tensor, please use concat Op.(fluid.layers.** :ref:`api_fluid_layers_concat` ).
This operator only supports LoDTensor as input. It concatenates the multiple LoDTensors from the input according to the LoD information,
@ -427,6 +435,8 @@ def sequence_concat(input, name=None):
def sequence_first_step(input):
"""
:api_attr: Static Graph
This operator only supports LoDTensor as input. Given the input LoDTensor, it will
select the first time-step feature of each sequence as output.
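A minimal sketch, assuming a lod_level-1 input declared through ``fluid.data`` (the shape is illustrative):
.. code-block:: python

    import paddle.fluid as fluid

    # x holds a batch of variable-length sequences with feature size 10.
    x = fluid.data(name='x', shape=[None, 10], dtype='float32', lod_level=1)
    # One row per sequence: its first time step.
    first = fluid.layers.sequence_first_step(input=x)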
@ -479,6 +489,8 @@ def sequence_first_step(input):
def sequence_last_step(input):
"""
:api_attr: Static Graph
This operator only supports LoDTensor as input. Given the input LoDTensor, it will
select the last time-step feature of each sequence as output.
@ -532,6 +544,8 @@ def sequence_last_step(input):
def sequence_slice(input, offset, length, name=None):
"""
:api_attr: Static Graph
**Sequence Slice Layer**
The layer crops a subsequence from given sequence with given start
@ -617,7 +631,10 @@ def sequence_slice(input, offset, length, name=None):
def sequence_expand(x, y, ref_level=-1, name=None):
"""Sequence Expand Layer. This layer will expand the input variable ``x`` \
"""
:api_attr: Static Graph
Sequence Expand Layer. This layer will expand the input variable ``x`` \
according to specified level ``ref_level`` lod of ``y``. Please note that \
the lod level of ``x`` is at most 1. If the lod level of ``x`` is 1, then \
the size of lod of ``x`` must be equal to the length of ``ref_level`` lod \
@ -750,7 +767,10 @@ def sequence_expand(x, y, ref_level=-1, name=None):
def sequence_expand_as(x, y, name=None):
"""Sequence Expand As Layer. This OP will expand the input variable ``x`` \
"""
:api_attr: Static Graph
Sequence Expand As Layer. This OP will expand the input variable ``x`` \
according to the zeroth level lod of ``y``. The current implementation requires \
that the level number of ``y``'s lod is 1, and that the first dimension of \
``x`` equals the size of ``y``'s zeroth level lod, thus \
@ -865,6 +885,8 @@ def sequence_expand_as(x, y, name=None):
def sequence_pad(x, pad_value, maxlen=None, name=None):
"""
:api_attr: Static Graph
This layer pads the sequences in the same batch to a common length (according \
to ``maxlen``). The padding value is defined by ``pad_value``, and will be \
appended to the tail of sequences. The result is a Python tuple ``(Out, Length)``: \
@ -977,6 +999,8 @@ def sequence_pad(x, pad_value, maxlen=None, name=None):
def sequence_unpad(x, length, name=None):
"""
:api_attr: Static Graph
**Note**:
**The input of the OP is Tensor and the output is LoDTensor. For padding operation, See:** :ref:`api_fluid_layers_sequence_pad`
@ -1050,6 +1074,8 @@ def sequence_unpad(x, length, name=None):
def sequence_reshape(input, new_dim):
"""
:api_attr: Static Graph
**Notes: The Op only receives LoDTensor as input. If your input is Tensor, please use reshape Op.(fluid.layers.** :ref:`api_fluid_layers_reshape` ).
This operator only supports LoDTensor as input. Given :attr:`new_dim` ,
@ -1110,6 +1136,8 @@ def sequence_reshape(input, new_dim):
def sequence_scatter(input, index, updates, name=None):
"""
:api_attr: Static Graph
**Note**:
**The index and updates parameters of the OP must be LoDTensor.**
@ -1198,6 +1226,8 @@ def sequence_scatter(input, index, updates, name=None):
def sequence_enumerate(input, win_size, pad_value=0, name=None):
"""
:api_attr: Static Graph
Generate a new sequence for the input index sequence with \
shape ``[d_1, win_size]``, which enumerates all the \
sub-sequences with length ``win_size`` of the input with \

@ -75,6 +75,8 @@ def create_parameter(shape,
is_bias=False,
default_initializer=None):
"""
:api_attr: Static Graph
This function creates a parameter. The parameter is a learnable variable, which can have
a gradient and can be optimized.
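A minimal static-graph sketch (the shape is illustrative):
.. code-block:: python

    import paddle.fluid as fluid

    # A learnable [784, 200] weight matrix with the default initializer.
    w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32')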
@ -195,6 +197,10 @@ def create_global_var(shape,
def cast(x, dtype):
"""
:alias_main: paddle.cast
:alias: paddle.cast,paddle.tensor.cast,paddle.tensor.manipulation.cast
:old_api: paddle.fluid.layers.cast
This OP takes in the Variable :attr:`x` with :attr:`x.dtype` and casts it
to the output with :attr:`dtype`. It's meaningless if the output dtype
equals the input dtype, but it's fine if you do so.
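A minimal sketch (the shape is illustrative):
.. code-block:: python

    import paddle.fluid as fluid

    x = fluid.data(name='x', shape=[None, 3], dtype='float32')
    y = fluid.layers.cast(x=x, dtype='float64')  # same values, wider dtype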
@ -257,6 +263,10 @@ def cast(x, dtype):
def concat(input, axis=0, name=None):
"""
:alias_main: paddle.concat
:alias: paddle.concat,paddle.tensor.concat,paddle.tensor.manipulation.concat
:old_api: paddle.fluid.layers.concat
**Concat**
This OP concatenates the input along the axis.
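A minimal sketch (the shapes are illustrative):
.. code-block:: python

    import paddle.fluid as fluid

    a = fluid.data(name='a', shape=[None, 3], dtype='float32')
    b = fluid.data(name='b', shape=[None, 4], dtype='float32')
    # Concatenate along axis 1 -> shape [None, 7].
    out = fluid.layers.concat(input=[a, b], axis=1)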
@ -535,6 +545,10 @@ def sums(input, out=None):
def assign(input, output=None):
"""
:alias_main: paddle.nn.functional.assign
:alias: paddle.nn.functional.assign,paddle.nn.functional.common.assign
:old_api: paddle.fluid.layers.assign
The OP copies the :attr:`input` to the :attr:`output`.
Parameters:
@ -607,6 +621,10 @@ def assign(input, output=None):
def fill_constant(shape, dtype, value, force_cpu=False, out=None):
"""
:alias_main: paddle.fill_constant
:alias: paddle.fill_constant,paddle.tensor.fill_constant,paddle.tensor.creation.fill_constant
:old_api: paddle.fluid.layers.fill_constant
This OP creates a Tensor with specified `shape` and `dtype`, and
initializes it with a constant specified by `value`.
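A minimal sketch (shape, dtype and value are illustrative):
.. code-block:: python

    import paddle.fluid as fluid

    # A [2, 3] int64 tensor filled with 5.
    data = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=5)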
@ -787,6 +805,10 @@ def fill_constant_batch_size_like(input,
def argmin(x, axis=0):
"""
:alias_main: paddle.argmin
:alias: paddle.argmin,paddle.tensor.argmin,paddle.tensor.search.argmin
:old_api: paddle.fluid.layers.argmin
**argmin**
This OP computes the indices of the min elements of the input tensor's
@ -913,6 +935,10 @@ def argmax(x, axis=0):
def argsort(input, axis=-1, descending=False, name=None):
"""
:alias_main: paddle.argsort
:alias: paddle.argsort,paddle.tensor.argsort,paddle.tensor.search.argsort
:old_api: paddle.fluid.layers.argsort
This OP sorts the input along the given axis, and returns sorted output
data Variable and its corresponding index Variable with the same shape as
:attr:`input`.
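A minimal sketch (the shape is illustrative):
.. code-block:: python

    import paddle.fluid as fluid

    x = fluid.data(name='x', shape=[None, 4], dtype='float32')
    # Sorted values and the indices that produce them, along the last axis.
    out, indices = fluid.layers.argsort(input=x, axis=-1, descending=False)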
@ -1061,6 +1087,10 @@ def zeros(shape, dtype, force_cpu=False):
def reverse(x, axis):
"""
:alias_main: paddle.reverse
:alias: paddle.reverse,paddle.tensor.reverse,paddle.tensor.manipulation.reverse
:old_api: paddle.fluid.layers.reverse
The OP reverses the tensor :attr:`x` along the given :attr:`axis`.
Parameters:
@ -1171,6 +1201,10 @@ def load_combine(out, file_path):
def has_inf(x):
"""
:alias_main: paddle.has_inf
:alias: paddle.has_inf,paddle.tensor.has_inf,paddle.tensor.search.has_inf
:old_api: paddle.fluid.layers.has_inf
Test if x contains any infinity values.
Args:
@ -1196,6 +1230,10 @@ def has_inf(x):
def has_nan(x):
"""
:alias_main: paddle.has_nan
:alias: paddle.has_nan,paddle.tensor.has_nan,paddle.tensor.search.has_nan
:old_api: paddle.fluid.layers.has_nan
Test if x contains any NaN values.
Args:
@ -1221,6 +1259,10 @@ def has_nan(x):
def isfinite(x):
"""
:alias_main: paddle.isfinite
:alias: paddle.isfinite,paddle.tensor.isfinite,paddle.tensor.logic.isfinite
:old_api: paddle.fluid.layers.isfinite
Test whether x contains any infinity or NaN values. Returns true if all
the elements are finite, otherwise false.
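A dygraph sketch exercising the three checks together (the values are illustrative; the comments state the expected semantics):
.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(
            np.array([1.0, float('inf')], dtype='float32'))
        has_inf = fluid.layers.has_inf(x)   # expected: True
        has_nan = fluid.layers.has_nan(x)   # expected: False
        finite = fluid.layers.isfinite(x)   # expected: False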
@ -1418,6 +1460,10 @@ def zeros_like(x, out=None):
def diag(diagonal):
"""
:alias_main: paddle.diag
:alias: paddle.diag,paddle.tensor.diag,paddle.tensor.creation.diag
:old_api: paddle.fluid.layers.diag
This OP creates a square matrix which has diagonal values specified by input :attr:`diagonal`.
Args:
@ -1461,6 +1507,10 @@ def diag(diagonal):
def eye(num_rows, num_columns=None, batch_shape=None, dtype='float32'):
"""
:alias_main: paddle.eye
:alias: paddle.eye,paddle.tensor.eye,paddle.tensor.creation.eye
:old_api: paddle.fluid.layers.eye
**eye**
This function constructs an identity tensor, or a batch of tensors.
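A minimal sketch (the sizes are illustrative):
.. code-block:: python

    import paddle.fluid as fluid

    identity = fluid.layers.eye(3, dtype='int32')      # [3, 3] identity
    batched = fluid.layers.eye(2, 3, batch_shape=[4])  # shape [4, 2, 3]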

@ -112,6 +112,10 @@ def _yield_flat_nest(nest):
def flatten(nest):
"""
:alias_main: paddle.flatten
:alias: paddle.flatten,paddle.tensor.flatten,paddle.tensor.manipulation.flatten
:old_api: paddle.fluid.layers.flatten
Traverse all entries in the nested structure and put them into a list.
"""
if is_sequence(nest):

@ -114,6 +114,8 @@ def create_lod_tensor(data, recursive_seq_lens, place):
def create_random_int_lodtensor(recursive_seq_lens, base_shape, place, low,
high):
"""
:api_attr: Static Graph
Create a LoDTensor containing random integers.
The implementation is as follows:
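As a usage sketch (the sequence lengths and value range are illustrative assumptions):
.. code-block:: python

    import paddle.fluid as fluid

    # Two sequences of lengths 3 and 2, feature size 2, values in [0, 9].
    t = fluid.create_random_int_lodtensor(recursive_seq_lens=[[3, 2]],
                                          base_shape=[2],
                                          place=fluid.CPUPlace(),
                                          low=0, high=9)
    # t is a LoDTensor of shape [5, 2].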

@ -43,6 +43,8 @@ def simple_img_conv_pool(input,
act=None,
use_cudnn=True):
"""
:api_attr: Static Graph
The simple_img_conv_pool api is composed of :ref:`api_fluid_layers_conv2d` and :ref:`api_fluid_layers_pool2d` .
Args:
@ -149,6 +151,8 @@ def img_conv_group(input,
pool_type="max",
use_cudnn=True):
"""
:api_attr: Static Graph
The Image Convolution Group is composed of Convolution2d, BatchNorm, DropOut,
and Pool2d. According to the input arguments, img_conv_group will do a series of
computations for the input using Convolution2d, BatchNorm, DropOut, and pass the last
@ -257,6 +261,8 @@ def sequence_conv_pool(input,
pool_type="max",
bias_attr=None):
"""
:api_attr: Static Graph
**This api takes input as an LoDTensor. If input is a Tensor, please use**
:ref:`api_fluid_nets_simple_img_conv_pool` **instead**
@ -321,6 +327,8 @@ def sequence_conv_pool(input,
def glu(input, dim=-1):
"""
:api_attr: Static Graph
The Gated Linear Units (GLU) is composed of :ref:`api_fluid_layers_split` ,
:ref:`api_fluid_layers_sigmoid` and :ref:`api_fluid_layers_elementwise_mul` .
Specifically, GLU will split the input into two equal-sized parts,
@ -367,6 +375,8 @@ def scaled_dot_product_attention(queries,
num_heads=1,
dropout_rate=0.):
"""
:api_attr: Static Graph
This interface implements Multi-Head Attention using scaled dot product.
Attention mechanism can be seen as mapping a query and a set of key-value
pairs to an output. Multi-Head Attention performs attention using multi-head

@ -1070,6 +1070,8 @@ class MomentumOptimizer(Optimizer):
class DGCMomentumOptimizer(Optimizer):
"""
:api_attr: Static Graph
DGC (Deep Gradient Compression) Momentum Optimizer. Original paper is https://arxiv.org/abs/1712.01887
DGC reduces the communication bandwidth by sending only the important gradients (sparse update):\
@ -2994,6 +2996,8 @@ Lamb = LambOptimizer
class ModelAverage(Optimizer):
"""
:api_attr: Static Graph
The ModelAverage optimizer accumulates specific continuous historical parameters
during training. The accumulated historical range can be controlled by the passed
``average_window_rate`` argument. The averaged ``Parameter`` values are used in the prediction,
@ -3301,6 +3305,8 @@ class ModelAverage(Optimizer):
class ExponentialMovingAverage(object):
"""
:api_attr: Static Graph
Compute the moving average of parameters with exponential decay.
Given a parameter :math:`\\theta`, its exponential moving average (EMA)
will be
@ -3549,6 +3555,8 @@ class ExponentialMovingAverage(object):
class PipelineOptimizer(object):
"""
:api_attr: Static Graph
Pipeline Optimizer
Train with pipeline mode. The program will be split by cut_list.
@ -3849,6 +3857,8 @@ class PipelineOptimizer(object):
class RecomputeOptimizer(Optimizer):
"""
:api_attr: Static Graph
Recompute Optimizer Wrapper
Normally, a training step contains three sub-steps: first, run forward
@ -3921,6 +3931,8 @@ class RecomputeOptimizer(Optimizer):
def load(self, stat_dict):
"""
:api_attr: Static Graph
load function is not supported by Recompute Optimizer for now.
:return: None
@ -4137,6 +4149,8 @@ class RecomputeOptimizer(Optimizer):
class LookaheadOptimizer(object):
"""
:api_attr: Static Graph
This implements the Lookahead optimizer of the
paper : https://arxiv.org/abs/1907.08610.

@ -28,6 +28,8 @@ BuildStrategy = core.ParallelExecutor.BuildStrategy
class ParallelExecutor(object):
"""
:api_attr: Static Graph
The ParallelExecutor is an upgraded version of :code:`fluid.Executor` that supports multi-node model
training and testing based on the data-parallel mode. In data-parallel mode,
ParallelExecutor will broadcast the parameters from Node0 to other nodes during

@ -202,6 +202,8 @@ class ParamAttr(object):
class WeightNormParamAttr(ParamAttr):
"""
:api_attr: Static Graph
Parameter of weight Norm. Weight Norm is a reparameterization of the weight vectors
in a neural network that decouples the magnitude of those weight vectors from
their direction. Weight Norm has been implemented as discussed in this

@ -140,6 +140,8 @@ def slice_variable(var_list, slice_count, min_block_size):
class DistributeTranspilerConfig(object):
"""
:api_attr: Static Graph
A configuration class that provides support for transpiler distributed jobs.
Some important parameters are explained as follows:
@ -253,6 +255,8 @@ class ServerRuntimeConfig(object):
class DistributeTranspiler(object):
"""
:api_attr: Static Graph
**DistributeTranspiler**
Convert the fluid program to distributed data-parallelism programs.

@ -21,6 +21,8 @@ def memory_optimize(input_program,
level=0,
skip_grads=True):
"""
:api_attr: Static Graph
This API is deprecated since 1.6. Please do not use it. The better
memory optimization strategies are enabled by default.
"""
@ -41,6 +43,8 @@ def memory_optimize(input_program,
def release_memory(input_program, skip_opt_set=None):
"""
:api_attr: Static Graph
This API is deprecated since 1.6. Please do not use it. The better
memory optimization strategies are enabled by default.
"""

@ -48,6 +48,8 @@ class PSDispatcher(object):
class HashName(PSDispatcher):
"""
:api_attr: Static Graph
Hash variable names to several endpoints using Python's
``hash()`` function.
@ -88,6 +90,8 @@ class HashName(PSDispatcher):
class RoundRobin(PSDispatcher):
"""
:api_attr: Static Graph
Distribute variables to several endpoints using the
`Round-robin <https://en.wikipedia.org/wiki/Round-robin_scheduling>`_ method.

@ -20,6 +20,9 @@ __all__ = ['manual_seed']
def manual_seed(seed):
"""
:alias_main: paddle.manual_seed
:alias: paddle.manual_seed,paddle.framework.random.manual_seed
Set the global manual seed for the program.
Args:

@ -78,6 +78,9 @@ def hsigmoid(input,
path_code=None,
is_sparse=False):
"""
:alias_main: paddle.nn.functional.hsigmoid
:alias: paddle.nn.functional.hsigmoid,paddle.nn.functional.activation.hsigmoid
The hierarchical sigmoid organizes the classes into a complete binary tree to reduce the computational complexity
and speed up the model training, especially the training of language models.
Each leaf node of the complete binary tree represents a class (word) and each non-leaf node acts as a binary classifier.
@ -191,6 +194,9 @@ def hsigmoid(input,
def relu(input, inplace=False, name=None):
"""
:alias_main: paddle.nn.functional.relu
:alias: paddle.nn.functional.relu,paddle.nn.functional.activation.relu
ReLU Activation.
.. math::
@ -240,6 +246,9 @@ def relu(input, inplace=False, name=None):
def sigmoid(input, inplace=False, name=None):
"""
:alias_main: paddle.nn.functional.sigmoid
:alias: paddle.nn.functional.sigmoid,paddle.nn.functional.activation.sigmoid
Sigmoid Activation.
.. math::
@ -298,6 +307,9 @@ def sigmoid(input, inplace=False, name=None):
def log_softmax(input, axis=None, dtype=None, name=None):
"""
:alias_main: paddle.nn.functional.log_softmax
:alias: paddle.nn.functional.log_softmax,paddle.nn.functional.activation.log_softmax
This operator implements the log_softmax layer. The calculation process is as follows:
.. math::

@ -51,6 +51,9 @@ def interpolate(input,
align_mode=1,
data_format='NCHW'):
"""
:alias_main: paddle.nn.functional.interpolate
:alias: paddle.nn.functional.interpolate,paddle.nn.functional.common.interpolate
This op resizes a batch of images.
The input must be a 4-D Tensor of the shape (num_batches, channels, in_h, in_w)
or (num_batches, in_h, in_w, channels), or a 5-D Tensor of the shape

@ -99,6 +99,9 @@ def conv2d(input,
data_format="NCHW",
name=None):
"""
:alias_main: paddle.nn.functional.conv2d
:alias: paddle.nn.functional.conv2d,paddle.nn.functional.conv.conv2d
The convolution2D layer calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. Input and
Output are in NCHW or NHWC format, where N is batch size, C is the number of
@ -333,6 +336,9 @@ def conv2d_transpose(input,
data_format='NCHW',
name=None):
"""
:alias_main: paddle.nn.functional.conv2d_transpose
:alias: paddle.nn.functional.conv2d_transpose,paddle.nn.functional.conv.conv2d_transpose
The convolution2D transpose layer calculates the output based on the input,
filter, and dilations, strides, paddings. Input(Input) and output(Output)
are in NCHW or NHWC format. Where N is batch size, C is the number of channels,
@ -572,6 +578,9 @@ def conv3d(input,
data_format="NCDHW",
name=None):
"""
:alias_main: paddle.nn.functional.conv3d
:alias: paddle.nn.functional.conv3d,paddle.nn.functional.conv.conv3d
The convolution3D layer calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. Input(Input) and
Output(Output) are in NCDHW or NDHWC format. Where N is batch size, C is the number of
@ -786,6 +795,9 @@ def conv3d_transpose(input,
data_format='NCDHW',
name=None):
"""
:alias_main: paddle.nn.functional.conv3d_transpose
:alias: paddle.nn.functional.conv3d_transpose,paddle.nn.functional.conv.conv3d_transpose
The convolution3D transpose layer calculates the output based on the input,
filter, and dilations, strides, paddings. Input(Input) and output(Output)
are in NCDHW or NDHWC format. Where N is batch size, C is the number of channels,

@ -48,6 +48,9 @@ from ...fluid.layers.layer_function_generator import templatedoc
def diag_embed(input, offset=0, dim1=-2, dim2=-1):
"""
:alias_main: paddle.nn.functional.diag_embed
:alias: paddle.nn.functional.diag_embed,paddle.nn.functional.extension.diag_embed
This OP creates a tensor whose diagonals of certain 2D planes (specified by dim1 and dim2)
are filled by ``input``. By default, a 2D plane formed by the last two dimensions
of the returned tensor will be selected.
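A dygraph sketch under the 2.0-alpha module layout shown above (the shapes are illustrative):
.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid
    import paddle.nn.functional as F

    with fluid.dygraph.guard():
        diags = fluid.dygraph.to_variable(
            np.random.rand(2, 3).astype('float32'))
        out = F.diag_embed(diags)  # shape [2, 3, 3]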
@ -165,6 +168,9 @@ def diag_embed(input, offset=0, dim1=-2, dim2=-1):
@templatedoc()
def row_conv(input, weight, act=None):
"""
:alias_main: paddle.nn.functional.row_conv
:alias: paddle.nn.functional.row_conv,paddle.nn.functional.extension.row_conv
${comment}
Args:

@ -31,6 +31,8 @@ from .. import functional
class HSigmoid(layers.Layer):
"""
:alias_main: paddle.nn.HSigmoid
:alias: paddle.nn.HSigmoid,paddle.nn.layer.HSigmoid,paddle.nn.layer.activation.HSigmoid
Hierarchical Sigmoid Layer.
@ -165,6 +167,9 @@ class HSigmoid(layers.Layer):
class ReLU(layers.Layer):
"""
:alias_main: paddle.nn.ReLU
:alias: paddle.nn.ReLU,paddle.nn.layer.ReLU,paddle.nn.layer.activation.ReLU
ReLU Activation.
.. math::
@ -204,6 +209,9 @@ class ReLU(layers.Layer):
class Sigmoid(layers.Layer):
"""
:alias_main: paddle.nn.Sigmoid
:alias: paddle.nn.Sigmoid,paddle.nn.layer.Sigmoid,paddle.nn.layer.activation.Sigmoid
Sigmoid Activation.
.. math::
@ -246,6 +254,9 @@ class Sigmoid(layers.Layer):
class LogSoftmax(layers.Layer):
"""
:alias_main: paddle.nn.LogSoftmax
:alias: paddle.nn.LogSoftmax,paddle.nn.layer.LogSoftmax,paddle.nn.layer.activation.LogSoftmax
This operator implements the log_softmax layer. The calculation process is as follows:
.. math::

@ -40,6 +40,9 @@ def _get_default_param_initializer(num_channels, filter_size):
class Conv2D(layers.Layer):
"""
:alias_main: paddle.nn.Conv2D
:alias: paddle.nn.Conv2D,paddle.nn.layer.Conv2D,paddle.nn.layer.conv.Conv2D
This interface is used to construct a callable object of the ``Conv2D`` class.
For more details, refer to code examples.
The convolution2D layer calculates the output based on the input, filter
@ -235,6 +238,9 @@ class Conv2D(layers.Layer):
class Conv2DTranspose(layers.Layer):
"""
:alias_main: paddle.nn.Conv2DTranspose
:alias: paddle.nn.Conv2DTranspose,paddle.nn.layer.Conv2DTranspose,paddle.nn.layer.conv.Conv2DTranspose
This interface is used to construct a callable object of the ``Conv2DTranspose`` class.
For more details, refer to code examples.
The convolution2D transpose layer calculates the output based on the input,
@ -431,6 +437,9 @@ class Conv2DTranspose(layers.Layer):
class Conv3D(layers.Layer):
"""
:alias_main: paddle.nn.Conv3D
:alias: paddle.nn.Conv3D,paddle.nn.layer.Conv3D,paddle.nn.layer.conv.Conv3D
**Convolution3D Layer**
The convolution3D layer calculates the output based on the input, filter
@ -621,6 +630,9 @@ class Conv3D(layers.Layer):
class Conv3DTranspose(layers.Layer):
"""
:alias_main: paddle.nn.Conv3DTranspose
:alias: paddle.nn.Conv3DTranspose,paddle.nn.layer.Conv3DTranspose,paddle.nn.layer.conv.Conv3DTranspose
**Convolution3D transpose layer**
The convolution3D transpose layer calculates the output based on the input,

@ -20,6 +20,9 @@ from .. import functional as F
class RowConv(layers.Layer):
"""
:alias_main: paddle.nn.RowConv
:alias: paddle.nn.RowConv,paddle.nn.layer.RowConv,paddle.nn.layer.extension.RowConv
**Row-convolution operator**
The row convolution is called lookahead convolution. This operator was

@ -28,6 +28,9 @@ __all__ = [
class CrossEntropyLoss(fluid.dygraph.Layer):
"""
:alias_main: paddle.nn.CrossEntropyLoss
:alias: paddle.nn.CrossEntropyLoss,paddle.nn.layer.CrossEntropyLoss,paddle.nn.layer.loss.CrossEntropyLoss
This operator implements the cross entropy loss function. This OP combines ``LogSoftmax``
and ``NLLLoss`` together.
@ -143,6 +146,9 @@ class CrossEntropyLoss(fluid.dygraph.Layer):
class MSELoss(fluid.dygraph.layers.Layer):
"""
:alias_main: paddle.nn.MSELoss
:alias: paddle.nn.MSELoss,paddle.nn.layer.MSELoss,paddle.nn.layer.loss.MSELoss
**Mean Square Error Loss**
Computes the mean square error (squared L2 norm) of given input and label.
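A dygraph sketch under the 2.0-alpha layout (the values are illustrative; with the default ``reduction='mean'`` the result here would be 0.25):
.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid
    import paddle.nn as nn

    with fluid.dygraph.guard():
        mse = nn.MSELoss()
        pred = fluid.dygraph.to_variable(np.array([1.5, 2.0], dtype='float32'))
        label = fluid.dygraph.to_variable(np.array([1.0, 2.5], dtype='float32'))
        loss = mse(pred, label)  # mean of (0.5)^2 and (-0.5)^2 = 0.25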
@ -244,6 +250,9 @@ class MSELoss(fluid.dygraph.layers.Layer):
class L1Loss(fluid.dygraph.Layer):
"""
:alias_main: paddle.nn.L1Loss
:alias: paddle.nn.L1Loss,paddle.nn.layer.L1Loss,paddle.nn.layer.loss.L1Loss
This interface is used to construct a callable object of the ``L1Loss`` class.
The L1Loss layer calculates the L1 Loss of input predictions and target
labels as follows.
@ -331,6 +340,9 @@ class L1Loss(fluid.dygraph.Layer):
class BCELoss(fluid.dygraph.Layer):
"""
:alias_main: paddle.nn.BCELoss
:alias: paddle.nn.BCELoss,paddle.nn.layer.BCELoss,paddle.nn.layer.loss.BCELoss
This interface is used to construct a callable object of the ``BCELoss`` class.
The BCELoss layer measures the binary_cross_entropy loss between input predictions
and target labels. The binary_cross_entropy loss can be described as:
@ -456,6 +468,9 @@ class BCELoss(fluid.dygraph.Layer):
class NLLLoss(fluid.dygraph.Layer):
"""
:alias_main: paddle.nn.NLLLoss
:alias: paddle.nn.NLLLoss,paddle.nn.layer.NLLLoss,paddle.nn.layer.loss.NLLLoss
This op accepts input and target label and returns the negative log likelihood
loss. It is useful to train a classification problem with C classes.
