From d05058d268d10959b83b7048c7fa2b74dce48d6f Mon Sep 17 00:00:00 2001
From: chentianyu03
Date: Wed, 14 Oct 2020 20:55:35 +0800
Subject: [PATCH] Remove and reorganize the alias of APIs (#27717)

* modify cond while_loop to paddle.static.nn.cond
* modify crop_tensor to paddle.crop
* modify Variable to paddle.static.Variable
* remove nn.beam_search, nn.beam_search_decode, nn.gather_tree
* remove bpr_loss, center_loss, rank_loss, smooth_l1, teacher_student_sigmoid_loss, edit_distance, sampled_softmax_with_cross_entropy in nn.functional
* remove apis in nn.functional.learn_rate.py
* remove pool2d, pool3d, adaptive_pool2d, adaptive_pool3d in nn.functional
* remove apis in nn.functional.vision
* remove erf, soft_relu in nn.functional.activation
* remove apis in nn.functional.extension
* remove nn.functional.rnn
* remove hash from nn.functional.lod
* remove row_conv from nn.functional.extension
* remove one_hot, pad2d, pad_constant_like from nn.functional.common
* remove nn.gather_tree, nn.BilinearTensorProduct, nn.Pool2D, nn.Pad2D
* remove apis from optimizer.__init__
* remove tensor.creation.fill_constant
* remove elementwise_mul in nn.functional.common and modify to paddle.multiply
* remove tensor.stat.reduce_mean
* remove reduce_all, reduce_any in tensor.logic
* remove apis in tensor.math
* remove apis in tensor.__init__
* remove has_inf, has_nan in tensor.search
* remove apis in framework.__init__
* remove apis in paddle.__init__
* remove apis in nn.functional.__init__
* modify removed alias apis to raw api in doc and unittests
* fix remove grid_sample bug
* modify removed alias apis to raw api in doc and unittests
* modify removed alias apis to raw api in doc and unittests
* modify removed alias apis to raw api in doc and unittests
* modify removed alias apis to raw api in doc and unittests
* modify removed alias apis to raw api in doc and unittests
* modify removed alias apis to raw api in doc and unittests
* delete alias api relations in doc
* keep paddle.compat, paddle.sysconfig
* remove unittest for paddle.reduce_all, paddle.reduce_any
* modify removed alias apis to raw api in doc and unittests
* recover paddle.save and paddle.load
* resolve conflicts
* fix sample code missing paddle.enable_static() bug
* fix sample code missing paddle.enable_static() bug
* fix to_string sample code error
---
 paddle/fluid/pybind/imperative.cc                  |   2 +-
 python/paddle/__init__.py                          |  42 ++---
 python/paddle/amp/grad_scaler.py                   |   6 +-
 python/paddle/distributed/collective.py            |   2 +-
 python/paddle/distribution.py                      |   2 +-
 python/paddle/fluid/dygraph/base.py                |   6 +-
 .../dygraph_to_static/variable_trans_func.py       |   2 +-
 python/paddle/fluid/dygraph/nn.py                  |   6 -
 .../fluid/dygraph/varbase_patch_methods.py         |   2 +-
 python/paddle/fluid/framework.py                   |  20 ++-
 python/paddle/fluid/layers/control_flow.py         |  28 ++--
 python/paddle/fluid/layers/detection.py            |  82 ++++-----
 .../fluid/layers/learning_rate_scheduler.py        |  35 ++--
 python/paddle/fluid/layers/loss.py                 |  22 +--
 python/paddle/fluid/layers/nn.py                   | 158 ++++++------------
 python/paddle/fluid/layers/rnn.py                  |  15 +-
 python/paddle/fluid/layers/tensor.py               |   8 +-
 python/paddle/fluid/metrics.py                     |   3 +
 .../simnet_dygraph_model_v2.py                     |  10 +-
 .../test_program_translator.py                     |  20 +--
 .../dygraph_to_static/test_ptb_lm_v2.py            |   4 +-
 .../dygraph_to_static/test_resnet_v2.py            |   4 +-
 .../test_variable_trans_func.py                    |  10 +-
 .../test_squared_mat_sub_fuse_pass.py              |   4 +-
 .../parallel_dygraph_sync_batch_norm.py            |   2 +-
 .../unittests/rnn/test_rnn_cells_static.py         |  20 +--
 .../unittests/rnn/test_rnn_nets_static.py          |  32 ++--
 .../tests/unittests/test_activation_op.py          | 106 ++++++------
 .../fluid/tests/unittests/test_adam_op.py          |   2 +-
 .../unittests/test_adaptive_avg_pool2d.py          |   4 +-
 .../unittests/test_adaptive_avg_pool3d.py          |   4 +-
 .../unittests/test_adaptive_max_pool2d.py          |   4 +-
 .../unittests/test_adaptive_max_pool3d.py          |   4 +-
 .../test_add_position_encoding_op.py               |   3 +-
 .../fluid/tests/unittests/test_addcmul.py          |  22 +--
 .../fluid/tests/unittests/test_allclose_op.py      |  12 +-
 .../fluid/tests/unittests/test_bce_loss.py         |  12 +-
 .../unittests/test_bce_with_logits_loss.py         |   8 +-
 .../fluid/tests/unittests/test_chunk_op.py         |  16 +-
 .../fluid/tests/unittests/test_concat_op.py        |  12 +-
 .../unittests/test_cosine_similarity_api.py        |   4 +-
 .../paddle/fluid/tests/unittests/test_diag.py      |  10 +-
 .../unittests/test_directory_migration.py          |   3 +-
 .../test_flatten_contiguous_range_op.py            |   2 +-
 .../tests/unittests/test_full_like_op.py           |   4 +-
 .../tests/unittests/test_gather_nd_op.py           |   6 +-
 .../fluid/tests/unittests/test_gather_op.py        |  14 +-
 .../tests/unittests/test_histogram_op.py           |   6 +-
 .../unittests/test_imperative_layer_apply.py       |  12 +-
 .../test_imperative_layer_children.py              |   4 +-
 .../fluid/tests/unittests/test_isfinite_op.py      |   4 +-
 .../tests/unittests/test_isfinite_v2_op.py         |   2 +-
 .../fluid/tests/unittests/test_l1_loss.py          |  12 +-
 .../fluid/tests/unittests/test_layers.py           |   2 +-
 .../fluid/tests/unittests/test_log_softmax.py      |   8 +-
 .../fluid/tests/unittests/test_logsumexp.py        |   4 +-
 .../tests/unittests/test_lr_scheduler.py           |   2 +-
 .../tests/unittests/test_masked_select_op.py       |  10 +-
 .../unittests/test_math_op_patch_var_base.py       |  19 ---
 .../fluid/tests/unittests/test_maxout_op.py        |   6 +-
 .../fluid/tests/unittests/test_mean_op.py          |   4 +-
 .../fluid/tests/unittests/test_mse_loss.py         |  12 +-
 .../fluid/tests/unittests/test_nll_loss.py         |  16 +-
 .../fluid/tests/unittests/test_normal.py           |  14 +-
 .../fluid/tests/unittests/test_normalize.py        |   4 +-
 .../fluid/tests/unittests/test_numel_op.py         |   4 +-
 .../fluid/tests/unittests/test_ones_like.py        |   4 +-
 .../fluid/tests/unittests/test_pad3d_op.py         |  14 +-
 .../tests/unittests/test_pairwise_distance.py      |   4 +-
 .../tests/unittests/test_pixel_shuffle.py          |   8 +-
 .../fluid/tests/unittests/test_prelu_op.py         |  12 +-
 .../fluid/tests/unittests/test_prod_op.py          |   6 +-
 .../fluid/tests/unittests/test_randint_op.py       |   4 +-
 .../fluid/tests/unittests/test_randn_op.py         |   8 +-
 .../fluid/tests/unittests/test_reshape_op.py       |   6 +-
 .../tests/unittests/test_retain_graph.py           |   8 +-
 .../fluid/tests/unittests/test_row_conv.py         |   4 +-
 .../fluid/tests/unittests/test_selu_op.py          |   8 +-
 .../unittests/test_sigmoid_focal_loss.py           |   6 +-
 .../fluid/tests/unittests/test_softmax_op.py       |   6 +-
 .../fluid/tests/unittests/test_std_layer.py        |   4 +-
 .../tests/unittests/test_temporal_shift_op.py      |   2 +-
 .../fluid/tests/unittests/test_unique.py           |   6 +-
 .../tests/unittests/test_variance_layer.py         |   4 +-
 .../tests/unittests/test_zeros_like_op.py          |   4 +-
 python/paddle/framework/__init__.py                |   5 +-
 python/paddle/nn/__init__.py                       |   9 +-
 python/paddle/nn/control_flow.py                   |  25 ---
 python/paddle/nn/decode.py                         |  30 ----
 python/paddle/nn/functional/__init__.py            | 115 ++++++-------
 python/paddle/nn/functional/activation.py          |  11 +-
 python/paddle/nn/functional/common.py              |  16 +-
 python/paddle/nn/functional/extension.py           |  34 +---
 python/paddle/nn/functional/input.py               |   4 +-
 python/paddle/nn/functional/lod.py                 |  48 ------
 python/paddle/nn/functional/loss.py                |  34 ++--
 python/paddle/nn/functional/norm.py                |   4 +-
 python/paddle/nn/functional/pooling.py             |   4 -
 python/paddle/nn/functional/rnn.py                 |  17 --
 python/paddle/nn/functional/vision.py              |  93 ++++------
 python/paddle/nn/layer/common.py                   |   7 +-
 python/paddle/nn/layer/distance.py                 |   2 +-
 python/paddle/nn/layer/extension.py                |   2 +-
 python/paddle/nn/layer/rnn.py                      |   4 +-
 python/paddle/optimizer/__init__.py                |  13 +-
 python/paddle/optimizer/lr.py                      |  24 +--
 python/paddle/optimizer/optimizer.py               |   2 +-
 python/paddle/static/__init__.py                   |   3 +-
 python/paddle/static/nn/__init__.py                |   4 +
 python/paddle/tensor/__init__.py                   |  37 ++--
 python/paddle/tensor/creation.py                   |  15 +-
 python/paddle/tensor/logic.py                      |   4 -
 python/paddle/tensor/math.py                       |  36 ++--
 python/paddle/tensor/random.py                     |   4 +-
 python/paddle/tensor/search.py                     |   6 +-
 python/paddle/tensor/stat.py                       |   3 +-
 python/paddle/tests/test_model.py                  |   6 +-
 117 files changed, 675 insertions(+), 1008 deletions(-)
 delete mode 100644 python/paddle/nn/control_flow.py
 delete mode 100644 python/paddle/nn/decode.py
 delete mode 100644 python/paddle/nn/functional/lod.py
 delete mode 100644 python/paddle/nn/functional/rnn.py

diff --git a/paddle/fluid/pybind/imperative.cc b/paddle/fluid/pybind/imperative.cc
index c933e1a059..14e76ebd26 100644
--- a/paddle/fluid/pybind/imperative.cc
+++ b/paddle/fluid/pybind/imperative.cc
@@ -712,7 +712,7 @@ void BindImperative(py::module *m_ptr) {
                     tmp.stop_gradient=False
                     inputs.append(tmp)
                 ret = paddle.sums(inputs)
-                loss = paddle.reduce_sum(ret)
+                loss = paddle.fluid.layers.reduce_sum(ret)
                 loss.backward()
                 print("Before clear_gradient {}".format(loss.grad))
                 loss.clear_gradient()
diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py
index f3c9f0b1f2..a7602f1541 100755
--- a/python/paddle/__init__.py
+++ b/python/paddle/__init__.py
@@ -59,10 +59,9 @@ from .tensor.random import bernoulli
 from .tensor.attribute import rank #DEFINE_ALIAS
 from .tensor.attribute import shape #DEFINE_ALIAS
 from .tensor.creation import to_tensor #DEFINE_ALIAS
-from .tensor.creation import crop_tensor #DEFINE_ALIAS
 from .tensor.creation import diag #DEFINE_ALIAS
 from .tensor.creation import eye #DEFINE_ALIAS
-from .tensor.creation import fill_constant #DEFINE_ALIAS
+# from .tensor.creation import fill_constant #DEFINE_ALIAS
 # from .tensor.creation import get_tensor_from_selected_rows #DEFINE_ALIAS
 from .tensor.creation import linspace #DEFINE_ALIAS
 from .tensor.creation import ones #DEFINE_ALIAS
@@ -103,8 +102,8 @@ from .tensor.logic import logical_not #DEFINE_ALIAS
 from .tensor.logic import logical_or #DEFINE_ALIAS
 from .tensor.logic import logical_xor #DEFINE_ALIAS
 from .tensor.logic import not_equal #DEFINE_ALIAS
-from .tensor.logic import reduce_all #DEFINE_ALIAS
-from .tensor.logic import reduce_any #DEFINE_ALIAS
+# from .tensor.logic import reduce_all #DEFINE_ALIAS
+# from .tensor.logic import reduce_any #DEFINE_ALIAS
 from .tensor.logic import allclose #DEFINE_ALIAS
 from .tensor.logic import equal_all #DEFINE_ALIAS
 # from .tensor.logic import isnan #DEFINE_ALIAS
@@ -144,12 +143,12 @@ from .tensor.math import ceil #DEFINE_ALIAS
 from .tensor.math import cos #DEFINE_ALIAS
 from .tensor.math import cosh #DEFINE_ALIAS
 from .tensor.math import cumsum #DEFINE_ALIAS
-from .tensor.math import elementwise_add #DEFINE_ALIAS
-from .tensor.math import elementwise_div #DEFINE_ALIAS
-from .tensor.math import elementwise_floordiv #DEFINE_ALIAS
-from .tensor.math import elementwise_mod #DEFINE_ALIAS
-from .tensor.math import elementwise_pow #DEFINE_ALIAS
-from .tensor.math import elementwise_sub #DEFINE_ALIAS
+# from .tensor.math import elementwise_add #DEFINE_ALIAS
+# from .tensor.math import elementwise_div #DEFINE_ALIAS
+# from .tensor.math import elementwise_floordiv #DEFINE_ALIAS
+# from .tensor.math import elementwise_mod #DEFINE_ALIAS
+# from .tensor.math import elementwise_pow #DEFINE_ALIAS
+# from .tensor.math import elementwise_sub #DEFINE_ALIAS
 from .tensor.math import exp #DEFINE_ALIAS
 from .tensor.math import floor #DEFINE_ALIAS
 from .tensor.math import increment #DEFINE_ALIAS
@@ -157,10 +156,10 @@ from .tensor.math import log #DEFINE_ALIAS
 from .tensor.math import multiplex #DEFINE_ALIAS
 from .tensor.math import pow #DEFINE_ALIAS
 from .tensor.math import reciprocal #DEFINE_ALIAS
-from .tensor.math import reduce_max #DEFINE_ALIAS
-from .tensor.math import reduce_min #DEFINE_ALIAS
-from .tensor.math import reduce_prod #DEFINE_ALIAS
-from .tensor.math import reduce_sum #DEFINE_ALIAS
+# from .tensor.math import reduce_max #DEFINE_ALIAS
+# from .tensor.math import reduce_min #DEFINE_ALIAS
+# from .tensor.math import reduce_prod #DEFINE_ALIAS
+# from .tensor.math import reduce_sum #DEFINE_ALIAS
 from .tensor.math import round #DEFINE_ALIAS
 from .tensor.math import rsqrt #DEFINE_ALIAS
 from .tensor.math import scale #DEFINE_ALIAS
@@ -190,7 +189,7 @@ from .tensor.math import logsumexp #DEFINE_ALIAS
 from .tensor.math import inverse #DEFINE_ALIAS
 from .tensor.math import log1p #DEFINE_ALIAS
 from .tensor.math import erf #DEFINE_ALIAS
-from .tensor.math import addcmul #DEFINE_ALIAS
+# from .tensor.math import addcmul #DEFINE_ALIAS
 from .tensor.math import addmm #DEFINE_ALIAS
 from .tensor.math import clip #DEFINE_ALIAS
 from .tensor.math import trace #DEFINE_ALIAS
@@ -210,8 +209,8 @@ from .tensor.random import randperm #DEFINE_ALIAS
 from .tensor.search import argmax #DEFINE_ALIAS
 from .tensor.search import argmin #DEFINE_ALIAS
 from .tensor.search import argsort #DEFINE_ALIAS
-from .tensor.search import has_inf #DEFINE_ALIAS
-from .tensor.search import has_nan #DEFINE_ALIAS
+# from .tensor.search import has_inf #DEFINE_ALIAS
+# from .tensor.search import has_nan #DEFINE_ALIAS
 from .tensor.search import masked_select #DEFINE_ALIAS
 from .tensor.search import topk #DEFINE_ALIAS
 from .tensor.search import where #DEFINE_ALIAS
@@ -224,9 +223,8 @@ from .tensor.to_string import set_printoptions
 from .framework.random import manual_seed #DEFINE_ALIAS
 from .framework.random import get_cuda_rng_state #DEFINE_ALIAS
 from .framework.random import set_cuda_rng_state #DEFINE_ALIAS
-from .framework import Variable #DEFINE_ALIAS
 from .framework import ParamAttr #DEFINE_ALIAS
-from .framework import create_global_var #DEFINE_ALIAS
+# from .framework import create_global_var #DEFINE_ALIAS
 from .framework import create_parameter #DEFINE_ALIAS
 from .framework import CPUPlace #DEFINE_ALIAS
 from .framework import CUDAPlace #DEFINE_ALIAS
@@ -243,10 +241,10 @@ from .framework import get_default_dtype #DEFINE_ALIAS
 from .tensor.search import index_sample #DEFINE_ALIAS
 from .tensor.stat import mean #DEFINE_ALIAS
-from .tensor.stat import reduce_mean #DEFINE_ALIAS
+# from .tensor.stat import reduce_mean #DEFINE_ALIAS
 from .tensor.stat import std #DEFINE_ALIAS
 from .tensor.stat import var #DEFINE_ALIAS
-from .fluid.data import data
+# from .fluid.data import data
 from .tensor.stat import numel #DEFINE_ALIAS
 from .device import get_cudnn_version
 from .device import set_device
@@ -262,6 +260,8 @@ from .fluid.dygraph.base import enable_dygraph as disable_static #DEFINE_ALIAS
 from .fluid.dygraph.base import disable_dygraph as enable_static #DEFINE_ALIAS
 from .fluid.framework import in_dygraph_mode as in_dynamic_mode #DEFINE_ALIAS
 from .fluid.dygraph.base import no_grad_ as no_grad #DEFINE_ALIAS
+from .fluid.layers import crop_tensor as crop #DEFINE_ALIAS
+
 from . import jit
 from . import static
diff --git a/python/paddle/amp/grad_scaler.py b/python/paddle/amp/grad_scaler.py
index 9476f3765b..21ccc01640 100644
--- a/python/paddle/amp/grad_scaler.py
+++ b/python/paddle/amp/grad_scaler.py
@@ -56,7 +56,7 @@ class GradScaler(AmpScaler):
             data = paddle.rand([10, 3, 32, 32])
             with paddle.amp.auto_cast():
                 conv = model(data)
-                loss = paddle.reduce_mean(conv)
+                loss = paddle.fluid.layers.reduce_mean(conv)
             scaled = scaler.scale(loss)  # scale the loss
             scaled.backward()            # do backward
             scaler.minimize(optimizer, scaled)  # update parameters
@@ -96,7 +96,7 @@ class GradScaler(AmpScaler):
             data = paddle.rand([10, 3, 32, 32])
             with paddle.amp.auto_cast():
                 conv = model(data)
-                loss = paddle.reduce_mean(conv)
+                loss = paddle.fluid.layers.reduce_mean(conv)
             scaled = scaler.scale(loss)  # scale the loss
             scaled.backward()            # do backward
             scaler.minimize(optimizer, scaled)  # update parameters
@@ -128,7 +128,7 @@ class GradScaler(AmpScaler):
             data = paddle.rand([10, 3, 32, 32])
             with paddle.amp.auto_cast():
                 conv = model(data)
-                loss = paddle.reduce_mean(conv)
+                loss = paddle.fluid.layers.reduce_mean(conv)
             scaled = scaler.scale(loss)  # scale the loss
             scaled.backward()            # do backward
             scaler.minimize(optimizer, scaled)  # update parameters
diff --git a/python/paddle/distributed/collective.py b/python/paddle/distributed/collective.py
index 19df0ca91e..47db4d2e7a 100644
--- a/python/paddle/distributed/collective.py
+++ b/python/paddle/distributed/collective.py
@@ -439,7 +439,7 @@ def barrier(group=0):
             paddle.distributed.barrier()
     """
     op_type = 'barrier'
-    temp = paddle.fill_constant([1], dtype="int32", value="1")
+    temp = fill_constant([1], dtype="int32", value="1")
    if in_dygraph_mode():
        return core.ops.barrier(temp, temp, 'ring_id', group)
    if not isinstance(group, int):
diff --git a/python/paddle/distribution.py b/python/paddle/distribution.py
index ff3e882229..63a94a11f0 100644
--- a/python/paddle/distribution.py
+++ b/python/paddle/distribution.py
@@ -25,9 +25,9 @@ from .fluid.layers import control_flow
 from .fluid.layers import tensor
 from .fluid.layers import ops
 from .fluid.layers import nn
+from .fluid.layers import elementwise_mul, elementwise_div, elementwise_add, elementwise_sub
 from .fluid import core
 from .fluid.framework import in_dygraph_mode
-from .tensor.math import elementwise_mul, elementwise_div, elementwise_add, elementwise_sub
 from .tensor import arange, gather_nd, concat, multinomial
 import math
 import numpy as np
diff --git a/python/paddle/fluid/dygraph/base.py b/python/paddle/fluid/dygraph/base.py
index 9b540c378e..db1a705167 100644
--- a/python/paddle/fluid/dygraph/base.py
+++ b/python/paddle/fluid/dygraph/base.py
@@ -480,7 +480,7 @@ def grad(outputs,
            paddle.disable_static()

            def test_dygraph_grad(grad_outputs=None):
-               x = paddle.fill_constant(shape=[1], value=2.0, dtype='float32')
+               x = paddle.fluid.layers.fill_constant(shape=[1], value=2.0, dtype='float32')
                x.stop_gradient = False

                y1 = x * x
@@ -503,7 +503,7 @@ def grad(outputs,
                return dx.numpy()

-           grad_value = paddle.fill_constant(shape=[1], value=4.0, dtype='float32')
+           grad_value = paddle.fluid.layers.fill_constant(shape=[1], value=4.0, dtype='float32')

            # dy1 = [1], dy2 = [1]
            print(test_dygraph_grad(None))  # [7.]
@@ -515,7 +515,7 @@ def grad(outputs,
            print(test_dygraph_grad([grad_value, None]))  # [19.]
            # dy1 = [3], dy2 = [4]
-           grad_y1 = paddle.fill_constant(shape=[1], value=3.0, dtype='float32')
+           grad_y1 = paddle.fluid.layers.fill_constant(shape=[1], value=3.0, dtype='float32')
            print(test_dygraph_grad([grad_y1, grad_value]))  # [24.]
    '''
diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/variable_trans_func.py b/python/paddle/fluid/dygraph/dygraph_to_static/variable_trans_func.py
index 8da7b40db4..b7ebd3800c 100644
--- a/python/paddle/fluid/dygraph/dygraph_to_static/variable_trans_func.py
+++ b/python/paddle/fluid/dygraph/dygraph_to_static/variable_trans_func.py
@@ -87,7 +87,7 @@ def create_static_variable_gast_node(name):

 def create_fill_constant_node(name, value):
-    func_code = "{} = paddle.fill_constant(shape=[1], ".format(name)
+    func_code = "{} = paddle.fluid.layers.fill_constant(shape=[1], ".format(name)
     if isinstance(value, bool):
         func_code += "dtype='bool', value={})".format(value)
         return gast.parse(func_code).body[0]
diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py
index 1a488844de..9a23e11b8a 100644
--- a/python/paddle/fluid/dygraph/nn.py
+++ b/python/paddle/fluid/dygraph/nn.py
@@ -702,9 +702,6 @@ class Conv3DTranspose(layers.Layer):

 class Pool2D(layers.Layer):
     """
-    :alias_main: paddle.nn.Pool2D
-    :alias: paddle.nn.Pool2D,paddle.nn.layer.Pool2D,paddle.nn.layer.common.Pool2D
-    :old_api: paddle.fluid.dygraph.Pool2D

     This interface is used to construct a callable object of the ``Pool2D`` class.
     For more details, refer to code examples.
@@ -2354,9 +2351,6 @@ class PRelu(layers.Layer):

 class BilinearTensorProduct(layers.Layer):
     """
-    :alias_main: paddle.nn.BilinearTensorProduct
-    :alias: paddle.nn.BilinearTensorProduct,paddle.nn.layer.BilinearTensorProduct,paddle.nn.layer.common.BilinearTensorProduct
-    :old_api: paddle.fluid.dygraph.BilinearTensorProduct

     **Add Bilinear Tensor Product Layer**
diff --git a/python/paddle/fluid/dygraph/varbase_patch_methods.py b/python/paddle/fluid/dygraph/varbase_patch_methods.py
index f02c35aaed..39c6740d83 100644
--- a/python/paddle/fluid/dygraph/varbase_patch_methods.py
+++ b/python/paddle/fluid/dygraph/varbase_patch_methods.py
@@ -163,7 +163,7 @@ def monkey_patch_varbase():
                        tmp.stop_gradient=False
                        inputs.append(tmp)
                    ret = paddle.sums(inputs)
-                   loss = paddle.reduce_sum(ret)
+                   loss = paddle.fluid.layers.reduce_sum(ret)
                    loss.backward()

        """
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 714afe1d4b..a4cb92f729 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -543,7 +543,7 @@ def name_scope(prefix=None):
          import paddle
          paddle.enable_static()
          with paddle.static.name_scope("s1"):
-            a = paddle.data(name='data', shape=[None, 1], dtype='int32')
+            a = paddle.fluid.data(name='data', shape=[None, 1], dtype='int32')
             b = a + 1
             with paddle.static.name_scope("s2"):
                c = b * 1
@@ -1193,7 +1193,7 @@ class Variable(object):
                        tmp.stop_gradient=False
                        inputs.append(tmp)
                    ret = paddle.sums(inputs)
-                   loss = paddle.reduce_sum(ret)
+                   loss = paddle.fluid.layers.reduce_sum(ret)
                    loss.backward()

        """
@@ -1343,7 +1343,9 @@
            .. code-block:: python
                import paddle.fluid as fluid
+               import paddle
+               paddle.enable_static()
                cur_program = fluid.Program()
                cur_block = cur_program.current_block()
                new_variable = cur_block.create_var(name="X",
@@ -5355,8 +5357,8 @@ def default_startup_program():
            main_program = paddle.static.Program()
            startup_program = paddle.static.Program()
            with paddle.static.program_guard(main_program=main_program, startup_program=startup_program):
-               x = paddle.data(name="x", shape=[-1, 784], dtype='float32')
-               y = paddle.data(name="y", shape=[-1, 1], dtype='int32')
+               x = paddle.fluid.data(name="x", shape=[-1, 784], dtype='float32')
+               y = paddle.fluid.data(name="y", shape=[-1, 1], dtype='int32')
                z = paddle.static.nn.fc(name="fc", x=x, size=10, activation="relu")

            print("main program is: {}".format(paddle.static.default_main_program()))
@@ -5370,7 +5372,7 @@ def default_main_program():
    This API can be used to get ``default main program`` which store the
    descriptions of Ops and tensors.

-   For example ``z = paddle.elementwise_add(x, y)`` will create a new ``elementwise_add``
+   For example ``z = paddle.fluid.layers.elementwise_add(x, y)`` will create a new ``elementwise_add``
    Op and a new ``z`` tensor, and they will be recorded in ``default main program`` .

    The ``default main program`` is the default value for ``Program`` parameter in
@@ -5389,15 +5391,15 @@
            paddle.enable_static()
            # Sample Network:
-           data = paddle.data(name='image', shape=[None, 3, 224, 224], dtype='float32')
-           label = paddle.data(name='label', shape=[None, 1], dtype='int64')
+           data = paddle.fluid.data(name='image', shape=[None, 3, 224, 224], dtype='float32')
+           label = paddle.fluid.data(name='label', shape=[None, 1], dtype='int64')

            conv1 = paddle.static.nn.conv2d(data, 4, 5, 1, act=None)
            bn1 = paddle.static.nn.batch_norm(conv1, act='relu')
-           pool1 = paddle.nn.functional.pool2d(bn1, 2, 'max', 2)
+           pool1 = paddle.fluid.layers.pool2d(bn1, 2, 'max', 2)
            conv2 = paddle.static.nn.conv2d(pool1, 16, 5, 1, act=None)
            bn2 = paddle.static.nn.batch_norm(conv2, act='relu')
-           pool2 = paddle.nn.functional.pool2d(bn2, 2, 'max', 2)
+           pool2 = paddle.fluid.layers.pool2d(bn2, 2, 'max', 2)

            fc1 = paddle.static.nn.fc(x=pool2, size=50, activation='relu')
            fc2 = paddle.static.nn.fc(x=fc1, size=102, activation='softmax')
diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py
index 0c77917c78..00e9e86d97 100755
--- a/python/paddle/fluid/layers/control_flow.py
+++ b/python/paddle/fluid/layers/control_flow.py
@@ -1110,9 +1110,6 @@ def assign_skip_lod_tensor_array(input, output):
 def while_loop(cond, body, loop_vars, is_test=False, name=None):
     """
     :api_attr: Static Graph
-    :alias_main: paddle.nn.while_loop
-    :alias: paddle.nn.while_loop,paddle.nn.control_flow.while_loop
-    :old_api: paddle.fluid.layers.while_loop

     while_loop is one of the control flows. Repeats while_loop `body` until `cond` returns False.
@@ -1151,6 +1148,9 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
+           import paddle
+           paddle.enable_static()
+
            def cond(i, ten):
                return i < ten
@@ -2506,21 +2506,21 @@ def case(pred_fn_pairs, default=None, name=None):
            paddle.enable_static()

            def fn_1():
-               return paddle.fill_constant(shape=[1, 2], dtype='float32', value=1)
+               return paddle.fluid.layers.fill_constant(shape=[1, 2], dtype='float32', value=1)

            def fn_2():
-               return paddle.fill_constant(shape=[2, 2], dtype='int32', value=2)
+               return paddle.fluid.layers.fill_constant(shape=[2, 2], dtype='int32', value=2)

            def fn_3():
-               return paddle.fill_constant(shape=[3], dtype='int32', value=3)
+               return paddle.fluid.layers.fill_constant(shape=[3], dtype='int32', value=3)

            main_program = paddle.static.default_main_program()
            startup_program = paddle.static.default_startup_program()
            with paddle.static.program_guard(main_program, startup_program):
-               x = paddle.fill_constant(shape=[1], dtype='float32', value=0.3)
-               y = paddle.fill_constant(shape=[1], dtype='float32', value=0.1)
-               z = paddle.fill_constant(shape=[1], dtype='float32', value=0.2)
+               x = paddle.fluid.layers.fill_constant(shape=[1], dtype='float32', value=0.3)
+               y = paddle.fluid.layers.fill_constant(shape=[1], dtype='float32', value=0.1)
+               z = paddle.fluid.layers.fill_constant(shape=[1], dtype='float32', value=0.2)

                pred_1 = paddle.less_than(z, x)  # true: 0.2 < 0.3
                pred_2 = paddle.less_than(x, y)  # false: 0.3 < 0.1
@@ -3626,19 +3626,19 @@ def switch_case(branch_index, branch_fns, default=None, name=None):
            paddle.enable_static()

            def fn_1():
-               return paddle.fill_constant(shape=[1, 2], dtype='float32', value=1)
+               return paddle.fluid.layers.fill_constant(shape=[1, 2], dtype='float32', value=1)

            def fn_2():
-               return paddle.fill_constant(shape=[2, 2], dtype='int32', value=2)
+               return paddle.fluid.layers.fill_constant(shape=[2, 2], dtype='int32', value=2)

            def fn_3():
-               return paddle.fill_constant(shape=[3], dtype='int32', value=3)
+               return paddle.fluid.layers.fill_constant(shape=[3], dtype='int32', value=3)

            main_program = paddle.static.default_main_program()
            startup_program = paddle.static.default_startup_program()
            with paddle.static.program_guard(main_program, startup_program):
-               index_1 = paddle.fill_constant(shape=[1], dtype='int32', value=1)
-               index_2 = paddle.fill_constant(shape=[1], dtype='int32', value=2)
+               index_1 = paddle.fluid.layers.fill_constant(shape=[1], dtype='int32', value=1)
+               index_2 = paddle.fluid.layers.fill_constant(shape=[1], dtype='int32', value=2)

                out_1 = paddle.static.nn.switch_case(
                    branch_index=index_1,
diff --git a/python/paddle/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py
index bf87d1fc5a..951817db01 100644
--- a/python/paddle/fluid/layers/detection.py
+++ b/python/paddle/fluid/layers/detection.py
@@ -629,9 +629,6 @@ def detection_output(loc,
                     nms_eta=1.0,
                     return_index=False):
    """
-   :alias_main: paddle.nn.functional.detection_output
-   :alias: paddle.nn.functional.detection_output,paddle.nn.functional.vision.detection_output
-   :old_api: paddle.fluid.layers.detection_output

    Given the regression locations, classification confidences and prior boxes,
    calculate the detection outputs by performing following steps:
@@ -700,6 +697,9 @@
        .. code-block:: python
            import paddle.fluid as fluid
+           import paddle
+
+           paddle.enable_static()

            pb = fluid.data(name='prior_box', shape=[10, 4], dtype='float32')
            pbv = fluid.data(name='prior_box_var', shape=[10, 4], dtype='float32')
@@ -822,9 +822,6 @@ def box_coder(prior_box,
              name=None,
              axis=0):
    """
-   :alias_main: paddle.nn.functional.box_coder
-   :alias: paddle.nn.functional.box_coder,paddle.nn.functional.vision.box_coder
-   :old_api: paddle.fluid.layers.box_coder

    **Box Coder Layer**
@@ -911,6 +908,8 @@
        .. code-block:: python

            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
            # For encode
            prior_box_encode = fluid.data(name='prior_box_encode', shape=[512, 4],
@@ -1013,9 +1012,6 @@ def yolov3_loss(x,
                name=None,
                scale_x_y=1.):
    """
-   :alias_main: paddle.nn.functional.yolov3_loss
-   :alias: paddle.nn.functional.yolov3_loss,paddle.nn.functional.vision.yolov3_loss
-   :old_api: paddle.fluid.layers.yolov3_loss

    ${comment}
@@ -1060,6 +1056,8 @@
        .. code-block:: python

            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
            x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
            gt_box = fluid.data(name='gt_box', shape=[None, 6, 4], dtype='float32')
            gt_label = fluid.data(name='gt_label', shape=[None, 6], dtype='int32')
@@ -1140,9 +1138,6 @@ def yolo_box(x,
             name=None,
             scale_x_y=1.):
    """
-   :alias_main: paddle.nn.functional.yolo_box
-   :alias: paddle.nn.functional.yolo_box,paddle.nn.functional.vision.yolo_box
-   :old_api: paddle.fluid.layers.yolo_box

    ${comment}
@@ -1175,6 +1170,8 @@
        .. code-block:: python

            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
            x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
            img_size = fluid.data(name='img_size',shape=[None, 2],dtype='int64')
            anchors = [10, 13, 16, 30, 33, 23]
@@ -1319,9 +1316,6 @@ def bipartite_match(dist_matrix,
                    dist_threshold=None,
                    name=None):
    """
-   :alias_main: paddle.nn.functional.bipartite_match
-   :alias: paddle.nn.functional.bipartite_match,paddle.nn.functional.vision.bipartite_match
-   :old_api: paddle.fluid.layers.bipartite_match

    This operator implements a greedy bipartite matching algorithm, which is
    used to obtain the matching with the maximum distance based on the input
@@ -1413,9 +1407,6 @@ def target_assign(input,
                  mismatch_value=None,
                  name=None):
    """
-   :alias_main: paddle.nn.functional.target_assign
-   :alias: paddle.nn.functional.target_assign,paddle.nn.functional.extension.target_assign
-   :old_api: paddle.fluid.layers.target_assign

    This operator can be, for given the target bounding boxes or labels,
    to assign classification and regression targets to each prediction as well as
@@ -1484,6 +1475,8 @@
        .. code-block:: python

            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
            x = fluid.data(
                name='x',
                shape=[4, 20, 4],
@@ -1778,9 +1771,6 @@ def prior_box(input,
              name=None,
              min_max_aspect_ratios_order=False):
    """
-   :alias_main: paddle.nn.functional.prior_box
-   :alias: paddle.nn.functional.prior_box,paddle.nn.functional.vision.prior_box
-   :old_api: paddle.fluid.layers.prior_box

    This op generates prior boxes for SSD(Single Shot MultiBox Detector) algorithm.
    Each position of the input produce N prior boxes, N is determined by
@@ -1832,6 +1822,8 @@
        #declarative mode
        import paddle.fluid as fluid
        import numpy as np
+       import paddle
+       paddle.enable_static()
        input = fluid.data(name="input", shape=[None,3,6,9])
        image = fluid.data(name="image", shape=[None,3,9,12])
        box, var = fluid.layers.prior_box(
@@ -1939,10 +1931,6 @@ def density_prior_box(input,
                      flatten_to_2d=False,
                      name=None):
    """
-   :alias_main: paddle.nn.functional.density_prior_box
-   :alias: paddle.nn.functional.density_prior_box,paddle.nn.functional.vision.density_prior_box
-   :old_api: paddle.fluid.layers.density_prior_box
-
    This op generates density prior boxes for SSD(Single Shot MultiBox Detector)
    algorithm. Each position of the input produce N prior boxes, N is
@@ -2008,6 +1996,8 @@

        import paddle.fluid as fluid
        import numpy as np
+       import paddle
+       paddle.enable_static()

        input = fluid.data(name="input", shape=[None,3,6,9])
        image = fluid.data(name="image", shape=[None,3,9,12])
@@ -2408,9 +2398,6 @@ def anchor_generator(input,
                     offset=0.5,
                     name=None):
    """
-   :alias_main: paddle.nn.functional.anchor_generator
-   :alias: paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator
-   :old_api: paddle.fluid.layers.anchor_generator

    **Anchor generator operator**
@@ -2457,6 +2444,9 @@
        .. code-block:: python

            import paddle.fluid as fluid
+           import paddle
+
+           paddle.enable_static()
            conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
            anchor, var = fluid.layers.anchor_generator(
                input=conv1,
@@ -2613,9 +2603,6 @@ def generate_proposal_labels(rpn_rois,
                             is_cls_agnostic=False,
                             is_cascade_rcnn=False):
    """
-   :alias_main: paddle.nn.functional.generate_proposal_labels
-   :alias: paddle.nn.functional.generate_proposal_labels,paddle.nn.functional.vision.generate_proposal_labels
-   :old_api: paddle.fluid.layers.generate_proposal_labels

    **Generate Proposal Labels of Faster-RCNN**
@@ -2738,9 +2725,6 @@ def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
                         labels_int32, num_classes, resolution):
    """
-   :alias_main: paddle.nn.functional.generate_mask_labels
-   :alias: paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels
-   :old_api: paddle.fluid.layers.generate_mask_labels

    **Generate Mask Labels for Mask-RCNN**
@@ -2897,9 +2881,6 @@ def generate_proposals(scores,
                       return_rois_num=False,
                       name=None):
    """
-   :alias_main: paddle.nn.functional.generate_proposals
-   :alias: paddle.nn.functional.generate_proposals,paddle.nn.functional.vision.generate_proposals
-   :old_api: paddle.fluid.layers.generate_proposals

    **Generate proposal Faster-RCNN**
@@ -2965,6 +2946,8 @@
        .. code-block:: python
            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
            scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
            bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
            im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
@@ -3036,9 +3019,6 @@ def generate_proposals(scores,

def box_clip(input, im_info, name=None):
    """
-   :alias_main: paddle.nn.functional.box_clip
-   :alias: paddle.nn.functional.box_clip,paddle.nn.functional.vision.box_clip
-   :old_api: paddle.fluid.layers.box_clip

    Clip the box into the size given by im_info
    For each input box, The formula is given as follows:
@@ -3079,6 +3059,8 @@
        .. code-block:: python

            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
            boxes = fluid.data(
                name='boxes', shape=[None, 8, 4], dtype='float32', lod_level=1)
            im_info = fluid.data(name='im_info', shape=[-1 ,3])
@@ -3265,9 +3247,6 @@ def multiclass_nms(bboxes,
                   background_label=0,
                   name=None):
    """
-   :alias_main: paddle.nn.functional.multiclass_nms
-   :alias: paddle.nn.functional.multiclass_nms,paddle.nn.functional.extension.multiclass_nms
-   :old_api: paddle.fluid.layers.multiclass_nms

    **Multiclass NMS**
@@ -3363,6 +3342,8 @@

            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
            boxes = fluid.data(name='bboxes', shape=[None,81, 4],
                                      dtype='float32', lod_level=1)
            scores = fluid.data(name='scores', shape=[None,81],
@@ -3674,9 +3655,6 @@ def distribute_fpn_proposals(fpn_rois,
                             rois_num=None,
                             name=None):
    """
-   :alias_main: paddle.nn.functional.distribute_fpn_proposals
-   :alias: paddle.nn.functional.distribute_fpn_proposals,paddle.nn.functional.vision.distribute_fpn_proposals
-   :old_api: paddle.fluid.layers.distribute_fpn_proposals

    **This op only takes LoDTensor as input.** In Feature Pyramid Networks
    (FPN) models, it is needed to distribute all proposals into different FPN
@@ -3732,6 +3710,8 @@
        .. code-block:: python

            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
            fpn_rois = fluid.data(
                name='data', shape=[None, 4], dtype='float32', lod_level=1)
            multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
@@ -3798,9 +3778,6 @@ def box_decoder_and_assign(prior_box,
                           box_clip,
                           name=None):
    """
-   :alias_main: paddle.nn.functional.box_decoder_and_assign
-   :alias: paddle.nn.functional.box_decoder_and_assign,paddle.nn.functional.vision.box_decoder_and_assign
-   :old_api: paddle.fluid.layers.box_decoder_and_assign

    ${comment}
    Args:
@@ -3825,6 +3802,8 @@
        .. code-block:: python

            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
            pb = fluid.data(
                name='prior_box', shape=[None, 4], dtype='float32')
            pbv = fluid.data(
@@ -3874,9 +3853,6 @@ def collect_fpn_proposals(multi_rois,
                          rois_num_per_level=None,
                          name=None):
    """
-   :alias_main: paddle.nn.functional.collect_fpn_proposals
-   :alias: paddle.nn.functional.collect_fpn_proposals,paddle.nn.functional.vision.collect_fpn_proposals
-   :old_api: paddle.fluid.layers.collect_fpn_proposals

    **This OP only supports LoDTensor as input**. Concat multi-level RoIs
    (Region of Interest) and select N RoIs with respect to multi_scores.
@@ -3922,6 +3898,8 @@
        .. code-block:: python
            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
            multi_rois = []
            multi_scores = []
            for i in range(4):
diff --git a/python/paddle/fluid/layers/learning_rate_scheduler.py b/python/paddle/fluid/layers/learning_rate_scheduler.py
index 47e62016a2..2710ab12cd 100644
--- a/python/paddle/fluid/layers/learning_rate_scheduler.py
+++ b/python/paddle/fluid/layers/learning_rate_scheduler.py
@@ -52,9 +52,6 @@ def _decay_step_counter(begin=0):

def noam_decay(d_model, warmup_steps, learning_rate=1.0):
    """
-   :alias_main: paddle.nn.functional.noam_decay
-   :alias: paddle.nn.functional.noam_decay,paddle.nn.functional.learning_rate.noam_decay
-   :old_api: paddle.fluid.layers.noam_decay

    Noam decay method. The numpy implementation of noam decay as follows.
@@ -115,9 +112,6 @@

def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
    """
-   :alias_main: paddle.nn.functional.exponential_decay
-   :alias: paddle.nn.functional.exponential_decay,paddle.nn.functional.learning_rate.exponential_decay
-   :old_api: paddle.fluid.layers.exponential_decay

    Applies exponential decay to the learning rate.
@@ -149,6 +143,9 @@ def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
        .. code-block:: python

          import paddle.fluid as fluid
+         import paddle
+
+         paddle.enable_static()
          base_lr = 0.1
          sgd_optimizer = fluid.optimizer.SGD(
              learning_rate=fluid.layers.exponential_decay(
@@ -176,9 +173,6 @@ def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):

def natural_exp_decay(learning_rate, decay_steps, decay_rate, staircase=False):
    """
-   :alias_main: paddle.nn.functional.natural_exp_decay
-   :alias: paddle.nn.functional.natural_exp_decay,paddle.nn.functional.learning_rate.natural_exp_decay
-   :old_api: paddle.fluid.layers.natural_exp_decay

Applies natural exponential decay to the initial learning rate.
@@ -210,6 +204,9 @@ Applies natural exponential decay to the initial learning rate.
        .. code-block:: python

          import paddle.fluid as fluid
+         import paddle
+
+         paddle.enable_static()
          base_lr = 0.1
          sgd_optimizer = fluid.optimizer.SGD(
              learning_rate=fluid.layers.natural_exp_decay(
@@ -237,9 +234,6 @@ Applies natural exponential decay to the initial learning rate.

def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False):
    """
-   :alias_main: paddle.nn.functional.inverse_time_decay
-   :alias: paddle.nn.functional.inverse_time_decay,paddle.nn.functional.learning_rate.inverse_time_decay
-   :old_api: paddle.fluid.layers.inverse_time_decay

Applies inverse time decay to the initial learning rate.
@@ -271,6 +265,8 @@ def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False):
        .. code-block:: python

          import paddle.fluid as fluid
+         import paddle
+         paddle.enable_static()
          base_lr = 0.1
          sgd_optimizer = fluid.optimizer.SGD(
              learning_rate=fluid.layers.inverse_time_decay(
@@ -302,10 +298,6 @@ def polynomial_decay(learning_rate,
                     power=1.0,
                     cycle=False):
    """
-   :alias_main: paddle.nn.functional.polynomial_decay
-   :alias: paddle.nn.functional.polynomial_decay,paddle.nn.functional.learning_rate.polynomial_decay
-   :old_api: paddle.fluid.layers.polynomial_decay
-
Applies polynomial decay to the initial learning rate.

    .. code-block:: text
@@ -371,9 +363,6 @@ def polynomial_decay(learning_rate,

def piecewise_decay(boundaries, values):
    """
-   :alias_main: paddle.nn.functional.piecewise_decay
-   :alias: paddle.nn.functional.piecewise_decay,paddle.nn.functional.learning_rate.piecewise_decay
-   :old_api: paddle.fluid.layers.piecewise_decay

Applies piecewise decay to the initial learning rate.
@@ -401,6 +390,8 @@ Applies piecewise decay to the initial learning rate.
        .. code-block:: python

          import paddle.fluid as fluid
+         import paddle
+         paddle.enable_static()
          boundaries = [10000, 20000]
          values = [1.0, 0.5, 0.1]
          optimizer = fluid.optimizer.Momentum(
@@ -450,9 +441,6 @@ Applies piecewise decay to the initial learning rate.

def cosine_decay(learning_rate, step_each_epoch, epochs):
    """
-   :alias_main: paddle.nn.functional.cosine_decay
-   :alias: paddle.nn.functional.cosine_decay,paddle.nn.functional.learning_rate.cosine_decay
-   :old_api: paddle.fluid.layers.cosine_decay

    Applies cosine decay to the learning rate.
@@ -499,9 +487,6 @@ def cosine_decay(learning_rate, step_each_epoch, epochs):

def linear_lr_warmup(learning_rate, warmup_steps, start_lr, end_lr):
    """
-   :alias_main: paddle.nn.functional.linear_lr_warmup
-   :alias: paddle.nn.functional.linear_lr_warmup,paddle.nn.functional.learning_rate.linear_lr_warmup
-   :old_api: paddle.fluid.layers.linear_lr_warmup

    This operator use the linear learning rate warm up strategy to adjust the learning rate preliminarily before the normal learning rate scheduling.
    For more information, please refer to `Bag of Tricks for Image Classification with Convolutional Neural Networks `_
diff --git a/python/paddle/fluid/layers/loss.py b/python/paddle/fluid/layers/loss.py
index 2b1449a94e..b363c37f64 100644
--- a/python/paddle/fluid/layers/loss.py
+++ b/python/paddle/fluid/layers/loss.py
@@ -59,9 +59,6 @@ def center_loss(input,
                update_center=True):
    """
    :api_attr: Static Graph
-   :alias_main: paddle.nn.functional.center_loss
-   :alias: paddle.nn.functional.center_loss,paddle.nn.functional.loss.center_loss
-   :old_api: paddle.fluid.layers.center_loss

    **Center loss Cost layer**
@@ -92,6 +89,8 @@
        .. code-block:: python

          import paddle.fluid as fluid
+         import paddle
+         paddle.enable_static()

          input = fluid.data(name='x',shape=[20,30],dtype='float32')
          label = fluid.data(name='y',shape=[20,1],dtype='int64')
@@ -153,9 +152,6 @@ def center_loss(input,

def bpr_loss(input, label, name=None):
    """
-   :alias_main: paddle.nn.functional.bpr_loss
-   :alias: paddle.nn.functional.bpr_loss,paddle.nn.functional.loss.bpr_loss
-   :old_api: paddle.fluid.layers.bpr_loss

    **Bayesian Personalized Ranking Loss Operator**
@@ -183,6 +179,9 @@
        .. code-block:: python

          import paddle.fluid as fluid
+         import paddle
+
+         paddle.enable_static()

          neg_size = 10
          label = fluid.data(
@@ -1309,9 +1308,6 @@ def softmax_with_cross_entropy(logits,

def rank_loss(label, left, right, name=None):
    """
-   :alias_main: paddle.nn.functional.rank_loss
-   :alias: paddle.nn.functional.rank_loss,paddle.nn.functional.loss.rank_loss
-   :old_api: paddle.fluid.layers.rank_loss

    This operator implements the sort loss layer in the RankNet model. RankNet is a pairwise ranking model
    with a training sample consisting of a pair of documents (A and B), The label (P)
@@ -1349,6 +1345,8 @@
        .. code-block:: python
            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
            label = fluid.data(name="label", shape=[-1, 1], dtype="float32")
            left = fluid.data(name="left", shape=[-1, 1], dtype="float32")
            right = fluid.data(name="right", shape=[-1, 1], dtype="float32")
@@ -1491,9 +1489,6 @@ def teacher_student_sigmoid_loss(input,
                                 soft_max_up_bound=15.0,
                                 soft_max_lower_bound=-15.0):
    """
-   :alias_main: paddle.nn.functional.teacher_student_sigmoid_loss
-   :alias: paddle.nn.functional.teacher_student_sigmoid_loss,paddle.nn.functional.loss.teacher_student_sigmoid_loss
-   :old_api: paddle.fluid.layers.teacher_student_sigmoid_loss

    **Teacher Student Log Loss Layer**
@@ -1521,7 +1516,8 @@
        .. code-block:: python

          import paddle.fluid as fluid
-
+         import paddle
+         paddle.enable_static()
          batch_size = 64
          label = fluid.data(
                    name="label", shape=[batch_size, 1], dtype="int64")
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 9be798241f..b4495a04c1 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -1859,7 +1859,6 @@ def conv3d(input,
    return helper.append_activation(pre_act)

-@deprecated(since="2.0.0", update_to="paddle.nn.functional.pool2d")
 @templatedoc()
 def pool2d(input,
           pool_size=-1,
           exclusive=True,
           data_format="NCHW"):
    """
-   :alias_main: paddle.nn.functional.pool2d
-   :alias: paddle.nn.functional.pool2d,paddle.nn.functional.pooling.pool2d
-   :old_api: paddle.fluid.layers.pool2d

    ${comment}
@@ -1934,6 +1930,9 @@
        .. code-block:: python

          import paddle.fluid as fluid
+         import paddle
+
+         paddle.enable_static()

          data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
@@ -2077,7 +2076,6 @@ def pool2d(input,
    return pool_out

-@deprecated(since="2.0.0", update_to="paddle.nn.functional.pool3d")
 @templatedoc()
 def pool3d(input,
           pool_size=-1,
           exclusive=True,
           data_format="NCDHW"):
    """
-   :alias_main: paddle.nn.functional.pool3d
-   :alias: paddle.nn.functional.pool3d,paddle.nn.functional.pooling.pool3d
-   :old_api: paddle.fluid.layers.pool3d

    ${comment}
@@ -2153,6 +2148,9 @@
        .. code-block:: python

          import paddle.fluid as fluid
+         import paddle
+
+         paddle.enable_static()

          data = fluid.data(name='data', shape=[None, 3, 32, 32, 32], dtype='float32')
@@ -4318,9 +4316,6 @@ def conv3d_transpose(input,

def reduce_sum(input, dim=None, keep_dim=False, name=None):
    """
-   :alias_main: paddle.reduce_sum
-   :alias: paddle.reduce_sum,paddle.tensor.reduce_sum,paddle.tensor.math.reduce_sum
-   :old_api: paddle.fluid.layers.reduce_sum

    Computes the sum of tensor elements over the given dimension.
@@ -4350,6 +4345,8 @@
        .. code-block:: python

            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
            # x is a Tensor variable with following elements:
            #    [[0.2, 0.3, 0.5, 0.9]
            #     [0.1, 0.2, 0.6, 0.7]]
@@ -4452,9 +4449,6 @@ def reduce_mean(input, dim=None, keep_dim=False, name=None):

def reduce_max(input, dim=None, keep_dim=False, name=None):
    """
-   :alias_main: paddle.reduce_max
-   :alias: paddle.reduce_max,paddle.tensor.reduce_max,paddle.tensor.math.reduce_max
-   :old_api: paddle.fluid.layers.reduce_max

    Computes the maximum of tensor elements over the given dimension.
@@ -4481,6 +4475,8 @@
        .. code-block:: python
            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
            # x is a Tensor variable with following elements:
            #    [[0.2, 0.3, 0.5, 0.9]
            #     [0.1, 0.2, 0.6, 0.7]]
@@ -4518,9 +4514,6 @@ def reduce_max(input, dim=None, keep_dim=False, name=None):

def reduce_min(input, dim=None, keep_dim=False, name=None):
    """
-   :alias_main: paddle.reduce_min
-   :alias: paddle.reduce_min,paddle.tensor.reduce_min,paddle.tensor.math.reduce_min
-   :old_api: paddle.fluid.layers.reduce_min

    Computes the minimum of tensor elements over the given dimension.
@@ -4547,6 +4540,9 @@
        .. code-block:: python

            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
+
            # x is a Tensor variable with following elements:
            #    [[0.2, 0.3, 0.5, 0.9]
            #     [0.1, 0.2, 0.6, 0.7]]
@@ -4584,9 +4580,6 @@ def reduce_min(input, dim=None, keep_dim=False, name=None):

def reduce_prod(input, dim=None, keep_dim=False, name=None):
    """
-   :alias_main: paddle.reduce_prod
-   :alias: paddle.reduce_prod,paddle.tensor.reduce_prod,paddle.tensor.math.reduce_prod
-   :old_api: paddle.fluid.layers.reduce_prod

    Computes the product of tensor elements over the given dimension.
@@ -4613,6 +4606,8 @@
        .. code-block:: python

            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
            # x is a Tensor variable with following elements:
            #    [[0.2, 0.3, 0.5, 0.9]
            #     [0.1, 0.2, 0.6, 0.7]]
@@ -4660,9 +4655,6 @@ def reduce_prod(input, dim=None, keep_dim=False, name=None):

def reduce_all(input, dim=None, keep_dim=False, name=None):
    """
-   :alias_main: paddle.reduce_all
-   :alias: paddle.reduce_all,paddle.tensor.reduce_all,paddle.tensor.logic.reduce_all
-   :old_api: paddle.fluid.layers.reduce_all

    This OP computes the ``logical and`` of tensor elements over the given dimension, and output the result.
@@ -4724,10 +4716,6 @@ def reduce_all(input, dim=None, keep_dim=False, name=None):

def reduce_any(input, dim=None, keep_dim=False, name=None):
    """
-   :alias_main: paddle.reduce_any
-   :alias: paddle.reduce_any,paddle.tensor.reduce_any,paddle.tensor.logic.reduce_any
-   :old_api: paddle.fluid.layers.reduce_any
-
    This OP computes the ``logical or`` of tensor elements over the given dimension, and output the result.

    Args:
@@ -4940,9 +4928,6 @@ def split(input, num_or_sections, dim=-1, name=None):

def l2_normalize(x, axis, epsilon=1e-12, name=None):
    """
-   :alias_main: paddle.nn.functional.l2_normalize
-   :alias: paddle.nn.functional.l2_normalize,paddle.nn.functional.norm.l2_normalize
-   :old_api: paddle.fluid.layers.l2_normalize

    This op normalizes `x` along dimension `axis` using an L2
    norm. For a 1-D tensor (`dim` is fixed to 0), this layer computes
@@ -4973,6 +4958,8 @@
        # declarative mode
        import paddle.fluid as fluid
        import numpy as np
+       import paddle
+       paddle.enable_static()
        input = fluid.data(name="input", shape=[2,3])
        output = fluid.layers.l2_normalize(x=input,axis=0)
        place = fluid.CPUPlace()
@@ -5786,9 +5773,6 @@ def multiplex(inputs, index):

def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None):
    """
-   :alias_main: paddle.nn.functional.smooth_l1
-   :alias: paddle.nn.functional.smooth_l1,paddle.nn.functional.loss.smooth_l1
-   :old_api: paddle.fluid.layers.smooth_l1

    This layer computes the smooth L1 loss for Variable :attr:`x` and :attr:`y`.
    It takes the first dimension of :attr:`x` and :attr:`y` as batch size.
@@ -5824,6 +5808,8 @@

            import paddle.fluid as fluid
            import numpy as np
+           import paddle
+           paddle.enable_static()
            data = fluid.data(name="x", shape=[-1, 3], dtype="float32")
            label = fluid.data(name="y", shape=[-1, 3], dtype="float32")
            result = fluid.layers.smooth_l1(data,label)
@@ -6858,9 +6844,6 @@ def roi_pool(input,
             rois_num=None,
             name=None):
    """
-   :alias_main: paddle.nn.functional.roi_pool
-   :alias: paddle.nn.functional.roi_pool,paddle.nn.functional.vision.roi_pool
-   :old_api: paddle.fluid.layers.roi_pool

    This operator implements the roi_pooling layer.
    Region of interest pooling (also known as RoI pooling) is to perform max pooling on inputs of nonuniform sizes to obtain fixed-size feature maps (e.g. 7*7).
@@ -6895,6 +6878,8 @@

        import paddle.fluid as fluid
        import numpy as np
+       import paddle
+       paddle.enable_static()

        DATATYPE='float32'
@@ -6965,9 +6950,6 @@ def roi_align(input,
              rois_num=None,
              name=None):
    """
-   :alias_main: paddle.nn.functional.roi_align
-   :alias: paddle.nn.functional.roi_align,paddle.nn.functional.vision.roi_align
-   :old_api: paddle.fluid.layers.roi_align

    ${comment}
@@ -6997,6 +6979,9 @@
        .. code-block:: python

            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
+
            x = fluid.data(
                name='data', shape=[None, 256, 32, 32], dtype='float32')
            rois = fluid.data(
@@ -7800,9 +7785,9 @@ def resize_bilinear(input,

        #declarative mode
        import paddle.fluid as fluid
+       import numpy as np
        import paddle
        paddle.enable_static()
-       import numpy as np
        input = fluid.data(name="input", shape=[None,3,6,10])

        #1
@@ -8951,6 +8936,9 @@ def crop(x, shape=None, offsets=None, name=None):
        .. code-block:: python

            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
            x = fluid.data(name="x", shape=[3, 3, 5], dtype="float32")
            y = fluid.data(name="y", shape=[2, 2, 3], dtype="float32")
            crop = fluid.layers.crop(x, shape=y)
@@ -8989,10 +8977,6 @@ def crop(x, shape=None, offsets=None, name=None):

def crop_tensor(x, shape=None, offsets=None, name=None):
    """
-   :alias_main: paddle.crop_tensor
-   :alias: paddle.crop_tensor,paddle.tensor.crop_tensor,paddle.tensor.creation.crop_tensor
-   :old_api: paddle.fluid.layers.crop_tensor
-
    Crop input into output, as specified by offsets and shape.

    .. code-block:: text
@@ -9062,6 +9046,9 @@
        .. code-block:: python

            import paddle.fluid as fluid
+           import paddle
+           paddle.enable_static()
            x = fluid.data(name="x", shape=[None, 3, 5], dtype="float32")
            # x.shape = [-1, 3, 5], where -1 indicates batch size, and it will get the exact value in runtime.
@@ -9269,9 +9256,6 @@ def pad2d(input,
          data_format="NCHW",
          name=None):
    """
-   :alias_main: paddle.nn.functional.pad2d
-   :alias: paddle.nn.functional.pad2d,paddle.nn.functional.common.pad2d
-   :old_api: paddle.fluid.layers.pad2d

    Pad 2-d images according to 'paddings' and 'mode'.
    If mode is 'reflect', paddings[0] and paddings[1] must be no greater
@@ -9336,7 +9320,7 @@
            x_shape = (1, 1, 3, 4)
            x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape) + 1
            tensor_x = paddle.to_tensor(x)
-           y = F.pad2d(tensor_x, paddings=[1, 2, 2, 1], pad_value=1, mode='constant')
+           y = paddle.fluid.layers.pad2d(tensor_x, paddings=[1, 2, 2, 1], pad_value=1, mode='constant')
            print(y.numpy())
            # [[[[ 1. 1. 1. 1. 1. 1. 1.]
            #    [ 1. 1. 1. 2. 3. 4. 1.]
@@ -9349,7 +9333,7 @@
            x_shape = (1, 1, 2, 3)
            x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape) + 1
            tensor_x = paddle.to_tensor(x)
-           y = F.pad2d(tensor_x, paddings=[1, 1, 1, 1], mode='reflect')
+           y = paddle.fluid.layers.pad2d(tensor_x, paddings=[1, 1, 1, 1], mode='reflect')
            print(y.numpy())
            # [[[[5. 4. 5. 6. 5.]
            #    [2. 1. 2. 3. 2.]
@@ -9873,9 +9857,6 @@ def leaky_relu(x, alpha=0.02, name=None):

def soft_relu(x, threshold=40.0, name=None):
    """
-   :alias_main: paddle.nn.functional.soft_relu
-   :alias: paddle.nn.functional.soft_relu,paddle.nn.functional.activation.soft_relu
-   :old_api: paddle.fluid.layers.soft_relu

    SoftRelu Activation Operator.
@@ -9895,7 +9876,10 @@

        import paddle.fluid as fluid
        import numpy as np
+       import paddle
+
+       paddle.enable_static()
        inputs = fluid.layers.data(name="x", shape=[2, 2], dtype="float32")
        output = fluid.layers.soft_relu(inputs, threshold=20.0)
@@ -11456,9 +11440,6 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):

def elementwise_add(x, y, axis=-1, act=None, name=None):
    """
-   :alias_main: paddle.elementwise_add
-   :alias: paddle.elementwise_add,paddle.tensor.elementwise_add,paddle.tensor.math.elementwise_add
-   :old_api: paddle.fluid.layers.elementwise_add

Examples:
@@ -11550,9 +11531,6 @@
 @deprecated(since="2.0.0", update_to="paddle.divide")
 def elementwise_div(x, y, axis=-1, act=None, name=None):
    """
-   :alias_main: paddle.elementwise_div
-   :alias: paddle.elementwise_div,paddle.tensor.elementwise_div,paddle.tensor.math.elementwise_div
-   :old_api: paddle.fluid.layers.elementwise_div

Examples:
@@ -11638,9 +11616,6 @@

def elementwise_sub(x, y, axis=-1, act=None, name=None):
    """
-   :alias_main: paddle.elementwise_sub
-   :alias: paddle.elementwise_sub,paddle.tensor.elementwise_sub,paddle.tensor.math.elementwise_sub
-   :old_api: paddle.fluid.layers.elementwise_sub

Examples:
@@ -11727,9 +11702,6 @@
 @deprecated(since="2.0.0", update_to="paddle.multiply")
 def elementwise_mul(x, y, axis=-1, act=None, name=None):
    """
-   :alias_main: paddle.elementwise_mul
-   :alias: paddle.elementwise_mul,paddle.tensor.elementwise_mul,paddle.tensor.math.elementwise_mul
-   :old_api: paddle.fluid.layers.elementwise_mul

Examples:
@@ -11937,9 +11909,6 @@

def elementwise_pow(x, y, axis=-1, act=None, name=None):
    """
-   :alias_main: paddle.elementwise_pow
-   :alias: paddle.elementwise_pow,paddle.tensor.elementwise_pow,paddle.tensor.math.elementwise_pow
-   :old_api: paddle.fluid.layers.elementwise_pow

Examples:
@@ -11974,9 +11943,6 @@
 @deprecated(since="2.0.0", update_to="paddle.remainder")
 def elementwise_mod(x, y, axis=-1, act=None, name=None):
    """
-   :alias_main: paddle.elementwise_mod
-   :alias: paddle.elementwise_mod,paddle.tensor.elementwise_mod,paddle.tensor.math.elementwise_mod
-   :old_api: paddle.fluid.layers.elementwise_mod

Examples:
@@ -12012,9 +11978,6 @@
 @deprecated(since="2.0.0", update_to="paddle.floor_divide")
 def elementwise_floordiv(x, y, axis=-1, act=None, name=None):
    """
-   :alias_main: paddle.elementwise_floordiv
-   :alias: paddle.elementwise_floordiv,paddle.tensor.elementwise_floordiv,paddle.tensor.math.elementwise_floordiv
-   :old_api: paddle.fluid.layers.elementwise_floordiv

Examples:
@@ -12515,6 +12478,8 @@ def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None):
        .. code-block:: python
code-block:: python import paddle.fluid as fluid + import paddle + paddle.enable_static() dataX = fluid.layers.data(name="dataX", append_batch_size = False, shape=[2, 5], dtype="float32") dataY = fluid.layers.data(name="dataY", append_batch_size = False, shape=[5, 3], dtype="float32") output = fluid.layers.mul(dataX, dataY, @@ -12579,9 +12544,6 @@ def maxout(x, groups, name=None, axis=1): def space_to_depth(x, blocksize, name=None): """ - :alias_main: paddle.nn.functional.space_to_depth - :alias: paddle.nn.functional.space_to_depth,paddle.nn.functional.vision.space_to_depth - :old_api: paddle.fluid.layers.space_to_depth Gives a blocksize to space_to_depth the input LoDtensor with Layout: [batch, channel, height, width] @@ -12638,7 +12600,10 @@ def space_to_depth(x, blocksize, name=None): import paddle.fluid as fluid import numpy as np + import numpy as np + import paddle + paddle.enable_static() data = fluid.data( name='data', shape=[1, 4, 2, 2], dtype='float32') space_to_depthed = fluid.layers.space_to_depth( @@ -12690,9 +12655,6 @@ def affine_channel(x, name=None, act=None): """ - :alias_main: paddle.nn.functional.affine_channel - :alias: paddle.nn.functional.affine_channel,paddle.nn.functional.vision.affine_channel - :old_api: paddle.fluid.layers.affine_channel Applies a separate affine transformation to each channel of the input. Useful for replacing spatial batch norm with its equivalent fixed @@ -12726,7 +12688,10 @@ def affine_channel(x, import numpy as np import paddle.fluid as fluid + import paddle.fluid as fluid + import paddle + paddle.enable_static() use_gpu = False place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() exe = fluid.Executor(place) @@ -12852,6 +12817,8 @@ def similarity_focus(input, axis, indexes, name=None): .. code-block:: python import paddle.fluid as fluid + import paddle + paddle.enable_static() data = fluid.data( name='data', shape=[-1, 3, 2, 2], dtype='float32') fluid.layers.similarity_focus(input=data, axis=1, indexes=[0]) @@ -12879,9 +12846,6 @@ def similarity_focus(input, axis, indexes, name=None): def hash(input, hash_size, num_hash=1, name=None): """ - :alias_main: paddle.nn.functional.hash - :alias: paddle.nn.functional.hash,paddle.nn.functional.lod.hash - :old_api: paddle.fluid.layers.hash This OP hash the input to an integer less than the hash_size. The hash algorithm we used was xxHash - Extremely fast hash algorithm @@ -12903,6 +12867,8 @@ def hash(input, hash_size, num_hash=1, name=None): import paddle.fluid as fluid import numpy as np + import paddle + paddle.enable_static() place = fluid.core.CPUPlace() @@ -12943,9 +12909,6 @@ def hash(input, hash_size, num_hash=1, name=None): @templatedoc() def grid_sampler(x, grid, name=None): """ - :alias_main: paddle.nn.functional.grid_sampler - :alias: paddle.nn.functional.grid_sampler,paddle.nn.functional.vision.grid_sampler - :old_api: paddle.fluid.layers.grid_sampler This operation samples input X by using bilinear interpolation based on flow field grid, which is usually generated by :code:`affine_grid` . The grid of @@ -13019,7 +12982,10 @@ def grid_sampler(x, grid, name=None): .. 
code-block:: python import paddle.fluid as fluid + import paddle.fluid as fluid + import paddle + paddle.enable_static() # use with affine_grid x = fluid.data(name='x', shape=[None, 10, 32, 32], dtype='float32') theta = fluid.layers.data(name='theta', shape=[2, 3], dtype='float32') @@ -13103,9 +13069,6 @@ def log_loss(input, label, epsilon=1e-4, name=None): def add_position_encoding(input, alpha, beta, name=None): """ - :alias_main: paddle.nn.functional.add_position_encoding - :alias: paddle.nn.functional.add_position_encoding,paddle.nn.functional.extension.add_position_encoding - :old_api: paddle.fluid.layers.add_position_encoding This operator performs weighted sum of input feature at each position (position in the sequence) and the corresponding position encoding. @@ -13146,10 +13109,9 @@ def add_position_encoding(input, alpha, beta, name=None): .. code-block:: python import paddle - import paddle.nn.functional as F tensor = paddle.randn([16, 32, 64]) - position_tensor = F.add_position_encoding( + position_tensor = paddle.fluid.layers.add_position_encoding( input=tensor, alpha=1.0, beta=1.0) """ @@ -13374,9 +13336,6 @@ def shuffle_channel(x, group, name=None): @templatedoc() def temporal_shift(x, seg_num, shift_ratio=0.25, name=None): """ - :alias_main: paddle.nn.functional.temporal_shift - :alias: paddle.nn.functional.temporal_shift,paddle.nn.functional.extension.temporal_shift - :old_api: paddle.fluid.layers.temporal_shift **Temporal Shift Operator** @@ -13404,7 +13363,7 @@ def temporal_shift(x, seg_num, shift_ratio=0.25, name=None): import paddle.nn.functional as F input = paddle.randn([6, 4, 2, 2]) - out = F.temporal_shift(x=input, seg_num=2, shift_ratio=0.2) + out = paddle.fluid.layers.temporal_shift(x=input, seg_num=2, shift_ratio=0.2) """ helper = LayerHelper("temporal_shift", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'temporal_shift') @@ -13739,9 +13698,6 @@ def psroi_pool(input, pooled_width, name=None): """ - :alias_main: paddle.nn.functional.psroi_pool - :alias: paddle.nn.functional.psroi_pool,paddle.nn.functional.vision.psroi_pool - :old_api: paddle.fluid.layers.psroi_pool ${comment} @@ -13770,6 +13726,8 @@ def psroi_pool(input, .. code-block:: python import paddle.fluid as fluid + import paddle + paddle.enable_static() x = fluid.data(name='x', shape=[100, 490, 28, 28], dtype='float32') rois = fluid.data(name='rois', shape=[None, 4], lod_level=1, dtype='float32') pool_out = fluid.layers.psroi_pool(x, rois, 10, 1.0, 7, 7) @@ -13809,9 +13767,6 @@ def prroi_pool(input, batch_roi_nums=None, name=None): """ - :alias_main: paddle.nn.functional.prroi_pool - :alias: paddle.nn.functional.prroi_pool,paddle.nn.functional.vision.prroi_pool - :old_api: paddle.fluid.layers.prroi_pool The precise roi pooling implementation for paddle. 
Reference: https://arxiv.org/pdf/1807.11590.pdf @@ -14601,9 +14556,6 @@ def deformable_roi_pooling(input, position_sensitive=False, name=None): """ - :alias_main: paddle.nn.functional.deformable_roi_pooling - :alias: paddle.nn.functional.deformable_roi_pooling,paddle.nn.functional.vision.deformable_roi_pooling - :old_api: paddle.fluid.layers.deformable_roi_pooling Deformable ROI Pooling Layer diff --git a/python/paddle/fluid/layers/rnn.py b/python/paddle/fluid/layers/rnn.py index 57c2489194..84d567330a 100644 --- a/python/paddle/fluid/layers/rnn.py +++ b/python/paddle/fluid/layers/rnn.py @@ -488,7 +488,7 @@ def rnn(cell, inputs = paddle.rand((4, 23, 16)) prev_h = paddle.randn((4, 32)) - outputs, final_states = paddle.nn.functional.rnn(cell, inputs, prev_h) + outputs, final_states = paddle.fluid.layers.rnn(cell, inputs, prev_h) """ if in_dygraph_mode(): @@ -711,7 +711,7 @@ def birnn(cell_fw, hf, cf = paddle.rand((4, 32)), paddle.rand((4, 32)) hb, cb = paddle.rand((4, 32)), paddle.rand((4, 32)) initial_states = ((hf, cf), (hb, cb)) - outputs, final_states = paddle.nn.functional.birnn( + outputs, final_states = paddle.fluid.layers.birnn( cell_fw, cell_bw, inputs, initial_states) """ @@ -3046,9 +3046,6 @@ def beam_search(pre_ids, name=None, return_parent_idx=False): """ - :alias_main: paddle.nn.beam_search - :alias: paddle.nn.beam_search,paddle.nn.decode.beam_search - :old_api: paddle.fluid.layers.beam_search Beam search is a classical algorithm for selecting candidate words in a machine translation task. @@ -3126,6 +3123,8 @@ def beam_search(pre_ids, .. code-block:: python import paddle.fluid as fluid + import paddle + paddle.enable_static() # Suppose `probs` contains predicted results from the computation # cell and `pre_ids` and `pre_scores` is the output of beam_search @@ -3197,9 +3196,6 @@ def beam_search(pre_ids, def beam_search_decode(ids, scores, beam_size, end_id, name=None): """ - :alias_main: paddle.nn.beam_search_decode - :alias: paddle.nn.beam_search_decode,paddle.nn.decode.beam_search_decode - :old_api: paddle.fluid.layers.beam_search_decode This operator is used after beam search has completed. It constructs the full predicted sequences for each sample by walking back along the search @@ -3246,7 +3242,8 @@ def beam_search_decode(ids, scores, beam_size, end_id, name=None): .. code-block:: python import paddle.fluid as fluid - + import paddle + paddle.enable_static() # Suppose `ids` and `scores` are LodTensorArray variables reserving # the selected ids and scores of all steps ids = fluid.layers.create_array(dtype='int64') diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py index 931408199c..fe3970ce1c 100644 --- a/python/paddle/fluid/layers/tensor.py +++ b/python/paddle/fluid/layers/tensor.py @@ -605,8 +605,6 @@ def assign(input, output=None): def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None): """ - :alias_main: paddle.fill_constant - :alias: paddle.tensor.fill_constant, paddle.tensor.creation.fill_constant This OP creates a Tensor with specified `shape` and `dtype`, and initializes it with a constant specified by `value`. 
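With the `paddle.fill_constant` alias gone, existing static-graph code has to reach the op through its `paddle.fluid.layers` path (or move to the new 2.0 creation APIs). A minimal sketch of the migrated call under static graph mode, assuming a CPU build; the variable names are illustrative only:

.. code-block:: python

    import paddle
    import paddle.fluid as fluid

    paddle.enable_static()
    # Build a tiny program that materializes a constant tensor;
    # paddle.fill_constant(...) in old code becomes fluid.layers.fill_constant(...).
    const = fluid.layers.fill_constant(shape=[2, 3], dtype='float32', value=1.5)
    exe = paddle.static.Executor(paddle.CPUPlace())
    out, = exe.run(fluid.default_main_program(), fetch_list=[const])
    print(out)  # 2x3 array filled with 1.5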
@@ -715,7 +713,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None): return out -@deprecated(since='1.8.0', update_to="paddle.fill_constant") +@deprecated(since='1.8.0', update_to="paddle.fluid.layers.fill_constant") @templatedoc() def fill_constant_batch_size_like(input, shape, @@ -1228,7 +1226,7 @@ def has_inf(x): import paddle data = paddle.randn(shape=[4, 32, 32], dtype="float32") - res = paddle.has_inf(data) + res = paddle.fluid.layers.has_inf(data) # [False] """ @@ -1257,7 +1255,7 @@ def has_nan(x): import paddle data = paddle.randn(shape=[2,3], dtype="float32") - res = paddle.has_nan(data) + res = paddle.fluid.layers.has_nan(data) # [False] """ diff --git a/python/paddle/fluid/metrics.py b/python/paddle/fluid/metrics.py index cab3daa29a..0c3f6e1673 100644 --- a/python/paddle/fluid/metrics.py +++ b/python/paddle/fluid/metrics.py @@ -851,6 +851,9 @@ class DetectionMAP(object): import paddle.fluid as fluid + import paddle + paddle.enable_static() + batch_size = None # can be any size image_boxs_num = 10 bounding_bboxes_num = 21 diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model_v2.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model_v2.py index 66efb1cdf4..347f3f0d79 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model_v2.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model_v2.py @@ -105,7 +105,7 @@ class ReduceMeanLayer(object): """ operation """ - mean = paddle.reduce_mean(input) + mean = paddle.fluid.layers.reduce_mean(input) return mean @@ -181,7 +181,7 @@ class ElementwiseSubLayer(object): """ operation """ - sub = paddle.elementwise_sub(x, y) + sub = paddle.fluid.layers.elementwise_sub(x, y) return sub @@ -203,7 +203,7 @@ class ConstantLayer(object): shape = list(shape) input_shape = paddle.shape(input) shape[0] = input_shape[0] - constant = paddle.fill_constant(shape, dtype, value) + constant = paddle.fluid.layers.fill_constant(shape, dtype, value) return constant @@ -473,8 +473,8 @@ class BOW(paddle.nn.Layer): right_emb = paddle.reshape( right_emb, shape=[-1, self.seq_len, self.bow_dim]) - bow_left = paddle.reduce_sum(left_emb, dim=1) - bow_right = paddle.reduce_sum(right_emb, dim=1) + bow_left = paddle.fluid.layers.reduce_sum(left_emb, dim=1) + bow_right = paddle.fluid.layers.reduce_sum(right_emb, dim=1) softsign_layer = SoftsignLayer() left_soft = softsign_layer.ops(bow_left) right_soft = softsign_layer.ops(bow_right) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_program_translator.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_program_translator.py index b308854dc0..00b2d8dd1a 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_program_translator.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_program_translator.py @@ -64,9 +64,9 @@ def get_source_code(func): class StaticCode1(): # TODO: Transform return statement def dyfunc_with_if_else(x_v, label=None): - __return_1 = paddle.fill_constant(shape=[1], dtype='bool', value=False) - __return_0 = paddle.fill_constant(shape=[1], dtype='bool', value=False) - __return_value_init_0 = paddle.fill_constant( + __return_1 = paddle.fluid.layers.fill_constant(shape=[1], dtype='bool', value=False) + __return_0 = paddle.fluid.layers.fill_constant(shape=[1], dtype='bool', value=False) + __return_value_init_0 = paddle.fluid.layers.fill_constant( shape=[1], dtype='float64', value=0.0) __return_value_0 = 
__return_value_init_0 @@ -84,7 +84,7 @@ class StaticCode1(): def true_fn_1(__return_0, __return_value_0, label, x_v): loss = fluid.layers.cross_entropy(x_v, label) - __return_0 = paddle.fill_constant( + __return_0 = paddle.fluid.layers.fill_constant( shape=[1], dtype='bool', value=True) __return_value_0 = loss return __return_0, __return_value_0 @@ -98,7 +98,7 @@ class StaticCode1(): (__return_0, __return_value_0), (__return_0, __return_value_0))) def true_fn_2(__return_1, __return_value_0, x_v): - __return_1 = paddle.fill_constant( + __return_1 = paddle.fluid.layers.fill_constant( shape=[1], dtype='bool', value=True) __return_value_0 = x_v return __return_1, __return_value_0 @@ -116,9 +116,9 @@ class StaticCode1(): class StaticCode2(): # TODO: Transform return statement def dyfunc_with_if_else(x_v, label=None): - __return_3 = paddle.fill_constant(shape=[1], dtype='bool', value=False) - __return_2 = paddle.fill_constant(shape=[1], dtype='bool', value=False) - __return_value_init_1 = paddle.fill_constant( + __return_3 = paddle.fluid.layers.fill_constant(shape=[1], dtype='bool', value=False) + __return_2 = paddle.fluid.layers.fill_constant(shape=[1], dtype='bool', value=False) + __return_value_init_1 = paddle.fluid.layers.fill_constant( shape=[1], dtype='float64', value=0.0) __return_value_1 = __return_value_init_1 @@ -136,7 +136,7 @@ class StaticCode2(): def true_fn_4(__return_2, __return_value_1, label, x_v): loss = fluid.layers.cross_entropy(x_v, label) - __return_2 = paddle.fill_constant( + __return_2 = paddle.fluid.layers.fill_constant( shape=[1], dtype='bool', value=True) __return_value_1 = loss return __return_2, __return_value_1 @@ -150,7 +150,7 @@ class StaticCode2(): (__return_2, __return_value_1), (__return_2, __return_value_1))) def true_fn_5(__return_3, __return_value_1, x_v): - __return_3 = paddle.fill_constant( + __return_3 = paddle.fluid.layers.fill_constant( shape=[1], dtype='bool', value=True) __return_value_1 = x_v return __return_3, __return_value_1 diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm_v2.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm_v2.py index bdd5131db9..ef5a5878e0 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm_v2.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm_v2.py @@ -187,8 +187,8 @@ class PtbModel(paddle.nn.Layer): loss = paddle.nn.functional.softmax_with_cross_entropy( logits=projection, label=label, soft_label=False) loss = paddle.reshape(loss, shape=[-1, self.num_steps]) - loss = paddle.reduce_mean(loss, dim=[0]) - loss = paddle.reduce_sum(loss) + loss = paddle.fluid.layers.reduce_mean(loss, dim=[0]) + loss = paddle.fluid.layers.reduce_sum(loss) return loss, last_hidden, last_cell diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet_v2.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet_v2.py index a8cfeb90bd..88c55f1907 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet_v2.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet_v2.py @@ -153,7 +153,7 @@ class ResNet(paddle.nn.Layer): self.conv = ConvBNLayer( num_channels=3, num_filters=64, filter_size=7, stride=2, act='relu') - self.pool2d_max = paddle.nn.Pool2D( + self.pool2d_max = paddle.fluid.dygraph.Pool2D( pool_size=3, pool_stride=2, pool_padding=1, pool_type='max') self.bottleneck_block_list = [] @@ -171,7 +171,7 @@ class ResNet(paddle.nn.Layer): 
self.bottleneck_block_list.append(bottleneck_block) shortcut = True - self.pool2d_avg = paddle.nn.Pool2D( + self.pool2d_avg = paddle.fluid.dygraph.Pool2D( pool_size=7, pool_type='avg', global_pooling=True) self.pool2d_avg_output = num_filters[len(num_filters) - 1] * 4 * 1 * 1 diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_variable_trans_func.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_variable_trans_func.py index e79209cb53..403b8f56a1 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_variable_trans_func.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_variable_trans_func.py @@ -51,24 +51,24 @@ class TestDataLayerNotCheck(unittest.TestCase): class TestVariableTransFunc(unittest.TestCase): def test_create_fill_constant_node(self): node = create_fill_constant_node("a", 1.0) - source = "a = paddle.fill_constant(shape=[1], dtype='float64', value=1.0)" + source = "a = paddle.fluid.layers.fill_constant(shape=[1], dtype='float64', value=1.0)" self.assertEqual(ast_to_source_code(node).strip(), source) node = create_fill_constant_node("b", True) - source = "b = paddle.fill_constant(shape=[1], dtype='bool', value=True)" + source = "b = paddle.fluid.layers.fill_constant(shape=[1], dtype='bool', value=True)" self.assertEqual(ast_to_source_code(node).strip(), source) if six.PY2: node = create_fill_constant_node("c", 214) - source = "c = paddle.fill_constant(shape=[1], dtype='int32', value=214)" + source = "c = paddle.fluid.layers.fill_constant(shape=[1], dtype='int32', value=214)" self.assertEqual(ast_to_source_code(node).strip(), source) node = create_fill_constant_node("d", long(10086)) - source = "d = paddle.fill_constant(shape=[1], dtype='int64', value=10086)" + source = "d = paddle.fluid.layers.fill_constant(shape=[1], dtype='int64', value=10086)" self.assertEqual(ast_to_source_code(node).strip(), source) else: node = create_fill_constant_node("c", 4293) - source = "c = paddle.fill_constant(shape=[1], dtype='int64', value=4293)" + source = "c = paddle.fluid.layers.fill_constant(shape=[1], dtype='int64', value=4293)" self.assertEqual(ast_to_source_code(node).strip(), source) self.assertIsNone(create_fill_constant_node("e", None)) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_squared_mat_sub_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_squared_mat_sub_fuse_pass.py index 5fa242df4e..95cff4de6f 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_squared_mat_sub_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_squared_mat_sub_fuse_pass.py @@ -40,9 +40,9 @@ class SquaredMatSubFusePassTest(InferencePassTest): matmul_ab_square = paddle.square(matmul_ab) matmul_square_ab = paddle.matmul(data_a_square, data_b_square) - scale = paddle.fill_constant(shape=[1], value=0.5, dtype='float32') + scale = paddle.fluid.layers.fill_constant(shape=[1], value=0.5, dtype='float32') - sub_val = paddle.elementwise_sub(matmul_ab_square, matmul_square_ab) + sub_val = paddle.fluid.layers.elementwise_sub(matmul_ab_square, matmul_square_ab) squared_mat_sub_out = fluid.layers.elementwise_mul(sub_val, scale) self.feeds = { diff --git a/python/paddle/fluid/tests/unittests/parallel_dygraph_sync_batch_norm.py b/python/paddle/fluid/tests/unittests/parallel_dygraph_sync_batch_norm.py index 1320623f8f..b7ef54a5c2 100644 --- a/python/paddle/fluid/tests/unittests/parallel_dygraph_sync_batch_norm.py +++ 
b/python/paddle/fluid/tests/unittests/parallel_dygraph_sync_batch_norm.py @@ -26,7 +26,7 @@ import paddle.fluid as fluid import paddle.fluid.dygraph as dygraph from paddle.fluid import core from paddle.fluid.optimizer import SGDOptimizer -from paddle.nn import Conv2d, Pool2D, Linear, SyncBatchNorm +from paddle.nn import Conv2d, Linear, SyncBatchNorm from paddle.fluid.dygraph.base import to_variable from test_dist_base import runtime_main, TestParallelDyGraphRunnerBase diff --git a/python/paddle/fluid/tests/unittests/rnn/test_rnn_cells_static.py b/python/paddle/fluid/tests/unittests/rnn/test_rnn_cells_static.py index 41c252c2aa..bb15b27134 100644 --- a/python/paddle/fluid/tests/unittests/rnn/test_rnn_cells_static.py +++ b/python/paddle/fluid/tests/unittests/rnn/test_rnn_cells_static.py @@ -70,10 +70,10 @@ class TestSimpleRNNCell(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.data( + x_data = paddle.fluid.data( "input", [-1, 16], dtype=paddle.framework.get_default_dtype()) - init_h = paddle.data( + init_h = paddle.fluid.data( "init_h", [-1, 32], dtype=paddle.framework.get_default_dtype()) y, h = rnn2(x_data, init_h) @@ -98,7 +98,7 @@ class TestSimpleRNNCell(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.data( + x_data = paddle.fluid.data( "input", [-1, 16], dtype=paddle.framework.get_default_dtype()) y, h = rnn2(x_data) @@ -166,10 +166,10 @@ class TestGRUCell(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.data( + x_data = paddle.fluid.data( "input", [-1, 16], dtype=paddle.framework.get_default_dtype()) - init_h = paddle.data( + init_h = paddle.fluid.data( "init_h", [-1, 32], dtype=paddle.framework.get_default_dtype()) y, h = rnn2(x_data, init_h) @@ -194,7 +194,7 @@ class TestGRUCell(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.data( + x_data = paddle.fluid.data( "input", [-1, 16], dtype=paddle.framework.get_default_dtype()) y, h = rnn2(x_data) @@ -263,13 +263,13 @@ class TestLSTMCell(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.data( + x_data = paddle.fluid.data( "input", [-1, 16], dtype=paddle.framework.get_default_dtype()) - init_h = paddle.data( + init_h = paddle.fluid.data( "init_h", [-1, 32], dtype=paddle.framework.get_default_dtype()) - init_c = paddle.data( + init_c = paddle.fluid.data( "init_c", [-1, 32], dtype=paddle.framework.get_default_dtype()) y, (h, c) = rnn2(x_data, (init_h, init_c)) @@ -295,7 +295,7 @@ class TestLSTMCell(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.data( + x_data = paddle.fluid.data( "input", [-1, 16], dtype=paddle.framework.get_default_dtype()) y, (h, c) = rnn2(x_data) diff --git a/python/paddle/fluid/tests/unittests/rnn/test_rnn_nets_static.py b/python/paddle/fluid/tests/unittests/rnn/test_rnn_nets_static.py index 71a0b5b7bc..f113189b61 100644 --- a/python/paddle/fluid/tests/unittests/rnn/test_rnn_nets_static.py +++ b/python/paddle/fluid/tests/unittests/rnn/test_rnn_nets_static.py @@ -81,10 +81,10 @@ class TestSimpleRNN(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.data( + x_data = paddle.fluid.data( "input", [-1, -1, 16], 
dtype=paddle.framework.get_default_dtype()) - init_h = paddle.data( + init_h = paddle.fluid.data( "init_h", [2 * self.num_directions, -1, 32], dtype=paddle.framework.get_default_dtype()) y, h = rnn2(x_data, init_h) @@ -112,7 +112,7 @@ class TestSimpleRNN(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.data( + x_data = paddle.fluid.data( "input", [-1, -1, 16], dtype=paddle.framework.get_default_dtype()) y, h = rnn2(x_data) @@ -142,10 +142,10 @@ class TestSimpleRNN(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.data( + x_data = paddle.fluid.data( "input", [-1, -1, 16], dtype=paddle.framework.get_default_dtype()) - seq_len = paddle.data("seq_len", [-1], dtype="int64") + seq_len = paddle.fluid.data("seq_len", [-1], dtype="int64") mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype()) if self.time_major: mask = paddle.transpose(mask, [1, 0]) @@ -226,10 +226,10 @@ class TestGRU(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.data( + x_data = paddle.fluid.data( "input", [-1, -1, 16], dtype=paddle.framework.get_default_dtype()) - init_h = paddle.data( + init_h = paddle.fluid.data( "init_h", [2 * self.num_directions, -1, 32], dtype=paddle.framework.get_default_dtype()) y, h = rnn2(x_data, init_h) @@ -257,7 +257,7 @@ class TestGRU(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.data( + x_data = paddle.fluid.data( "input", [-1, -1, 16], dtype=paddle.framework.get_default_dtype()) y, h = rnn2(x_data) @@ -287,10 +287,10 @@ class TestGRU(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.data( + x_data = paddle.fluid.data( "input", [-1, -1, 16], dtype=paddle.framework.get_default_dtype()) - seq_len = paddle.data("seq_len", [-1], dtype="int64") + seq_len = paddle.fluid.data("seq_len", [-1], dtype="int64") mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype()) if self.time_major: mask = paddle.transpose(mask, [1, 0]) @@ -368,13 +368,13 @@ class TestLSTM(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.data( + x_data = paddle.fluid.data( "input", [-1, -1, 16], dtype=paddle.framework.get_default_dtype()) - init_h = paddle.data( + init_h = paddle.fluid.data( "init_h", [2 * self.num_directions, -1, 32], dtype=paddle.framework.get_default_dtype()) - init_c = paddle.data( + init_c = paddle.fluid.data( "init_c", [2 * self.num_directions, -1, 32], dtype=paddle.framework.get_default_dtype()) y, (h, c) = rnn2(x_data, (init_h, init_c)) @@ -403,7 +403,7 @@ class TestLSTM(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.data( + x_data = paddle.fluid.data( "input", [-1, -1, 16], dtype=paddle.framework.get_default_dtype()) y, (h, c) = rnn2(x_data) @@ -434,10 +434,10 @@ class TestLSTM(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.data( + x_data = paddle.fluid.data( "input", [-1, -1, 16], dtype=paddle.framework.get_default_dtype()) - seq_len = paddle.data("seq_len", [-1], dtype="int64") + seq_len = paddle.fluid.data("seq_len", [-1], dtype="int64") mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype()) if self.time_major: mask = 
paddle.transpose(mask, [1, 0]) diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py index 4fed0c8552..8d9056f0ee 100755 --- a/python/paddle/fluid/tests/unittests/test_activation_op.py +++ b/python/paddle/fluid/tests/unittests/test_activation_op.py @@ -143,7 +143,7 @@ class TestLogSigmoidAPI(unittest.TestCase): def test_static_api(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.data('X', [11, 17]) + x = paddle.fluid.data('X', [11, 17]) out1 = F.log_sigmoid(x) m = paddle.nn.LogSigmoid() out2 = m(x) @@ -167,7 +167,7 @@ class TestLogSigmoidAPI(unittest.TestCase): def test_fluid_api(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.data('X', [11, 17]) + x = paddle.fluid.data('X', [11, 17]) out = paddle.fluid.layers.logsigmoid(x) exe = paddle.static.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) @@ -180,10 +180,10 @@ class TestLogSigmoidAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.log_sigmoid, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.data(name='x_int32', shape=[11, 17], dtype='int32') + x_int32 = paddle.fluid.data(name='x_int32', shape=[11, 17], dtype='int32') self.assertRaises(TypeError, F.log_sigmoid, x_int32) # support the input dtype is float16 - x_fp16 = paddle.data(name='x_fp16', shape=[11, 17], dtype='float16') + x_fp16 = paddle.fluid.data(name='x_fp16', shape=[11, 17], dtype='float16') F.log_sigmoid(x_fp16) @@ -222,7 +222,7 @@ class TestTanhAPI(unittest.TestCase): def test_static_api(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.data('X', [10, 12], self.dtype) + x = paddle.fluid.data('X', [10, 12], self.dtype) out1 = F.tanh(x) th = paddle.nn.Tanh() out2 = th(x) @@ -260,10 +260,10 @@ class TestTanhAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.tanh, 1) # The input dtype must be float16, float32. - x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32') + x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32') self.assertRaises(TypeError, F.tanh, x_int32) # support the input dtype is float16 - x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16') + x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16') F.tanh(x_fp16) @@ -482,7 +482,7 @@ class TestTanhshrinkAPI(unittest.TestCase): def test_static_api(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.tanhshrink(x) tanhshrink = paddle.nn.Tanhshrink() out2 = tanhshrink(x) @@ -519,10 +519,10 @@ class TestTanhshrinkAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.tanhshrink, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32') + x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32') self.assertRaises(TypeError, F.tanhshrink, x_int32) # support the input dtype is float16 - x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16') + x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16') F.tanhshrink(x_fp16) @@ -572,7 +572,7 @@ class TestHardShrinkAPI(unittest.TestCase): def test_static_api(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.data('X', [10, 12]) + x = paddle.fluid.data('X', [10, 12]) out1 = F.hardshrink(x) hd = paddle.nn.Hardshrink() out2 = hd(x) @@ -616,10 +616,10 @@ class TestHardShrinkAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.hardshrink, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32') + x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32') self.assertRaises(TypeError, F.hardshrink, x_int32) # support the input dtype is float16 - x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16') + x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16') F.hardshrink(x_fp16) @@ -642,7 +642,7 @@ class TestHardtanhAPI(unittest.TestCase): def test_static_api(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.data('X', [10, 12]) + x = paddle.fluid.data('X', [10, 12]) out1 = F.hardtanh(x) m = paddle.nn.Hardtanh() out2 = m(x) @@ -676,10 +676,10 @@ class TestHardtanhAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.hardtanh, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32') + x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32') self.assertRaises(TypeError, F.hardtanh, x_int32) # support the input dtype is float16 - x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16') + x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16') F.hardtanh(x_fp16) @@ -722,7 +722,7 @@ class TestSoftshrinkAPI(unittest.TestCase): def test_static_api(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.softshrink(x, self.threshold) softshrink = paddle.nn.Softshrink(self.threshold) out2 = softshrink(x) @@ -759,13 +759,13 @@ class TestSoftshrinkAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.softshrink, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32') + x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32') self.assertRaises(TypeError, F.softshrink, x_int32) # The threshold must be no less than zero - x_fp32 = paddle.data(name='x_fp32', shape=[12, 10], dtype='float32') + x_fp32 = paddle.fluid.data(name='x_fp32', shape=[12, 10], dtype='float32') self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0) # support the input dtype is float16 - x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16') + x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16') F.softshrink(x_fp16) @@ -983,7 +983,7 @@ class TestReluAPI(unittest.TestCase): def test_static_api(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.data('X', [10, 12]) + x = paddle.fluid.data('X', [10, 12]) out1 = F.relu(x) m = paddle.nn.ReLU() out2 = m(x) @@ -1010,10 +1010,10 @@ class TestReluAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.relu, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.data(name='x_int32', shape=[10, 12], dtype='int32') + x_int32 = paddle.fluid.data(name='x_int32', shape=[10, 12], dtype='int32') self.assertRaises(TypeError, F.relu, x_int32) # support the input dtype is float16 - x_fp16 = paddle.data(name='x_fp16', shape=[10, 12], dtype='float16') + x_fp16 = paddle.fluid.data(name='x_fp16', shape=[10, 12], dtype='float16') F.relu(x_fp16) @@ -1075,7 +1075,7 @@ class TestLeakyReluAPI(unittest.TestCase): def test_static_api(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.data('X', [10, 12]) + x = paddle.fluid.data('X', [10, 12]) out1 = F.leaky_relu(x) m = paddle.nn.LeakyReLU() out2 = m(x) @@ -1119,10 +1119,10 @@ class TestLeakyReluAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.leaky_relu, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32') + x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32') self.assertRaises(TypeError, F.leaky_relu, x_int32) # support the input dtype is float16 - x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16') + x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16') F.leaky_relu(x_fp16) @@ -1184,7 +1184,7 @@ class TestGELUAPI(unittest.TestCase): def test_static_api(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.data('X', [11, 17]) + x = paddle.fluid.data('X', [11, 17]) out1 = F.gelu(x) m = paddle.nn.GELU() out2 = m(x) @@ -1218,10 +1218,10 @@ class TestGELUAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.gelu, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.data(name='x_int32', shape=[11, 17], dtype='int32') + x_int32 = paddle.fluid.data(name='x_int32', shape=[11, 17], dtype='int32') self.assertRaises(TypeError, F.gelu, x_int32) # support the input dtype is float16 - x_fp16 = paddle.data(name='x_fp16', shape=[11, 17], dtype='float16') + x_fp16 = paddle.fluid.data(name='x_fp16', shape=[11, 17], dtype='float16') F.gelu(x_fp16) @@ -1331,7 +1331,7 @@ class TestRelu6API(unittest.TestCase): def test_static_api(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.relu6(x) relu6 = paddle.nn.ReLU6() out2 = relu6(x) @@ -1368,10 +1368,10 @@ class TestRelu6API(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.relu6, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32') + x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32') self.assertRaises(TypeError, F.relu6, x_int32) # support the input dtype is float16 - x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16') + x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16') F.relu6(x_fp16) @@ -1414,7 +1414,7 @@ class TestHardswishAPI(unittest.TestCase): def test_static_api(self): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.hardswish(x) m = paddle.nn.Hardswish() out2 = m(x) @@ -1455,10 +1455,10 @@ class TestHardswishAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.hardswish, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32') + x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32') self.assertRaises(TypeError, F.hardswish, x_int32) # support the input dtype is float16 - x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16') + x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16') F.hardswish(x_fp16) @@ -1538,7 +1538,7 @@ class TestELUAPI(unittest.TestCase): def test_static_api(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.data('X', [10, 12]) + x = paddle.fluid.data('X', [10, 12]) out1 = F.elu(x) m = paddle.nn.ELU() out2 = m(x) @@ -1572,10 +1572,10 @@ class TestELUAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.elu, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.data(name='x_int32', shape=[10, 12], dtype='int32') + x_int32 = paddle.fluid.data(name='x_int32', shape=[10, 12], dtype='int32') self.assertRaises(TypeError, F.elu, x_int32) # support the input dtype is float16 - x_fp16 = paddle.data(name='x_fp16', shape=[10, 12], dtype='float16') + x_fp16 = paddle.fluid.data(name='x_fp16', shape=[10, 12], dtype='float16') F.elu(x_fp16) @@ -1858,7 +1858,7 @@ class TestSoftplusAPI(unittest.TestCase): def test_static_api(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.softplus(x, self.beta, self.threshold) softplus = paddle.nn.Softplus(self.beta, self.threshold) out2 = softplus(x) @@ -1895,10 +1895,10 @@ class TestSoftplusAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.softplus, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32') + x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32') self.assertRaises(TypeError, F.softplus, x_int32) # support the input dtype is float16 - x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16') + x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16') F.softplus(x_fp16) @@ -1935,7 +1935,7 @@ class TestSoftsignAPI(unittest.TestCase): def test_static_api(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.softsign(x) softsign = paddle.nn.Softsign() out2 = softsign(x) @@ -1972,10 +1972,10 @@ class TestSoftsignAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.softsign, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32') + x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32') self.assertRaises(TypeError, F.softsign, x_int32) # support the input dtype is float16 - x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16') + x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16') F.softsign(x_fp16) @@ -2018,7 +2018,7 @@ class TestThresholdedReluAPI(unittest.TestCase): def test_static_api(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.thresholded_relu(x, self.threshold) thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold) out2 = thresholded_relu(x) @@ -2055,10 +2055,10 @@ class TestThresholdedReluAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.thresholded_relu, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32') + x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32') self.assertRaises(TypeError, F.thresholded_relu, x_int32) # support the input dtype is float16 - x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16') + x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16') F.thresholded_relu(x_fp16) @@ -2113,7 +2113,7 @@ class TestHardsigmoidAPI(unittest.TestCase): def test_static_api(self): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.hardsigmoid(x) m = paddle.nn.Hardsigmoid() out2 = m(x) @@ -2154,10 +2154,10 @@ class TestHardsigmoidAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.hardsigmoid, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32') + x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32') self.assertRaises(TypeError, F.hardsigmoid, x_int32) # support the input dtype is float16 - x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16') + x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16') F.hardsigmoid(x_fp16) @@ -2195,7 +2195,7 @@ class TestSwishAPI(unittest.TestCase): def test_static_api(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.swish(x) swish = paddle.nn.Swish() out2 = swish(x) @@ -2232,10 +2232,10 @@ class TestSwishAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.swish, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32') + x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32') self.assertRaises(TypeError, F.swish, x_int32) # support the input dtype is float16 - x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16') + x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16') F.swish(x_fp16) diff --git a/python/paddle/fluid/tests/unittests/test_adam_op.py b/python/paddle/fluid/tests/unittests/test_adam_op.py index 5167922ccc..f337e0079e 100644 --- a/python/paddle/fluid/tests/unittests/test_adam_op.py +++ b/python/paddle/fluid/tests/unittests/test_adam_op.py @@ -499,7 +499,7 @@ class TestAdamOpV2(unittest.TestCase): cur_lr = adam.get_lr() assert (lr == cur_lr) with self.assertRaises(TypeError): - lr_var = paddle.create_global_var( + lr_var = paddle.fluid.layers.create_global_var( shape=[1], value=lr, dtype='float32') adam.set_lr(lr_var) diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool2d.py b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool2d.py index b8c5bd2949..25692808d0 100644 --- a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool2d.py +++ b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool2d.py @@ -110,7 +110,7 @@ class TestAdaptiveAvgPool2dAPI(unittest.TestCase): if core.is_compiled_with_cuda() else [False]): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.data(name="x", shape=[2, 3, 7, 7], dtype="float32") + x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32") out_1 = paddle.nn.functional.adaptive_avg_pool2d( x=x, output_size=[3, 3]) @@ -205,7 +205,7 @@ class TestAdaptiveAvgPool2dClassAPI(unittest.TestCase): if core.is_compiled_with_cuda() else [False]): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.data(name="x", shape=[2, 3, 7, 7], dtype="float32") + x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32") adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(output_size=[3, 3]) out_1 = adaptive_avg_pool(x=x) diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool3d.py b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool3d.py index bb36aaebf0..ce85f6bf9f 100755 --- a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool3d.py +++ b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool3d.py @@ -125,7 +125,7 @@ class TestAdaptiveAvgPool3dAPI(unittest.TestCase): if core.is_compiled_with_cuda() else [False]): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32") + x = paddle.fluid.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32") out_1 = paddle.nn.functional.adaptive_avg_pool3d( x=x, output_size=[3, 3, 3]) @@ -220,7 +220,7 @@ class TestAdaptiveAvgPool3dClassAPI(unittest.TestCase): if core.is_compiled_with_cuda() else [False]): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32") + x = paddle.fluid.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32") adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d( output_size=[3, 3, 3]) diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py index dfa6f3226c..14de5aa53a 100644 --- a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py +++ 
b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py @@ -110,7 +110,7 @@ class TestAdaptiveMaxPool2dAPI(unittest.TestCase): if core.is_compiled_with_cuda() else [False]): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.data(name="x", shape=[2, 3, 7, 7], dtype="float32") + x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32") out_1 = paddle.nn.functional.adaptive_max_pool2d( x=x, output_size=[3, 3]) @@ -200,7 +200,7 @@ class TestAdaptiveMaxPool2dClassAPI(unittest.TestCase): if core.is_compiled_with_cuda() else [False]): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.data(name="x", shape=[2, 3, 7, 7], dtype="float32") + x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32") adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(output_size=[3, 3]) out_1 = adaptive_max_pool(x=x) diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py index 1fa703688c..0aa97bdf1c 100755 --- a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py +++ b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py @@ -125,7 +125,7 @@ class TestAdaptiveMaxPool3dAPI(unittest.TestCase): if core.is_compiled_with_cuda() else [False]): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32") + x = paddle.fluid.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32") out_1 = paddle.nn.functional.adaptive_max_pool3d( x=x, output_size=[3, 3, 3]) @@ -215,7 +215,7 @@ class TestAdaptiveMaxPool3dClassAPI(unittest.TestCase): if core.is_compiled_with_cuda() else [False]): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32") + x = paddle.fluid.data(name="x", shape=[2, 3, 5, 7, 7], dtype="float32") adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d( output_size=[3, 3, 3]) diff --git a/python/paddle/fluid/tests/unittests/test_add_position_encoding_op.py b/python/paddle/fluid/tests/unittests/test_add_position_encoding_op.py index 4dc1ed99df..5424a1447b 100644 --- a/python/paddle/fluid/tests/unittests/test_add_position_encoding_op.py +++ b/python/paddle/fluid/tests/unittests/test_add_position_encoding_op.py @@ -18,7 +18,6 @@ import paddle.fluid.core as core from op_test import OpTest import paddle.fluid as fluid import paddle -import paddle.nn.functional as F from paddle.fluid import Program, program_guard @@ -157,7 +156,7 @@ class TestAddPositionEncodingOpDygraph(unittest.TestCase): def test_dygraph(self): paddle.disable_static() tensor = np.random.randn(16, 32, 64) - position_tensor = F.add_position_encoding( + position_tensor = paddle.fluid.layers.add_position_encoding( input=paddle.to_tensor(tensor), alpha=1.0, beta=1.0).numpy() paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_addcmul.py b/python/paddle/fluid/tests/unittests/test_addcmul.py index 6657ebe77a..ed466cda38 100644 --- a/python/paddle/fluid/tests/unittests/test_addcmul.py +++ b/python/paddle/fluid/tests/unittests/test_addcmul.py @@ -37,7 +37,7 @@ class TestAddcmulLayer(unittest.TestCase): tensor1 = fluid.data(name="tensor1", dtype=self._dtype, shape=[100]) tensor2 = fluid.data( name="tensor2", dtype=self._dtype, shape=[3, 100]) - out = paddle.addcmul(input, tensor1, tensor2, value) + out = 
paddle.tensor.math.addcmul(input, tensor1, tensor2, value) exe = fluid.Executor(self._place) return exe.run(feed={ @@ -53,7 +53,7 @@ class TestAddcmulLayer(unittest.TestCase): input = fluid.dygraph.to_variable(self.input) tensor1 = fluid.dygraph.to_variable(self.tensor1) tensor2 = fluid.dygraph.to_variable(self.tensor2) - out = paddle.addcmul(input, tensor1, tensor2, value) + out = paddle.tensor.math.addcmul(input, tensor1, tensor2, value) return out.numpy() def numpy(self, value=1.0): @@ -85,7 +85,7 @@ class TestAddcmul(unittest.TestCase): tensor1 = fluid.data(name='t1', shape=data_shape, dtype='float32') tensor2 = fluid.data(name='t2', shape=data_shape, dtype='float32') - out = paddle.addcmul(input, tensor1, tensor2) + out = paddle.tensor.math.addcmul(input, tensor1, tensor2) self.assertEqual(out.shape, input.shape) def test_addcmul_with_broadcast0(self): @@ -95,7 +95,7 @@ class TestAddcmul(unittest.TestCase): tensor1 = fluid.data(name='t1', shape=[3, 100], dtype='float32') tensor2 = fluid.data(name='t2', shape=[100], dtype='float32') - out = paddle.addcmul(input, tensor1, tensor2) + out = paddle.tensor.math.addcmul(input, tensor1, tensor2) self.assertEqual(out.shape, input.shape) def test_addcmul_with_broadcast1(self): @@ -105,7 +105,7 @@ class TestAddcmul(unittest.TestCase): tensor1 = fluid.data(name='t1', shape=[100], dtype='float32') tensor2 = fluid.data(name='t2', shape=[4, 100], dtype='float32') - out = paddle.addcmul(input, tensor1, tensor2) + out = paddle.tensor.math.addcmul(input, tensor1, tensor2) self.assertEqual(out.shape, input.shape) def test_addcmul_with_broadcast2(self): @@ -115,7 +115,7 @@ class TestAddcmul(unittest.TestCase): tensor1 = fluid.data(name='t1', shape=[100], dtype='float32') tensor2 = fluid.data(name='t2', shape=[100], dtype='float32') - out = paddle.addcmul(input, tensor1, tensor2) + out = paddle.tensor.math.addcmul(input, tensor1, tensor2) self.assertEqual(out.shape, input.shape) @@ -129,7 +129,7 @@ class InvalidInputTest(unittest.TestCase): name='tensor1', shape=[20, 20], dtype='float32') tensor2 = fluid.data( name='tensor2', shape=[20, 20], dtype='float32') - out = paddle.addcmul(input, tensor1, tensor2) + out = paddle.tensor.math.addcmul(input, tensor1, tensor2) self.assertRaises(TypeError, test_invalid_input) @@ -141,7 +141,7 @@ class InvalidInputTest(unittest.TestCase): tensor1 = [20, 20] tensor2 = fluid.data( name='tensor2', shape=[20, 20], dtype='float32') - out = paddle.addcmul(input, tensor1, tensor2) + out = paddle.tensor.math.addcmul(input, tensor1, tensor2) self.assertRaises(TypeError, test_invalid_tensor1) @@ -153,7 +153,7 @@ class InvalidInputTest(unittest.TestCase): tensor1 = fluid.data( name='tensor1', shape=[20, 20], dtype='float32') tensor2 = [20, 20] - out = paddle.addcmul(input, tensor1, tensor2) + out = paddle.tensor.math.addcmul(input, tensor1, tensor2) self.assertRaises(TypeError, test_invalid_tensor2) @@ -166,7 +166,7 @@ class InvalidInputTest(unittest.TestCase): name='tensor1', shape=[20, 20], dtype='float32') tensor2 = fluid.data( name='tensor2', shape=[20, 20], dtype='float32') - out = paddle.addcmul(input, tensor1, tensor2, value=1) + out = paddle.tensor.math.addcmul(input, tensor1, tensor2, value=1) self.assertRaises(TypeError, test_invalid_value_int) @@ -178,7 +178,7 @@ class InvalidInputTest(unittest.TestCase): name='tensor1', shape=[20, 20], dtype='int32') tensor2 = fluid.data( name='tensor2', shape=[20, 20], dtype='int32') - out = paddle.addcmul(input, tensor1, tensor2, value=1.0) + out = paddle.tensor.math.addcmul(input, 
tensor1, tensor2, value=1.0) self.assertRaises(TypeError, test_invalid_value_float) diff --git a/python/paddle/fluid/tests/unittests/test_allclose_op.py b/python/paddle/fluid/tests/unittests/test_allclose_op.py index dc50e569f8..83fef8c29f 100644 --- a/python/paddle/fluid/tests/unittests/test_allclose_op.py +++ b/python/paddle/fluid/tests/unittests/test_allclose_op.py @@ -95,8 +95,8 @@ class TestAllcloseError(unittest.TestCase): def test_x_dtype(): with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - x = paddle.data(name='x', shape=[10, 10], dtype='float16') - y = paddle.data(name='y', shape=[10, 10], dtype='float64') + x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float16') + y = paddle.fluid.data(name='y', shape=[10, 10], dtype='float64') result = paddle.allclose(x, y) self.assertRaises(TypeError, test_x_dtype) @@ -104,15 +104,15 @@ class TestAllcloseError(unittest.TestCase): def test_y_dtype(): with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - x = paddle.data(name='x', shape=[10, 10], dtype='float64') - y = paddle.data(name='y', shape=[10, 10], dtype='int32') + x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64') + y = paddle.fluid.data(name='y', shape=[10, 10], dtype='int32') result = paddle.allclose(x, y) self.assertRaises(TypeError, test_y_dtype) def test_attr(self): - x = paddle.data(name='x', shape=[10, 10], dtype='float64') - y = paddle.data(name='y', shape=[10, 10], dtype='float64') + x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64') + y = paddle.fluid.data(name='y', shape=[10, 10], dtype='float64') def test_rtol(): result = paddle.allclose(x, y, rtol=True) diff --git a/python/paddle/fluid/tests/unittests/test_bce_loss.py b/python/paddle/fluid/tests/unittests/test_bce_loss.py index a8054295b4..4b39436842 100644 --- a/python/paddle/fluid/tests/unittests/test_bce_loss.py +++ b/python/paddle/fluid/tests/unittests/test_bce_loss.py @@ -27,10 +27,10 @@ def test_static_layer(place, prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.data(name='input', shape=input_np.shape, dtype='float64') - label = paddle.data(name='label', shape=label_np.shape, dtype='float64') + input = paddle.fluid.data(name='input', shape=input_np.shape, dtype='float64') + label = paddle.fluid.data(name='label', shape=label_np.shape, dtype='float64') if weight_np is not None: - weight = paddle.data( + weight = paddle.fluid.data( name='weight', shape=weight_np.shape, dtype='float64') bce_loss = paddle.nn.loss.BCELoss( weight=weight, reduction=reduction) @@ -58,10 +58,10 @@ def test_static_functional(place, prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.data(name='input', shape=input_np.shape, dtype='float64') - label = paddle.data(name='label', shape=label_np.shape, dtype='float64') + input = paddle.fluid.data(name='input', shape=input_np.shape, dtype='float64') + label = paddle.fluid.data(name='label', shape=label_np.shape, dtype='float64') if weight_np is not None: - weight = paddle.data( + weight = paddle.fluid.data( name='weight', shape=weight_np.shape, dtype='float64') res = paddle.nn.functional.binary_cross_entropy( input, label, weight=weight, reduction=reduction) diff --git a/python/paddle/fluid/tests/unittests/test_bce_with_logits_loss.py b/python/paddle/fluid/tests/unittests/test_bce_with_logits_loss.py index 
diff --git a/python/paddle/fluid/tests/unittests/test_bce_with_logits_loss.py b/python/paddle/fluid/tests/unittests/test_bce_with_logits_loss.py
index 5ba13a6da0..a6175aa471 100644
--- a/python/paddle/fluid/tests/unittests/test_bce_with_logits_loss.py
+++ b/python/paddle/fluid/tests/unittests/test_bce_with_logits_loss.py
@@ -48,18 +48,18 @@ def test_static(place,
    prog = paddle.static.Program()
    startup_prog = paddle.static.Program()
    with paddle.static.program_guard(prog, startup_prog):
-       logit = paddle.data(name='logit', shape=logit_np.shape, dtype='float64')
-       label = paddle.data(name='label', shape=label_np.shape, dtype='float64')
+       logit = paddle.fluid.data(name='logit', shape=logit_np.shape, dtype='float64')
+       label = paddle.fluid.data(name='label', shape=label_np.shape, dtype='float64')
        feed_dict = {"logit": logit_np, "label": label_np}

        pos_weight = None
        weight = None
        if pos_weight_np is not None:
-           pos_weight = paddle.data(
+           pos_weight = paddle.fluid.data(
                name='pos_weight', shape=pos_weight_np.shape, dtype='float64')
            feed_dict["pos_weight"] = pos_weight_np
        if weight_np is not None:
-           weight = paddle.data(
+           weight = paddle.fluid.data(
                name='weight', shape=weight_np.shape, dtype='float64')
            feed_dict["weight"] = weight_np
        if functional:
diff --git a/python/paddle/fluid/tests/unittests/test_chunk_op.py b/python/paddle/fluid/tests/unittests/test_chunk_op.py
index 043b326fbd..8488bfe773 100644
--- a/python/paddle/fluid/tests/unittests/test_chunk_op.py
+++ b/python/paddle/fluid/tests/unittests/test_chunk_op.py
@@ -27,28 +27,28 @@ class TestChunkOpError(unittest.TestCase):
        with program_guard(Program(), Program()):
            # The type of axis in chunk_op should be int or Variable.
            def test_axis_type():
-               x1 = paddle.data(shape=[4], dtype='float16', name='x3')
+               x1 = paddle.fluid.data(shape=[4], dtype='float16', name='x3')
                paddle.chunk(x=x1, chunks=2, axis=3.2)

            self.assertRaises(TypeError, test_axis_type)

            # The type of axis in chunk op should be int or Variable.
            def test_axis_variable_type():
-               x2 = paddle.data(shape=[4], dtype='float16', name='x9')
-               x3 = paddle.data(shape=[1], dtype='float16', name='x10')
+               x2 = paddle.fluid.data(shape=[4], dtype='float16', name='x9')
+               x3 = paddle.fluid.data(shape=[1], dtype='float16', name='x10')
                paddle.chunk(input=x2, chunks=2, axis=x3)

            self.assertRaises(TypeError, test_axis_variable_type)

            # The type of num_or_sections in chunk_op should be int, tuple or list.
            def test_chunks_type():
-               x4 = paddle.data(shape=[4], dtype='float16', name='x4')
+               x4 = paddle.fluid.data(shape=[4], dtype='float16', name='x4')
                paddle.chunk(input=x4, chunks=2.1, axis=3)

            self.assertRaises(TypeError, test_chunks_type)

            def test_axis_type_tensor():
-               x5 = paddle.data(shape=[4], dtype='float16', name='x6')
+               x5 = paddle.fluid.data(shape=[4], dtype='float16', name='x6')
                paddle.chunk(input=x5, chunks=2, axis=3.2)

            self.assertRaises(TypeError, test_axis_type_tensor)

@@ -57,8 +57,8 @@ class TestChunkOpError(unittest.TestCase):
class API_TestChunk(unittest.TestCase):
    def test_out(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
-           data1 = paddle.data('data1', shape=[4, 6, 6], dtype='float64')
-           data2 = paddle.data('data2', shape=[1], dtype='int32')
+           data1 = paddle.fluid.data('data1', shape=[4, 6, 6], dtype='float64')
+           data2 = paddle.fluid.data('data2', shape=[1], dtype='int32')
            x0, x1, x2 = paddle.chunk(data1, chunks=3, axis=data2)
            place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
@@ -76,7 +76,7 @@ class API_TestChunk(unittest.TestCase):
class API_TestChunk1(unittest.TestCase):
    def test_out(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
-           data1 = paddle.data('data1', shape=[4, 6, 6], dtype='float64')
+           data1 = paddle.fluid.data('data1', shape=[4, 6, 6], dtype='float64')
            x0, x1, x2 = paddle.chunk(data1, chunks=3, axis=2)
            place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
diff --git a/python/paddle/fluid/tests/unittests/test_concat_op.py b/python/paddle/fluid/tests/unittests/test_concat_op.py
index 14c10e7aa2..82efd66a5e 100644
--- a/python/paddle/fluid/tests/unittests/test_concat_op.py
+++ b/python/paddle/fluid/tests/unittests/test_concat_op.py
@@ -253,16 +253,16 @@ class TestConcatAPI(unittest.TestCase):
        assert np.array_equal(res_3, np.concatenate((input_2, input_3), axis=1))

    def test_api(self):
-       x_1 = paddle.data(shape=[None, 1, 4, 5], dtype='int32', name='x_1')
+       x_1 = paddle.fluid.data(shape=[None, 1, 4, 5], dtype='int32', name='x_1')
        paddle.concat([x_1, x_1], 0)

        input_2 = np.random.random([2, 1, 4, 5]).astype("int32")
        input_3 = np.random.random([2, 2, 4, 5]).astype("int32")
        x_2 = fluid.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2')
        x_3 = fluid.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3')
-       positive_1_int32 = paddle.fill_constant([1], "int32", 1)
-       positive_1_int64 = paddle.fill_constant([1], "int64", 1)
-       negative_int64 = paddle.fill_constant([1], "int64", -3)
+       positive_1_int32 = paddle.fluid.layers.fill_constant([1], "int32", 1)
+       positive_1_int64 = paddle.fluid.layers.fill_constant([1], "int64", 1)
+       negative_int64 = paddle.fluid.layers.fill_constant([1], "int64", -3)
        out_1 = paddle.concat(x=[x_2, x_3], axis=1)
        out_2 = paddle.concat(x=[x_2, x_3], axis=positive_1_int32)
        out_3 = paddle.concat(x=[x_2, x_3], axis=positive_1_int64)
@@ -305,8 +305,8 @@ class TestConcatAPI(unittest.TestCase):
            np.array([[-1]]), [[1]], fluid.CPUPlace())
        self.assertRaises(TypeError, paddle.concat, [x2])
        # The input dtype of concat_op must be float16, float32, float64, int32, int64.
-       x4 = paddle.data(shape=[4], dtype='uint8', name='x4')
-       x5 = paddle.data(shape=[4], dtype='uint8', name='x5')
+       x4 = paddle.fluid.data(shape=[4], dtype='uint8', name='x4')
+       x5 = paddle.fluid.data(shape=[4], dtype='uint8', name='x5')
        self.assertRaises(TypeError, fluid.layers.concat, [x4, x5])

        # The type of axis in concat_op should be int or Variable.
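The concat hunk above shows the second recurring substitution: the removed `paddle.fill_constant` alias becomes `paddle.fluid.layers.fill_constant`, which spells the same op. A short sketch of the pattern (shape and value are only examples):

    import paddle

    paddle.enable_static()
    # old (alias removed by this patch): axis = paddle.fill_constant([1], "int32", 1)
    axis = paddle.fluid.layers.fill_constant(shape=[1], dtype="int32", value=1)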
diff --git a/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py b/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py
index a8899d9f02..0b6e5b444c 100644
--- a/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py
+++ b/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py
@@ -48,8 +48,8 @@ class TestCosineSimilarityAPI(unittest.TestCase):
        np_x1 = np.random.rand(*shape).astype(np.float32)
        np_x2 = np.random.rand(*shape).astype(np.float32)

-       x1 = paddle.data(name="x1", shape=shape)
-       x2 = paddle.data(name="x2", shape=shape)
+       x1 = paddle.fluid.data(name="x1", shape=shape)
+       x2 = paddle.fluid.data(name="x2", shape=shape)
        result = F.cosine_similarity(x1, x2, axis=axis, eps=eps)
        exe = Executor(place)
        fetches = exe.run(default_main_program(),
diff --git a/python/paddle/fluid/tests/unittests/test_diag.py b/python/paddle/fluid/tests/unittests/test_diag.py
index ddf1240e4e..94a51301d5 100644
--- a/python/paddle/fluid/tests/unittests/test_diag.py
+++ b/python/paddle/fluid/tests/unittests/test_diag.py
@@ -172,11 +172,11 @@ class TestDiagV2API(unittest.TestCase):
        self.assertTrue(np.allclose(y.numpy(), self.expected11))

    def run_static(self, use_gpu=False):
-       x = paddle.data(name='input', shape=[10, 10], dtype='float32')
-       x2 = paddle.data(name='input2', shape=[100], dtype='float64')
-       x3 = paddle.data(name='input3', shape=[100], dtype='int64')
-       x4 = paddle.data(name='input4', shape=[2000, 2000], dtype='float32')
-       x5 = paddle.data(name='input5', shape=[2000], dtype='float32')
+       x = paddle.fluid.data(name='input', shape=[10, 10], dtype='float32')
+       x2 = paddle.fluid.data(name='input2', shape=[100], dtype='float64')
+       x3 = paddle.fluid.data(name='input3', shape=[100], dtype='int64')
+       x4 = paddle.fluid.data(name='input4', shape=[2000, 2000], dtype='float32')
+       x5 = paddle.fluid.data(name='input5', shape=[2000], dtype='float32')
        result0 = paddle.diag(x)
        result1 = paddle.diag(x, offset=1)
        result2 = paddle.diag(x, offset=-1)
diff --git a/python/paddle/fluid/tests/unittests/test_directory_migration.py b/python/paddle/fluid/tests/unittests/test_directory_migration.py
index 3e0c52b7be..72df01ac1b 100644
--- a/python/paddle/fluid/tests/unittests/test_directory_migration.py
+++ b/python/paddle/fluid/tests/unittests/test_directory_migration.py
@@ -37,8 +37,7 @@ class TestDirectory(unittest.TestCase):
        new_directory = [
            'paddle.enable_static', 'paddle.disable_static',
            'paddle.in_dynamic_mode', 'paddle.to_tensor', 'paddle.grad',
-           'paddle.no_grad', 'paddle.save', 'paddle.load',
-           'paddle.static.save', 'paddle.static.load',
+           'paddle.no_grad', 'paddle.static.save', 'paddle.static.load',
            'paddle.distributed.ParallelEnv',
            'paddle.distributed.prepare_context', 'paddle.DataParallel',
            'paddle.jit', 'paddle.jit.TracedLayer', 'paddle.jit.to_static',
diff --git a/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py b/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py
index e0e487eff1..aa85eb3df3 100644
--- a/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py
+++ b/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py
@@ -170,7 +170,7 @@ class TestFlatten2OpError(unittest.TestCase):
        x2 = np.arange(image_shape[0] * image_shape[1] * image_shape[2] *
                       image_shape[3]).reshape(image_shape) / 100.
        x2 = x2.astype('float16')
-       x2_var = paddle.data(name='x2', shape=[3, 2, 4, 5], dtype='float16')
+       x2_var = paddle.fluid.data(name='x2', shape=[3, 2, 4, 5], dtype='float16')
        paddle.flatten(x2_var)

        self.assertRaises(TypeError, test_type)
diff --git a/python/paddle/fluid/tests/unittests/test_full_like_op.py b/python/paddle/fluid/tests/unittests/test_full_like_op.py
index ba14aeae99..30bc097428 100644
--- a/python/paddle/fluid/tests/unittests/test_full_like_op.py
+++ b/python/paddle/fluid/tests/unittests/test_full_like_op.py
@@ -31,7 +31,7 @@ class TestFullOp(unittest.TestCase):
        train_program = Program()
        with program_guard(train_program, startup_program):
            fill_value = 2.0
-           input = paddle.data(name='input', dtype='float32', shape=[2, 3])
+           input = paddle.fluid.data(name='input', dtype='float32', shape=[2, 3])
            output = paddle.full_like(input, fill_value)
            output_dtype = paddle.full_like(input, fill_value, dtype='float32')

@@ -67,7 +67,7 @@ class TestFullOpError(unittest.TestCase):
        with program_guard(Program(), Program()):
            #for ci coverage
-           input_data = paddle.data(
+           input_data = paddle.fluid.data(
                name='input', dtype='float32', shape=[2, 3])
            output = paddle.full_like(input_data, 2.0)

diff --git a/python/paddle/fluid/tests/unittests/test_gather_nd_op.py b/python/paddle/fluid/tests/unittests/test_gather_nd_op.py
index bd934c76eb..a2955c12fc 100644
--- a/python/paddle/fluid/tests/unittests/test_gather_nd_op.py
+++ b/python/paddle/fluid/tests/unittests/test_gather_nd_op.py
@@ -192,9 +192,9 @@ class TestGatherNdError(unittest.TestCase):
                                         paddle.static.Program()):

            shape = [8, 9, 6]
-           x = paddle.data(shape=shape, dtype='float32', name='x')
-           index = paddle.data(shape=shape, dtype='bool', name='index')
-           index_float = paddle.data(
+           x = paddle.fluid.data(shape=shape, dtype='float32', name='x')
+           index = paddle.fluid.data(shape=shape, dtype='bool', name='index')
+           index_float = paddle.fluid.data(
                shape=shape, dtype='float32', name='index_float')
            np_x = np.random.random(shape).astype('float32')
            np_index = np.array(np.random.randint(2, size=shape, dtype=bool))
diff --git a/python/paddle/fluid/tests/unittests/test_gather_op.py b/python/paddle/fluid/tests/unittests/test_gather_op.py
index 5dcce88acf..2e4b52c282 100644
--- a/python/paddle/fluid/tests/unittests/test_gather_op.py
+++ b/python/paddle/fluid/tests/unittests/test_gather_op.py
@@ -202,9 +202,9 @@ class API_TestGather(unittest.TestCase):
    def test_out2(self):
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
-           x = paddle.data('x', shape=[-1, 2], dtype='float64')
-           index = paddle.data('index', shape=[-1, 1], dtype='int32')
-           axis = paddle.data('axis', shape=[1], dtype='int32')
+           x = paddle.fluid.data('x', shape=[-1, 2], dtype='float64')
+           index = paddle.fluid.data('index', shape=[-1, 1], dtype='int32')
+           axis = paddle.fluid.data('axis', shape=[1], dtype='int32')
            out = paddle.gather(x, index, axis)
            place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
@@ -252,10 +252,10 @@ class TestGathertError(unittest.TestCase):
                                         paddle.static.Program()):

            shape = [8, 9, 6]
-           x = paddle.data(shape=shape, dtype='int8', name='x')
-           axis = paddle.data(shape=[1], dtype='float32', name='axis')
-           index = paddle.data(shape=shape, dtype='int32', name='index')
-           index_float = paddle.data(
+           x = paddle.fluid.data(shape=shape, dtype='int8', name='x')
+           axis = paddle.fluid.data(shape=[1], dtype='float32', name='axis')
+           index = paddle.fluid.data(shape=shape, dtype='int32', name='index')
+           index_float = paddle.fluid.data(
                shape=shape, dtype='float32', name='index_float')

        def test_x_type():
diff --git a/python/paddle/fluid/tests/unittests/test_histogram_op.py b/python/paddle/fluid/tests/unittests/test_histogram_op.py
index 0ccb6fce8e..f540b885e1 100644
--- a/python/paddle/fluid/tests/unittests/test_histogram_op.py
+++ b/python/paddle/fluid/tests/unittests/test_histogram_op.py
@@ -73,7 +73,7 @@ class TestHistogramOpError(unittest.TestCase):
        """Test bins should be greater than or equal to 1."""

        def net_func():
-           input_value = paddle.fill_constant(
+           input_value = paddle.fluid.layers.fill_constant(
                shape=[3, 4], dtype='float32', value=3.0)
            paddle.histogram(input=input_value, bins=-1, min=1, max=5)

@@ -84,7 +84,7 @@ class TestHistogramOpError(unittest.TestCase):
        """Test max must be larger or equal to min."""

        def net_func():
-           input_value = paddle.fill_constant(
+           input_value = paddle.fluid.layers.fill_constant(
                shape=[3, 4], dtype='float32', value=3.0)
            paddle.histogram(input=input_value, bins=1, min=5, max=1)

@@ -95,7 +95,7 @@ class TestHistogramOpError(unittest.TestCase):
        """Test range of min, max is not finite"""

        def net_func():
-           input_value = paddle.fill_constant(
+           input_value = paddle.fluid.layers.fill_constant(
                shape=[3, 4], dtype='float32', value=3.0)
            paddle.histogram(input=input_value, bins=1, min=-np.inf, max=5)
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_layer_apply.py b/python/paddle/fluid/tests/unittests/test_imperative_layer_apply.py
index f61d1ab888..ab9a98588f 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_layer_apply.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_layer_apply.py
@@ -31,11 +31,11 @@ class LeNetDygraph(fluid.dygraph.Layer):
            nn.Conv2d(
                1, 6, 3, stride=1, padding=1),
            nn.ReLU(),
-           nn.Pool2D(2, 'max', 2),
+           paddle.fluid.dygraph.Pool2D(2, 'max', 2),
            nn.Conv2d(
                6, 16, 5, stride=1, padding=0),
            nn.ReLU(),
-           nn.Pool2D(2, 'max', 2))
+           paddle.fluid.dygraph.Pool2D(2, 'max', 2))

        if num_classes > 0:
            self.fc = nn.Sequential(
@@ -54,17 +54,17 @@ class LeNetDygraph(fluid.dygraph.Layer):

def init_weights(layer):
    if type(layer) == nn.Linear:
-       new_weight = paddle.fill_constant(
+       new_weight = paddle.fluid.layers.fill_constant(
            layer.weight.shape, layer.weight.dtype, value=0.9)
        layer.weight.set_value(new_weight)
-       new_bias = paddle.fill_constant(
+       new_bias = paddle.fluid.layers.fill_constant(
            layer.bias.shape, layer.bias.dtype, value=-0.1)
        layer.bias.set_value(new_bias)
    elif type(layer) == nn.Conv2d:
-       new_weight = paddle.fill_constant(
+       new_weight = paddle.fluid.layers.fill_constant(
            layer.weight.shape, layer.weight.dtype, value=0.7)
        layer.weight.set_value(new_weight)
-       new_bias = paddle.fill_constant(
+       new_bias = paddle.fluid.layers.fill_constant(
            layer.bias.shape, layer.bias.dtype, value=-0.2)
        layer.bias.set_value(new_bias)

diff --git a/python/paddle/fluid/tests/unittests/test_imperative_layer_children.py b/python/paddle/fluid/tests/unittests/test_imperative_layer_children.py
index c7e0902341..95d3b87f0e 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_layer_children.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_layer_children.py
@@ -30,11 +30,11 @@ class LeNetDygraph(fluid.dygraph.Layer):
            nn.Conv2d(
                1, 6, 3, stride=1, padding=1),
            nn.ReLU(),
-           nn.Pool2D(2, 'max', 2),
+           paddle.fluid.dygraph.Pool2D(2, 'max', 2),
            nn.Conv2d(
                6, 16, 5, stride=1, padding=0),
            nn.ReLU(),
-           nn.Pool2D(2, 'max', 2))
+           paddle.fluid.dygraph.Pool2D(2, 'max', 2))

    def forward(self, inputs):
        x = self.features(inputs)
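`nn.Pool2D` is one of the layers this patch drops from `paddle.nn`, so the dygraph tests above fall back to the fluid implementation. A sketch of the rewritten sub-network (channel sizes follow the LeNet code in the hunks; the rest is illustrative):

    import paddle
    import paddle.nn as nn

    features = nn.Sequential(
        nn.Conv2d(1, 6, 3, stride=1, padding=1),
        nn.ReLU(),
        # old: nn.Pool2D(2, 'max', 2) -- alias removed from paddle.nn
        paddle.fluid.dygraph.Pool2D(2, 'max', 2))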
diff --git a/python/paddle/fluid/tests/unittests/test_isfinite_op.py b/python/paddle/fluid/tests/unittests/test_isfinite_op.py
index 743bdbc5a4..83d86aff7a 100644
--- a/python/paddle/fluid/tests/unittests/test_isfinite_op.py
+++ b/python/paddle/fluid/tests/unittests/test_isfinite_op.py
@@ -135,10 +135,10 @@ class BadInputTest(unittest.TestCase):
        with fluid.dygraph.guard():
            data = paddle.zeros([2, 3])
-           result = paddle.has_inf(data)
+           result = paddle.fluid.layers.has_inf(data)
            expect_value = np.array([False])
            self.assertEqual((result.numpy() == expect_value).all(), True)
-           result = paddle.has_nan(data)
+           result = paddle.fluid.layers.has_nan(data)
            self.assertEqual((result.numpy() == expect_value).all(), True)

diff --git a/python/paddle/fluid/tests/unittests/test_isfinite_v2_op.py b/python/paddle/fluid/tests/unittests/test_isfinite_v2_op.py
index 281dc7cade..0d4d3b58e8 100644
--- a/python/paddle/fluid/tests/unittests/test_isfinite_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_isfinite_v2_op.py
@@ -27,7 +27,7 @@ def run_static(x_np, dtype, op_str, use_gpu=False):
        place = paddle.CUDAPlace(0)
    exe = fluid.Executor(place)
    with fluid.program_guard(main_program, startup_program):
-       x = paddle.data(name='x', shape=x_np.shape, dtype=dtype)
+       x = paddle.fluid.data(name='x', shape=x_np.shape, dtype=dtype)
        res = getattr(paddle.tensor, op_str)(x)
        exe.run(startup_program)
        static_result = exe.run(main_program,
diff --git a/python/paddle/fluid/tests/unittests/test_l1_loss.py b/python/paddle/fluid/tests/unittests/test_l1_loss.py
index 3c37397cae..fba1695990 100644
--- a/python/paddle/fluid/tests/unittests/test_l1_loss.py
+++ b/python/paddle/fluid/tests/unittests/test_l1_loss.py
@@ -44,8 +44,8 @@ class TestFunctionalL1Loss(unittest.TestCase):
        self.assertTrue(dy_result.shape, [10, 10, 5])

    def run_static(self, use_gpu=False):
-       input = paddle.data(name='input', shape=[10, 10, 5], dtype='float32')
-       label = paddle.data(name='label', shape=[10, 10, 5], dtype='float32')
+       input = paddle.fluid.data(name='input', shape=[10, 10, 5], dtype='float32')
+       label = paddle.fluid.data(name='label', shape=[10, 10, 5], dtype='float32')
        result0 = paddle.nn.functional.l1_loss(input, label)
        result1 = paddle.nn.functional.l1_loss(input, label, reduction='sum')
        result2 = paddle.nn.functional.l1_loss(input, label, reduction='none')
@@ -90,9 +90,9 @@ class TestFunctionalL1Loss(unittest.TestCase):
    # test case the raise message
    def test_errors(self):
        def test_value_error():
-           input = paddle.data(
+           input = paddle.fluid.data(
                name='input', shape=[10, 10, 5], dtype='float32')
-           label = paddle.data(
+           label = paddle.fluid.data(
                name='label', shape=[10, 10, 5], dtype='float32')
            loss = paddle.nn.functional.l1_loss(
                input, label, reduction='reduce_mean')
@@ -127,8 +127,8 @@ class TestClassL1Loss(unittest.TestCase):
        self.assertTrue(dy_result.shape, [10, 10, 5])

    def run_static(self, use_gpu=False):
-       input = paddle.data(name='input', shape=[10, 10, 5], dtype='float32')
-       label = paddle.data(name='label', shape=[10, 10, 5], dtype='float32')
+       input = paddle.fluid.data(name='input', shape=[10, 10, 5], dtype='float32')
+       label = paddle.fluid.data(name='label', shape=[10, 10, 5], dtype='float32')
        l1_loss = paddle.nn.loss.L1Loss()
        result0 = l1_loss(input, label)
        l1_loss = paddle.nn.loss.L1Loss(reduction='sum')
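`paddle.has_inf` and `paddle.has_nan` likewise survive only under `paddle.fluid.layers`; in dygraph they still return a single-element bool tensor, as the isfinite test above checks. A minimal sketch:

    import paddle

    paddle.disable_static()
    data = paddle.zeros([2, 3])
    # old: paddle.has_inf(data) / paddle.has_nan(data)
    print(paddle.fluid.layers.has_inf(data).numpy())  # [False]
    print(paddle.fluid.layers.has_nan(data).numpy())  # [False]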
diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py
index cb1a5a6bdf..ce9cc33cf9 100644
--- a/python/paddle/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/fluid/tests/unittests/test_layers.py
@@ -327,7 +327,7 @@ class TestLayer(LayerTest):

        with self.dynamic_graph():
            t = np.ones([3, 3, 5, 5], dtype='float32')
-           my_pad2d = paddle.nn.Pad2D(paddings=1)
+           my_pad2d = paddle.nn.layer.Pad2D(paddings=1)
            dy_ret = my_pad2d(base.to_variable(t))
            dy_ret_value = dy_ret.numpy()
diff --git a/python/paddle/fluid/tests/unittests/test_log_softmax.py b/python/paddle/fluid/tests/unittests/test_log_softmax.py
index 9ac4895f49..0dd6c9f893 100644
--- a/python/paddle/fluid/tests/unittests/test_log_softmax.py
+++ b/python/paddle/fluid/tests/unittests/test_log_softmax.py
@@ -88,7 +88,7 @@ class TestNNLogSoftmaxAPI(unittest.TestCase):
        logsoftmax = paddle.nn.LogSoftmax(axis)
        # test static api
        with paddle.static.program_guard(paddle.static.Program()):
-           x = paddle.data(name='x', shape=self.x_shape)
+           x = paddle.fluid.data(name='x', shape=self.x_shape)
            y = logsoftmax(x)
            exe = paddle.static.Executor(self.place)
            out = exe.run(feed={'x': self.x}, fetch_list=[y])
@@ -120,7 +120,7 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):
        x = x.astype(dtype)
        ref_out = np.apply_along_axis(ref_log_softmax, axis, x)
        with paddle.static.program_guard(paddle.static.Program()):
-           x = paddle.data(name='x', shape=self.x_shape)
+           x = paddle.fluid.data(name='x', shape=self.x_shape)
            y = F.log_softmax(x, axis, dtype)
            exe = paddle.static.Executor(self.place)
            out = exe.run(feed={'x': self.x}, fetch_list=[y])
@@ -139,10 +139,10 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):
    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
-           x = paddle.data(name='X1', shape=[100], dtype='int32')
+           x = paddle.fluid.data(name='X1', shape=[100], dtype='int32')
            self.assertRaises(TypeError, F.log_softmax, x)

-           x = paddle.data(name='X2', shape=[100], dtype='float32')
+           x = paddle.fluid.data(name='X2', shape=[100], dtype='float32')
            self.assertRaises(TypeError, F.log_softmax, x, dtype='int32')

diff --git a/python/paddle/fluid/tests/unittests/test_logsumexp.py b/python/paddle/fluid/tests/unittests/test_logsumexp.py
index 9032293070..c48ec2a4fb 100644
--- a/python/paddle/fluid/tests/unittests/test_logsumexp.py
+++ b/python/paddle/fluid/tests/unittests/test_logsumexp.py
@@ -90,7 +90,7 @@ class TestLogsumexpError(unittest.TestCase):
    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            self.assertRaises(TypeError, paddle.logsumexp, 1)
-           x1 = paddle.data(name='x1', shape=[120], dtype="int32")
+           x1 = paddle.fluid.data(name='x1', shape=[120], dtype="int32")
            self.assertRaises(TypeError, paddle.logsumexp, x1)

@@ -104,7 +104,7 @@ class TestLogsumexpAPI(unittest.TestCase):
    def api_case(self, axis=None, keepdim=False):
        out_ref = ref_logsumexp(self.x, axis, keepdim)
        with paddle.static.program_guard(paddle.static.Program()):
-           x = paddle.data('X', self.shape)
+           x = paddle.fluid.data('X', self.shape)
            out = paddle.logsumexp(x, axis, keepdim)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x}, fetch_list=[out])
diff --git a/python/paddle/fluid/tests/unittests/test_lr_scheduler.py b/python/paddle/fluid/tests/unittests/test_lr_scheduler.py
index 21d1ba7e39..f9ae3cda67 100644
--- a/python/paddle/fluid/tests/unittests/test_lr_scheduler.py
+++ b/python/paddle/fluid/tests/unittests/test_lr_scheduler.py
@@ -414,7 +414,7 @@ class TestLRScheduler(unittest.TestCase):
            for batch_id in range(2):
                x = paddle.to_tensor(x)
                out = linear(x)
-               loss = paddle.reduce_mean(out)
+               loss = paddle.fluid.layers.reduce_mean(out)
                loss.backward()
                adam.step()
                adam.clear_grad()
diff --git a/python/paddle/fluid/tests/unittests/test_masked_select_op.py b/python/paddle/fluid/tests/unittests/test_masked_select_op.py
index 259a36e30d..ed1a981d03 100644
--- a/python/paddle/fluid/tests/unittests/test_masked_select_op.py
+++ b/python/paddle/fluid/tests/unittests/test_masked_select_op.py
@@ -74,8 +74,8 @@ class TestMaskedSelectAPI(unittest.TestCase):
    def test_static_mode(self):
        shape = [8, 9, 6]
-       x = paddle.data(shape=shape, dtype='float32', name='x')
-       mask = paddle.data(shape=shape, dtype='bool', name='mask')
+       x = paddle.fluid.data(shape=shape, dtype='float32', name='x')
+       mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask')
        np_x = np.random.random(shape).astype('float32')
        np_mask = np.array(np.random.randint(2, size=shape, dtype=bool))

@@ -97,9 +97,9 @@ class TestMaskedSelectError(unittest.TestCase):
                                         paddle.static.Program()):

            shape = [8, 9, 6]
-           x = paddle.data(shape=shape, dtype='float32', name='x')
-           mask = paddle.data(shape=shape, dtype='bool', name='mask')
-           mask_float = paddle.data(
+           x = paddle.fluid.data(shape=shape, dtype='float32', name='x')
+           mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask')
+           mask_float = paddle.fluid.data(
                shape=shape, dtype='float32', name='mask_float')
            np_x = np.random.random(shape).astype('float32')
            np_mask = np.array(np.random.randint(2, size=shape, dtype=bool))
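The lr_scheduler hunk above swaps the removed `paddle.reduce_mean` alias for `paddle.fluid.layers.reduce_mean`; for a full reduction, plain `paddle.mean` computes the same value. A hedged sketch of the equivalence (input is illustrative):

    import paddle

    paddle.disable_static()
    out = paddle.randn([4, 8])
    loss = paddle.fluid.layers.reduce_mean(out)  # old: paddle.reduce_mean(out)
    assert loss.numpy() == paddle.mean(out).numpy()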
diff --git a/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py b/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py
index 37bea9deae..4795b49301 100644
--- a/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py
+++ b/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py
@@ -473,12 +473,6 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
        # 3. Bool tensor operation
        x = paddle.to_tensor([[True, False], [True, False]])
        y = paddle.to_tensor([[False, False], [False, True]])
-       self.assertTrue(
-           np.array_equal(x.reduce_all().numpy(), paddle.reduce_all(x).numpy(
-           )))
-       self.assertTrue(
-           np.array_equal(x.reduce_any().numpy(), paddle.reduce_any(x).numpy(
-           )))
        self.assertTrue(
            np.array_equal(
                x.logical_and(y).numpy(), paddle.logical_and(x, y).numpy()))
@@ -501,18 +495,9 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
                x.where(a, b).numpy(), paddle.where(x, a, b).numpy()))

        self.assertTrue(inspect.ismethod(a.dot))
-       self.assertTrue(inspect.ismethod(a.elementwise_add))
-       self.assertTrue(inspect.ismethod(a.elementwise_div))
-       self.assertTrue(inspect.ismethod(a.elementwise_floordiv))
-       self.assertTrue(inspect.ismethod(a.elementwise_mod))
-       self.assertTrue(inspect.ismethod(a.elementwise_sub))
        self.assertTrue(inspect.ismethod(a.logsumexp))
        self.assertTrue(inspect.ismethod(a.multiplex))
        self.assertTrue(inspect.ismethod(a.prod))
-       self.assertTrue(inspect.ismethod(a.reduce_max))
-       self.assertTrue(inspect.ismethod(a.reduce_min))
-       self.assertTrue(inspect.ismethod(a.reduce_prod))
-       self.assertTrue(inspect.ismethod(a.reduce_sum))
        self.assertTrue(inspect.ismethod(a.scale))
        self.assertTrue(inspect.ismethod(a.stanh))
        self.assertTrue(inspect.ismethod(a.add_n))
@@ -528,7 +513,6 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
        self.assertTrue(inspect.ismethod(a.inverse))
        self.assertTrue(inspect.ismethod(a.log1p))
        self.assertTrue(inspect.ismethod(a.erf))
-       self.assertTrue(inspect.ismethod(a.addcmul))
        self.assertTrue(inspect.ismethod(a.addmm))
        self.assertTrue(inspect.ismethod(a.clip))
        self.assertTrue(inspect.ismethod(a.trace))
@@ -548,8 +532,6 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
        self.assertTrue(inspect.ismethod(a.argmax))
        self.assertTrue(inspect.ismethod(a.argmin))
        self.assertTrue(inspect.ismethod(a.argsort))
-       self.assertTrue(inspect.ismethod(a.has_inf))
-       self.assertTrue(inspect.ismethod(a.has_nan))
        self.assertTrue(inspect.ismethod(a.masked_select))
        self.assertTrue(inspect.ismethod(a.topk))
        self.assertTrue(inspect.ismethod(a.index_select))
@@ -557,7 +539,6 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
        self.assertTrue(inspect.ismethod(a.sort))
        self.assertTrue(inspect.ismethod(a.index_sample))
        self.assertTrue(inspect.ismethod(a.mean))
-       self.assertTrue(inspect.ismethod(a.reduce_mean))
        self.assertTrue(inspect.ismethod(a.std))
        self.assertTrue(inspect.ismethod(a.numel))

diff --git a/python/paddle/fluid/tests/unittests/test_maxout_op.py b/python/paddle/fluid/tests/unittests/test_maxout_op.py
index 1d38c83377..fac400caac 100644
--- a/python/paddle/fluid/tests/unittests/test_maxout_op.py
+++ b/python/paddle/fluid/tests/unittests/test_maxout_op.py
@@ -92,7 +92,7 @@ class TestMaxoutAPI(unittest.TestCase):
    def test_static_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
-           x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
+           x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.maxout(x, self.groups, self.axis)
            m = paddle.nn.Maxout(self.groups, self.axis)
            out2 = m(x)
@@ -137,11 +137,11 @@ class TestMaxoutAPI(unittest.TestCase):
        # The input type must be Variable.
        self.assertRaises(TypeError, F.maxout, 1)
        # The input dtype must be float16, float32, float64.
-       x_int32 = paddle.data(
+       x_int32 = paddle.fluid.data(
            name='x_int32', shape=[2, 4, 6, 8], dtype='int32')
        self.assertRaises(TypeError, F.maxout, x_int32)

-       x_float32 = paddle.data(name='x_float32', shape=[2, 4, 6, 8])
+       x_float32 = paddle.fluid.data(name='x_float32', shape=[2, 4, 6, 8])
        self.assertRaises(ValueError, F.maxout, x_float32, 2, 2)

diff --git a/python/paddle/fluid/tests/unittests/test_mean_op.py b/python/paddle/fluid/tests/unittests/test_mean_op.py
index f0094e703c..e2a2dcf44f 100644
--- a/python/paddle/fluid/tests/unittests/test_mean_op.py
+++ b/python/paddle/fluid/tests/unittests/test_mean_op.py
@@ -185,7 +185,7 @@ class TestMeanAPI(unittest.TestCase):
    def test_api_static(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
-           x = paddle.data('X', self.x_shape)
+           x = paddle.fluid.data('X', self.x_shape)
            out1 = paddle.mean(x)
            out2 = paddle.tensor.mean(x)
            out3 = paddle.tensor.stat.mean(x)
@@ -249,7 +249,7 @@ class TestMeanAPI(unittest.TestCase):
            self.assertRaises(Exception, paddle.mean, x, 2)
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
-           x = paddle.data('X', [10, 12], 'int32')
+           x = paddle.fluid.data('X', [10, 12], 'int32')
            self.assertRaises(TypeError, paddle.mean, x)

diff --git a/python/paddle/fluid/tests/unittests/test_mse_loss.py b/python/paddle/fluid/tests/unittests/test_mse_loss.py
index e327307e95..bc5d35d325 100644
--- a/python/paddle/fluid/tests/unittests/test_mse_loss.py
+++ b/python/paddle/fluid/tests/unittests/test_mse_loss.py
@@ -191,8 +191,8 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
        place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
        ) else paddle.CPUPlace()
        with paddle.static.program_guard(prog, startup_prog):
-           input = paddle.data(name='input', shape=dim, dtype='float32')
-           target = paddle.data(name='target', shape=dim, dtype='float32')
+           input = paddle.fluid.data(name='input', shape=dim, dtype='float32')
+           target = paddle.fluid.data(name='target', shape=dim, dtype='float32')
            mse_loss = paddle.nn.functional.mse_loss(input, target, 'mean')

        exe = paddle.static.Executor(place)
@@ -225,8 +225,8 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
        place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
        ) else paddle.CPUPlace()
        with paddle.static.program_guard(prog, startup_prog):
-           input = paddle.data(name='input', shape=dim, dtype='float32')
-           target = paddle.data(name='target', shape=dim, dtype='float32')
+           input = paddle.fluid.data(name='input', shape=dim, dtype='float32')
+           target = paddle.fluid.data(name='target', shape=dim, dtype='float32')
            mse_loss = paddle.nn.functional.mse_loss(input, target, 'sum')

        exe = paddle.static.Executor(place)
@@ -259,8 +259,8 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
        place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
        ) else paddle.CPUPlace()
        with paddle.static.program_guard(prog, startup_prog):
-           input = paddle.data(name='input', shape=dim, dtype='float32')
-           target = paddle.data(name='target', shape=dim, dtype='float32')
+           input = paddle.fluid.data(name='input', shape=dim, dtype='float32')
+           target = paddle.fluid.data(name='target', shape=dim, dtype='float32')
            mse_loss = paddle.nn.functional.mse_loss(input, target, 'none')

        exe = paddle.static.Executor(place)
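The test_math_op_patch_var_base deletions above reflect the same cleanup on the Tensor side: method-style aliases such as `Tensor.reduce_sum`, `Tensor.reduce_max`, or `Tensor.has_inf` are gone, so callers should use the surviving function form. A sketch under that assumption (values are illustrative):

    import paddle

    paddle.disable_static()
    a = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
    # old method aliases, now removed: a.reduce_sum(), a.reduce_max()
    total = paddle.sum(a)
    largest = paddle.max(a)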
diff --git a/python/paddle/fluid/tests/unittests/test_nll_loss.py b/python/paddle/fluid/tests/unittests/test_nll_loss.py
index c07bf949af..2b741fcd07 100644
--- a/python/paddle/fluid/tests/unittests/test_nll_loss.py
+++ b/python/paddle/fluid/tests/unittests/test_nll_loss.py
@@ -884,8 +884,8 @@ class TestNLLLossName(unittest.TestCase):
        startup_prog = paddle.static.Program()
        place = paddle.CPUPlace()
        with paddle.static.program_guard(prog, startup_prog):
-           x = paddle.data(name='x', shape=[10, 10], dtype='float64')
-           label = paddle.data(name='label', shape=[10], dtype='int64')
+           x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
+           label = paddle.fluid.data(name='label', shape=[10], dtype='int64')
            nll_loss = paddle.nn.loss.NLLLoss(name='nll_loss')
            res = nll_loss(x, label)
            self.assertTrue(res.name.startswith('nll_loss'))
@@ -898,8 +898,8 @@ class TestNLLLossInvalidArgs(unittest.TestCase):
        startup_prog = paddle.static.Program()
        place = paddle.CPUPlace()
        with paddle.static.program_guard(prog, startup_prog):
-           x = paddle.data(name='x', shape=[10, ], dtype='float64')
-           label = paddle.data(name='label', shape=[10, ], dtype='float64')
+           x = paddle.fluid.data(name='x', shape=[10, ], dtype='float64')
+           label = paddle.fluid.data(name='label', shape=[10, ], dtype='float64')
            nll_loss = paddle.nn.loss.NLLLoss()
            res = nll_loss(x, label)

@@ -922,8 +922,8 @@ class TestNLLLossInvalidArgs(unittest.TestCase):
        startup_prog = paddle.static.Program()
        place = paddle.CPUPlace()
        with paddle.static.program_guard(prog, startup_prog):
-           x = paddle.data(name='x', shape=[10, 10], dtype='float64')
-           label = paddle.data(name='label', shape=[10], dtype='int64')
+           x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
+           label = paddle.fluid.data(name='label', shape=[10], dtype='int64')
            nll_loss = paddle.nn.loss.NLLLoss(reduction='')
            res = nll_loss(x, label)

@@ -946,8 +946,8 @@ class TestNLLLossInvalidArgs(unittest.TestCase):
        startup_prog = paddle.static.Program()
        place = paddle.CPUPlace()
        with paddle.static.program_guard(prog, startup_prog):
-           x = paddle.data(name='x', shape=[10, 10], dtype='float64')
-           label = paddle.data(name='label', shape=[10], dtype='int64')
+           x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
+           label = paddle.fluid.data(name='label', shape=[10], dtype='int64')
            res = paddle.nn.functional.nll_loss(x, label, reduction='')

        self.assertRaises(ValueError,
diff --git a/python/paddle/fluid/tests/unittests/test_normal.py b/python/paddle/fluid/tests/unittests/test_normal.py
index 995a1f26ff..595e0bb480 100644
--- a/python/paddle/fluid/tests/unittests/test_normal.py
+++ b/python/paddle/fluid/tests/unittests/test_normal.py
@@ -61,8 +61,8 @@ class TestNormalAPI(unittest.TestCase):
        if isinstance(self.mean, np.ndarray) \
            and isinstance(self.std, np.ndarray):
            with paddle.static.program_guard(paddle.static.Program()):
-               mean = paddle.data('Mean', self.mean.shape, self.mean.dtype)
-               std = paddle.data('Std', self.std.shape, self.std.dtype)
+               mean = paddle.fluid.data('Mean', self.mean.shape, self.mean.dtype)
+               std = paddle.fluid.data('Std', self.std.shape, self.std.dtype)
                out = paddle.normal(mean, std, self.shape)

                exe = paddle.static.Executor(self.place)
@@ -76,7 +76,7 @@ class TestNormalAPI(unittest.TestCase):
            return ret_all
        elif isinstance(self.mean, np.ndarray):
            with paddle.static.program_guard(paddle.static.Program()):
-               mean = paddle.data('Mean', self.mean.shape, self.mean.dtype)
+               mean = paddle.fluid.data('Mean', self.mean.shape, self.mean.dtype)
                out = paddle.normal(mean, self.std, self.shape)

                exe = paddle.static.Executor(self.place)
@@ -86,7 +86,7 @@ class TestNormalAPI(unittest.TestCase):
            return ret_all
        elif isinstance(self.std, np.ndarray):
            with paddle.static.program_guard(paddle.static.Program()):
-               std = paddle.data('Std', self.std.shape, self.std.dtype)
+               std = paddle.fluid.data('Std', self.std.shape, self.std.dtype)
                out = paddle.normal(self.mean, std, self.shape)

                exe = paddle.static.Executor(self.place)
@@ -180,17 +180,17 @@ class TestNormalErrors(unittest.TestCase):
            std = [1, 2, 3]
            self.assertRaises(TypeError, paddle.normal, std=std)

-           mean = paddle.data('Mean', [100], 'int32')
+           mean = paddle.fluid.data('Mean', [100], 'int32')
            self.assertRaises(TypeError, paddle.normal, mean)

-           std = paddle.data('Std', [100], 'int32')
+           std = paddle.fluid.data('Std', [100], 'int32')
            self.assertRaises(TypeError, paddle.normal, mean=1.0, std=std)

            self.assertRaises(TypeError, paddle.normal, shape=1)
            self.assertRaises(TypeError, paddle.normal, shape=[1.0])

-           shape = paddle.data('Shape', [100], 'float32')
+           shape = paddle.fluid.data('Shape', [100], 'float32')
            self.assertRaises(TypeError, paddle.normal, shape=shape)

diff --git a/python/paddle/fluid/tests/unittests/test_normalize.py b/python/paddle/fluid/tests/unittests/test_normalize.py
index 614e0e8976..274a4ebee7 100644
--- a/python/paddle/fluid/tests/unittests/test_normalize.py
+++ b/python/paddle/fluid/tests/unittests/test_normalize.py
@@ -56,8 +56,8 @@ class TestNNFunctionalNormalize(unittest.TestCase):
        self.assertRaises(BaseException, F.normalize, x)

    def run_static(self, use_gpu=False):
-       x = paddle.data(name='input', shape=[10, 10], dtype='float32')
-       x2 = paddle.data(name='input2', shape=[2], dtype='float32')
+       x = paddle.fluid.data(name='input', shape=[10, 10], dtype='float32')
+       x2 = paddle.fluid.data(name='input2', shape=[2], dtype='float32')
        result0 = F.normalize(x)
        result1 = F.normalize(x, p=1.5)
        result2 = F.normalize(x, axis=0)
diff --git a/python/paddle/fluid/tests/unittests/test_numel_op.py b/python/paddle/fluid/tests/unittests/test_numel_op.py
index 800706e596..d106484d91 100644
--- a/python/paddle/fluid/tests/unittests/test_numel_op.py
+++ b/python/paddle/fluid/tests/unittests/test_numel_op.py
@@ -55,8 +55,8 @@ class TestNumelOoAPI(unittest.TestCase):
        with fluid.program_guard(main_program, startup_program):
            shape1 = [2, 1, 4, 5]
            shape2 = [1, 4, 5]
-           x_1 = paddle.data(shape=shape1, dtype='int32', name='x_1')
-           x_2 = paddle.data(shape=shape2, dtype='int32', name='x_2')
+           x_1 = paddle.fluid.data(shape=shape1, dtype='int32', name='x_1')
+           x_2 = paddle.fluid.data(shape=shape2, dtype='int32', name='x_2')
            input_1 = np.random.random(shape1).astype("int32")
            input_2 = np.random.random(shape2).astype("int32")
            out_1 = paddle.numel(x_1)
diff --git a/python/paddle/fluid/tests/unittests/test_ones_like.py b/python/paddle/fluid/tests/unittests/test_ones_like.py
index bb0d6f07bd..db7fc9d2b2 100644
--- a/python/paddle/fluid/tests/unittests/test_ones_like.py
+++ b/python/paddle/fluid/tests/unittests/test_ones_like.py
@@ -25,7 +25,7 @@ from paddle.fluid import core, Program, program_guard
class TestOnesLikeAPIError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
-           x = paddle.data('x', [3, 4])
+           x = paddle.fluid.data('x', [3, 4])
            self.assertRaises(TypeError, ones_like, x, 'int8')

@@ -35,7 +35,7 @@ class TestOnesLikeAPI(unittest.TestCase):
        startup_program = Program()
        train_program = Program()
        with program_guard(train_program, startup_program):
-           x = paddle.data('X', shape)
+           x = paddle.fluid.data('X', shape)
            # 'bool', 'float32', 'float64', 'int32', 'int64'
            out1 = ones_like(x)
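Note that `paddle.normal` itself keeps its signature in these hunks; only the way its tensor `mean`/`std` inputs are declared changes. A sketch of the static-mode call under that reading (shape and feed values are only examples):

    import numpy as np
    import paddle

    paddle.enable_static()
    with paddle.static.program_guard(paddle.static.Program()):
        mean = paddle.fluid.data('Mean', [100], 'float32')  # old: paddle.data(...)
        out = paddle.normal(mean=mean)
        exe = paddle.static.Executor(paddle.CPUPlace())
        ret, = exe.run(feed={'Mean': np.zeros([100], 'float32')},
                       fetch_list=[out])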
diff --git a/python/paddle/fluid/tests/unittests/test_pad3d_op.py b/python/paddle/fluid/tests/unittests/test_pad3d_op.py
index 11719a9c4a..aa75ee9c7c 100644
--- a/python/paddle/fluid/tests/unittests/test_pad3d_op.py
+++ b/python/paddle/fluid/tests/unittests/test_pad3d_op.py
@@ -165,7 +165,7 @@ class TestPadAPI(unittest.TestCase):
        mode = "constant"
        value = 100
        input_data = np.random.rand(*input_shape).astype(np.float32)
-       x = paddle.data(name="x", shape=input_shape)
+       x = paddle.fluid.data(name="x", shape=input_shape)
        result = F.pad(x=x,
                       pad=pad,
                       value=value,
@@ -186,7 +186,7 @@ class TestPadAPI(unittest.TestCase):
        pad = [1, 2, 1, 1, 1, 2]
        mode = "reflect"
        input_data = np.random.rand(*input_shape).astype(np.float32)
-       x = paddle.data(name="x", shape=input_shape)
+       x = paddle.fluid.data(name="x", shape=input_shape)
        result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW")
        result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC")
        exe = Executor(place)
@@ -208,7 +208,7 @@ class TestPadAPI(unittest.TestCase):
        pad = [1, 2, 1, 1, 3, 4]
        mode = "replicate"
        input_data = np.random.rand(*input_shape).astype(np.float32)
-       x = paddle.data(name="x", shape=input_shape)
+       x = paddle.fluid.data(name="x", shape=input_shape)
        result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW")
        result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC")
        exe = Executor(place)
@@ -230,7 +230,7 @@ class TestPadAPI(unittest.TestCase):
        pad = [1, 2, 1, 1, 3, 4]
        mode = "circular"
        input_data = np.random.rand(*input_shape).astype(np.float32)
-       x = paddle.data(name="x", shape=input_shape)
+       x = paddle.fluid.data(name="x", shape=input_shape)
        result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW")
        result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC")
        exe = Executor(place)
@@ -637,7 +637,7 @@ class TestPad3dOpError(unittest.TestCase):
        def test_reflect_1():
            input_shape = (1, 2, 3, 4, 5)
            data = np.random.rand(*input_shape).astype(np.float32)
-           x = paddle.data(name="x", shape=input_shape)
+           x = paddle.fluid.data(name="x", shape=input_shape)
            y = F.pad(x, pad=[5, 6, 1, 1, 1, 1], value=1, mode='reflect')
            place = paddle.CPUPlace()
            exe = Executor(place)
@@ -646,7 +646,7 @@ class TestPad3dOpError(unittest.TestCase):
        def test_reflect_2():
            input_shape = (1, 2, 3, 4, 5)
            data = np.random.rand(*input_shape).astype(np.float32)
-           x = paddle.data(name="x", shape=input_shape)
+           x = paddle.fluid.data(name="x", shape=input_shape)
            y = F.pad(x, pad=[1, 1, 4, 3, 1, 1], value=1, mode='reflect')
            place = paddle.CPUPlace()
            exe = Executor(place)
@@ -655,7 +655,7 @@ class TestPad3dOpError(unittest.TestCase):
        def test_reflect_3():
            input_shape = (1, 2, 3, 4, 5)
            data = np.random.rand(*input_shape).astype(np.float32)
-           x = paddle.data(name="x", shape=input_shape)
+           x = paddle.fluid.data(name="x", shape=input_shape)
            y = F.pad(x, pad=[1, 1, 1, 1, 2, 3], value=1, mode='reflect')
            place = paddle.CPUPlace()
            exe = Executor(place)
diff --git a/python/paddle/fluid/tests/unittests/test_pairwise_distance.py b/python/paddle/fluid/tests/unittests/test_pairwise_distance.py
index cf138e6772..c91616b06e 100644
--- a/python/paddle/fluid/tests/unittests/test_pairwise_distance.py
+++ b/python/paddle/fluid/tests/unittests/test_pairwise_distance.py
@@ -32,8 +32,8 @@ def test_static(x_np, y_np, p=2.0, epsilon=1e-6, keepdim=False):
    ) else fluid.CPUPlace()

    with paddle.static.program_guard(prog, startup_prog):
-       x = paddle.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
-       y = paddle.data(name='y', shape=y_np.shape, dtype=x_np.dtype)
+       x = paddle.fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
+       y = paddle.fluid.data(name='y', shape=y_np.shape, dtype=x_np.dtype)
        dist = paddle.nn.layer.distance.PairwiseDistance(
            p=p, epsilon=epsilon, keepdim=keepdim)
        distance = dist(x, y)
diff --git a/python/paddle/fluid/tests/unittests/test_pixel_shuffle.py b/python/paddle/fluid/tests/unittests/test_pixel_shuffle.py
index cf93f39ab8..f75d6e9df5 100644
--- a/python/paddle/fluid/tests/unittests/test_pixel_shuffle.py
+++ b/python/paddle/fluid/tests/unittests/test_pixel_shuffle.py
@@ -97,8 +97,8 @@ class TestPixelShuffleAPI(unittest.TestCase):
        place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()

        paddle.enable_static()
-       x_1 = paddle.data(name="x", shape=[2, 9, 4, 4], dtype="float64")
-       x_2 = paddle.data(name="x2", shape=[2, 4, 4, 9], dtype="float64")
+       x_1 = paddle.fluid.data(name="x", shape=[2, 9, 4, 4], dtype="float64")
+       x_2 = paddle.fluid.data(name="x2", shape=[2, 4, 4, 9], dtype="float64")
        out_1 = F.pixel_shuffle(x_1, 3)
        out_2 = F.pixel_shuffle(x_2, 3, "NHWC")

@@ -123,8 +123,8 @@ class TestPixelShuffleAPI(unittest.TestCase):
        place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()

        paddle.enable_static()
-       x_1 = paddle.data(name="x", shape=[2, 9, 4, 4], dtype="float64")
-       x_2 = paddle.data(name="x2", shape=[2, 4, 4, 9], dtype="float64")
+       x_1 = paddle.fluid.data(name="x", shape=[2, 9, 4, 4], dtype="float64")
+       x_2 = paddle.fluid.data(name="x2", shape=[2, 4, 4, 9], dtype="float64")
        # init instance
        ps_1 = paddle.nn.PixelShuffle(3)
        ps_2 = paddle.nn.PixelShuffle(3, "NHWC")
diff --git a/python/paddle/fluid/tests/unittests/test_prelu_op.py b/python/paddle/fluid/tests/unittests/test_prelu_op.py
index 16388ff8f5..f33b375029 100644
--- a/python/paddle/fluid/tests/unittests/test_prelu_op.py
+++ b/python/paddle/fluid/tests/unittests/test_prelu_op.py
@@ -49,8 +49,8 @@ class TestFunctionalPReluAPI(unittest.TestCase):
    def static_check(self, weight_np):
        with paddle.static.program_guard(paddle.static.Program()):
-           x = paddle.data('X', self.x_np.shape, 'float32')
-           weight = paddle.data('Alpha', weight_np.shape, 'float32')
+           x = paddle.fluid.data('X', self.x_np.shape, 'float32')
+           weight = paddle.fluid.data('Alpha', weight_np.shape, 'float32')
            out = F.prelu(x, weight)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np,
@@ -78,15 +78,15 @@ class TestFunctionalPReluAPI(unittest.TestCase):
    def test_error(self):
        with paddle.static.program_guard(paddle.static.Program()):
-           weight_fp32 = paddle.data(
+           weight_fp32 = paddle.fluid.data(
                name='weight_fp32', shape=[1], dtype='float32')
            # The input type must be Variable.
            self.assertRaises(TypeError, F.prelu, x=1, weight=weight_fp32)
            # The input dtype must be float16, float32, float64.
-           x_int32 = paddle.data(name='x_int32', shape=[2, 3], dtype='int32')
+           x_int32 = paddle.fluid.data(name='x_int32', shape=[2, 3], dtype='int32')
            self.assertRaises(TypeError, F.prelu, x=x_int32, weight=weight_fp32)
            # support the input dtype is float16
-           x_fp16 = paddle.data(name='x_fp16', shape=[2, 3], dtype='float16')
+           x_fp16 = paddle.fluid.data(name='x_fp16', shape=[2, 3], dtype='float16')
            F.prelu(x=x_fp16, weight=weight_fp32)

@@ -100,7 +100,7 @@ class TestNNPReluAPI(unittest.TestCase):
        startup_program = paddle.static.Program()
        train_program = paddle.static.Program()
        with paddle.static.program_guard(train_program, startup_program):
-           x = paddle.data(name='X', shape=self.x_np.shape, dtype='float32')
+           x = paddle.fluid.data(name='X', shape=self.x_np.shape, dtype='float32')
            m = paddle.nn.PReLU()
            out = m(x)
            exe = paddle.static.Executor(self.place)
diff --git a/python/paddle/fluid/tests/unittests/test_prod_op.py b/python/paddle/fluid/tests/unittests/test_prod_op.py
index 1586839072..15fd79542d 100644
--- a/python/paddle/fluid/tests/unittests/test_prod_op.py
+++ b/python/paddle/fluid/tests/unittests/test_prod_op.py
@@ -55,7 +55,7 @@ class TestProdOp(unittest.TestCase):
        self.assertTrue(np.allclose(dy_result.numpy(), expected_result))

    def run_static(self, use_gpu=False):
-       input = paddle.data(name='input', shape=[10, 10, 5], dtype='float32')
+       input = paddle.fluid.data(name='input', shape=[10, 10, 5], dtype='float32')
        result0 = paddle.prod(input)
        result1 = paddle.prod(input, axis=1)
        result2 = paddle.prod(input, axis=-1)
@@ -113,8 +113,8 @@ class TestProdOpError(unittest.TestCase):
    def test_error(self):
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
-           x = paddle.data(name='x', shape=[2, 2, 4], dtype='float32')
-           bool_x = paddle.data(name='bool_x', shape=[2, 2, 4], dtype='bool')
+           x = paddle.fluid.data(name='x', shape=[2, 2, 4], dtype='float32')
+           bool_x = paddle.fluid.data(name='bool_x', shape=[2, 2, 4], dtype='bool')
            # The argument x shoule be a Tensor
            self.assertRaises(TypeError, paddle.prod, [1])
diff --git a/python/paddle/fluid/tests/unittests/test_randint_op.py b/python/paddle/fluid/tests/unittests/test_randint_op.py
index 7880b48cd7..82bfb88d54 100644
--- a/python/paddle/fluid/tests/unittests/test_randint_op.py
+++ b/python/paddle/fluid/tests/unittests/test_randint_op.py
@@ -125,8 +125,8 @@ class TestRandintAPI(unittest.TestCase):
            out3 = paddle.randint(
                low=-100, high=100, shape=(32, 32, 3), dtype='int64')
            # shape is a tensorlist and dtype is 'float32'
-           dim_1 = paddle.fill_constant([1], "int64", 32)
-           dim_2 = paddle.fill_constant([1], "int32", 50)
+           dim_1 = paddle.fluid.layers.fill_constant([1], "int64", 32)
+           dim_2 = paddle.fluid.layers.fill_constant([1], "int32", 50)
            out4 = paddle.randint(
                low=-100, high=100, shape=[dim_1, 5, dim_2], dtype='int32')
            # shape is a tensor and dtype is 'float64'
diff --git a/python/paddle/fluid/tests/unittests/test_randn_op.py b/python/paddle/fluid/tests/unittests/test_randn_op.py
index 4ddd98a8a7..6d33b468ee 100644
--- a/python/paddle/fluid/tests/unittests/test_randn_op.py
+++ b/python/paddle/fluid/tests/unittests/test_randn_op.py
@@ -30,8 +30,8 @@ class TestRandnOp(unittest.TestCase):
        x1 = paddle.randn(shape, 'float32')
        x2 = paddle.randn(shape, 'float64')

-       dim_1 = paddle.fill_constant([1], "int64", 20)
-       dim_2 = paddle.fill_constant([1], "int32", 50)
+       dim_1 = paddle.fluid.layers.fill_constant([1], "int64", 20)
+       dim_2 = paddle.fluid.layers.fill_constant([1], "int32", 50)
        x3 = paddle.randn([dim_1, dim_2, 784])
        var_shape = paddle.static.data('X', [2], 'int32')
@@ -59,8 +59,8 @@ class TestRandnOpForDygraph(unittest.TestCase):
        x1 = paddle.randn(shape, 'float32')
        x2 = paddle.randn(shape, 'float64')

-       dim_1 = paddle.fill_constant([1], "int64", 20)
-       dim_2 = paddle.fill_constant([1], "int32", 50)
+       dim_1 = paddle.fluid.layers.fill_constant([1], "int64", 20)
+       dim_2 = paddle.fluid.layers.fill_constant([1], "int32", 50)
        x3 = paddle.randn(shape=[dim_1, dim_2, 784])

        var_shape = paddle.to_tensor(np.array(shape))
diff --git a/python/paddle/fluid/tests/unittests/test_reshape_op.py b/python/paddle/fluid/tests/unittests/test_reshape_op.py
index 275f9d21f9..295891605c 100644
--- a/python/paddle/fluid/tests/unittests/test_reshape_op.py
+++ b/python/paddle/fluid/tests/unittests/test_reshape_op.py
@@ -229,8 +229,8 @@ class TestReshapeUint8Op(TestReshapeInt8Op):
# Test python API
class TestReshapeAPI(unittest.TestCase):
    def _set_paddle_api(self):
-       self.fill_constant = paddle.fill_constant
-       self.data = paddle.data
+       self.fill_constant = paddle.fluid.layers.fill_constant
+       self.data = paddle.fluid.data
        self.reshape = paddle.reshape
        self.to_tensor = paddle.to_tensor

@@ -305,7 +305,7 @@ class TestReshapeAPI(unittest.TestCase):
# Test Input Error
class TestReshapeOpError(unittest.TestCase):
    def _set_paddle_api(self):
-       self.data = paddle.data
+       self.data = paddle.fluid.data
        self.reshape = paddle.reshape

    def _set_fluid_api(self):
diff --git a/python/paddle/fluid/tests/unittests/test_retain_graph.py b/python/paddle/fluid/tests/unittests/test_retain_graph.py
index 98c7e3800c..5829ba624e 100644
--- a/python/paddle/fluid/tests/unittests/test_retain_graph.py
+++ b/python/paddle/fluid/tests/unittests/test_retain_graph.py
@@ -73,7 +73,7 @@ class TestRetainGraph(unittest.TestCase):
        fake_AB = paddle.concat((real_data.detach(), interpolatesv), 1)
        disc_interpolates = netD(fake_AB)

-       outs = paddle.fill_constant(disc_interpolates.shape,
+       outs = paddle.fluid.layers.fill_constant(disc_interpolates.shape,
                                    disc_interpolates.dtype, 1.0)
        gradients = paddle.grad(
            outputs=disc_interpolates,
@@ -85,7 +85,7 @@ class TestRetainGraph(unittest.TestCase):

        gradients = paddle.reshape(gradients[0], [real_data.shape[0], -1])

-       gradient_penalty = paddle.reduce_mean((paddle.norm(
+       gradient_penalty = paddle.fluid.layers.reduce_mean((paddle.norm(
            gradients + 1e-16, 2, 1) - constant)**
                                              2) * lambda_gp  # added eps
        return gradient_penalty, gradients
@@ -113,7 +113,7 @@ class TestRetainGraph(unittest.TestCase):
        fake_AB = paddle.concat((realA, fakeB), 1)
        G_pred_fake = d(fake_AB.detach())

-       false_target = paddle.fill_constant(G_pred_fake.shape, 'float32', 0.0)
+       false_target = paddle.fluid.layers.fill_constant(G_pred_fake.shape, 'float32', 0.0)

        G_gradient_penalty, _ = self.cal_gradient_penalty(
            d, realA, fakeB, lambda_gp=10.0)
@@ -125,7 +125,7 @@ class TestRetainGraph(unittest.TestCase):
        optim_g.clear_gradients()
        fake_AB = paddle.concat((realA, fakeB), 1)
        G_pred_fake = d(fake_AB)

-       true_target = paddle.fill_constant(G_pred_fake.shape, 'float32', 1.0)
+       true_target = paddle.fluid.layers.fill_constant(G_pred_fake.shape, 'float32', 1.0)

        loss_g = l1_criterion(fakeB, realB) + gan_criterion(G_pred_fake,
                                                            true_target)

diff --git a/python/paddle/fluid/tests/unittests/test_row_conv.py b/python/paddle/fluid/tests/unittests/test_row_conv.py
index abec23c7f6..7b6068c32c 100644
--- a/python/paddle/fluid/tests/unittests/test_row_conv.py
+++ b/python/paddle/fluid/tests/unittests/test_row_conv.py
@@ -69,7 +69,7 @@ class RowConvTestCase(unittest.TestCase):
            x = fluid.data(
                "input", [-1, -1, self.num_channels], dtype=self.dtype)
            w = fluid.data("weight", self.weight_shape, dtype=self.dtype)
-           y = F.row_conv(x, w, act=self.act)
+           y = F.extension.row_conv(x, w, act=self.act)
            exe = fluid.Executor(place)
            exe.run(start)
            y_np, = exe.run(main,
@@ -82,7 +82,7 @@ class RowConvTestCase(unittest.TestCase):
        with dg.guard(place):
            x_var = dg.to_variable(self.input)
            w_var = dg.to_variable(self.weight)
-           y_var = F.row_conv(x_var, w_var, act=self.act)
+           y_var = F.extension.row_conv(x_var, w_var, act=self.act)
            y_np = y_var.numpy()
        return y_np

diff --git a/python/paddle/fluid/tests/unittests/test_selu_op.py b/python/paddle/fluid/tests/unittests/test_selu_op.py
index b5a2e84a53..95ae1eecc6 100644
--- a/python/paddle/fluid/tests/unittests/test_selu_op.py
+++ b/python/paddle/fluid/tests/unittests/test_selu_op.py
@@ -93,7 +93,7 @@ class TestSeluAPI(unittest.TestCase):
    def test_static_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
-           x = paddle.data('X', self.x_np.shape, self.x_np.dtype)
+           x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.selu(x, self.scale, self.alpha)
            selu = paddle.nn.SELU(self.scale, self.alpha)
            out2 = selu(x)
@@ -128,15 +128,15 @@ class TestSeluAPI(unittest.TestCase):
        # The input type must be Variable.
        self.assertRaises(TypeError, F.selu, 1)
        # The input dtype must be float16, float32, float64.
-       x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
+       x_int32 = paddle.fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
        self.assertRaises(TypeError, F.selu, x_int32)
        # The scale must be greater than 1.0
-       x_fp32 = paddle.data(name='x_fp32', shape=[12, 10], dtype='float32')
+       x_fp32 = paddle.fluid.data(name='x_fp32', shape=[12, 10], dtype='float32')
        self.assertRaises(ValueError, F.selu, x_fp32, -1.0)
        # The alpha must be no less than 0
        self.assertRaises(ValueError, F.selu, x_fp32, 1.6, -1.0)
        # support the input dtype is float16
-       x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
+       x_fp16 = paddle.fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
        F.selu(x_fp16)

diff --git a/python/paddle/fluid/tests/unittests/test_sigmoid_focal_loss.py b/python/paddle/fluid/tests/unittests/test_sigmoid_focal_loss.py
index 71e119739e..85f9501e53 100644
--- a/python/paddle/fluid/tests/unittests/test_sigmoid_focal_loss.py
+++ b/python/paddle/fluid/tests/unittests/test_sigmoid_focal_loss.py
@@ -42,13 +42,13 @@ def test_static(place,
    prog = paddle.static.Program()
    startup_prog = paddle.static.Program()
    with paddle.static.program_guard(prog, startup_prog):
-       logit = paddle.data(name='logit', shape=logit_np.shape, dtype='float64')
-       label = paddle.data(name='label', shape=label_np.shape, dtype='float64')
+       logit = paddle.fluid.data(name='logit', shape=logit_np.shape, dtype='float64')
+       label = paddle.fluid.data(name='label', shape=label_np.shape, dtype='float64')
        feed_dict = {"logit": logit_np, "label": label_np}

        normalizer = None
        if normalizer_np is not None:
-           normalizer = paddle.data(
+           normalizer = paddle.fluid.data(
                name='normalizer', shape=normalizer_np.shape, dtype='float64')
            feed_dict["normalizer"] = normalizer_np

diff --git a/python/paddle/fluid/tests/unittests/test_softmax_op.py b/python/paddle/fluid/tests/unittests/test_softmax_op.py
index a37fad9cf0..71df2c4acc 100644
--- a/python/paddle/fluid/tests/unittests/test_softmax_op.py
+++ b/python/paddle/fluid/tests/unittests/test_softmax_op.py
@@ -315,7 +315,7 @@ class TestSoftmaxAPI(unittest.TestCase):
    def test_static_check(self):
        with paddle.static.program_guard(paddle.static.Program()):
-           x = paddle.data('X', self.x_np.shape, 'float32')
+           x = paddle.fluid.data('X', self.x_np.shape, 'float32')
            out1 = F.softmax(x)
            m = paddle.nn.Softmax()
            out2 = m(x)
@@ -354,10 +354,10 @@ class TestSoftmaxAPI(unittest.TestCase):
        # The input type must be Variable.
        self.assertRaises(TypeError, F.softmax, 1)
        # The input dtype must be float16, float32, float64.
-       x_int32 = paddle.data(name='x_int32', shape=[2, 3], dtype='int32')
+       x_int32 = paddle.fluid.data(name='x_int32', shape=[2, 3], dtype='int32')
        self.assertRaises(TypeError, F.softmax, x_int32)
        # support the input dtype is float16
-       x_fp16 = paddle.data(name='x_fp16', shape=[2, 3], dtype='float16')
+       x_fp16 = paddle.fluid.data(name='x_fp16', shape=[2, 3], dtype='float16')
        F.softmax(x_fp16)

diff --git a/python/paddle/fluid/tests/unittests/test_std_layer.py b/python/paddle/fluid/tests/unittests/test_std_layer.py
index e455151481..2196996aff 100644
--- a/python/paddle/fluid/tests/unittests/test_std_layer.py
+++ b/python/paddle/fluid/tests/unittests/test_std_layer.py
@@ -44,7 +44,7 @@ class TestStdAPI(unittest.TestCase):
    def static(self):
        with paddle.static.program_guard(paddle.static.Program()):
-           x = paddle.data('X', self.shape, self.dtype)
+           x = paddle.fluid.data('X', self.shape, self.dtype)
            out = paddle.std(x, self.axis, self.unbiased, self.keepdim)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x}, fetch_list=[out])
@@ -111,7 +111,7 @@ class TestStdAPI_alias(unittest.TestCase):
class TestStdError(unittest.TestCase):
    def test_error(self):
        with paddle.static.program_guard(paddle.static.Program()):
-           x = paddle.data('X', [2, 3, 4], 'int32')
+           x = paddle.fluid.data('X', [2, 3, 4], 'int32')
            self.assertRaises(TypeError, paddle.std, x)

diff --git a/python/paddle/fluid/tests/unittests/test_temporal_shift_op.py b/python/paddle/fluid/tests/unittests/test_temporal_shift_op.py
index 1fbc0fc460..a102bcea99 100644
--- a/python/paddle/fluid/tests/unittests/test_temporal_shift_op.py
+++ b/python/paddle/fluid/tests/unittests/test_temporal_shift_op.py
@@ -81,7 +81,7 @@ class TestTemporalShift3(TestTemporalShift):
class TestTemporalShiftAPI(unittest.TestCase):
    def test_api(self):
        input = paddle.randn([6, 4, 2, 2])
-       out = paddle.nn.functional.temporal_shift(
+       out = paddle.fluid.layers.temporal_shift(
            x=input, seg_num=2, shift_ratio=0.2)

diff --git a/python/paddle/fluid/tests/unittests/test_unique.py b/python/paddle/fluid/tests/unittests/test_unique.py
index a2c60d870e..a4bef436e1 100644
--- a/python/paddle/fluid/tests/unittests/test_unique.py
+++ b/python/paddle/fluid/tests/unittests/test_unique.py
@@ -254,7 +254,7 @@ class TestUniqueAPI(unittest.TestCase):
    def test_static_graph(self):
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
-           x = paddle.data(name='x', shape=[3, 2], dtype='float64')
+           x = paddle.fluid.data(name='x', shape=[3, 2], dtype='float64')
            unique, inverse, counts = paddle.unique(
                x, return_inverse=True, return_counts=True, axis=0)
            place = paddle.CPUPlace()
@@ -274,13 +274,13 @@ class TestUniqueError(unittest.TestCase):
        def test_x_dtype():
            with paddle.static.program_guard(paddle.static.Program(),
                                             paddle.static.Program()):
-               x = paddle.data(name='x', shape=[10, 10], dtype='float16')
+               x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float16')
                result = paddle.unique(x)

        self.assertRaises(TypeError, test_x_dtype)

    def test_attr(self):
-       x = paddle.data(name='x', shape=[10, 10], dtype='float64')
+       x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
shape=[10, 10], dtype='float64') def test_return_index(): result = paddle.unique(x, return_index=0) diff --git a/python/paddle/fluid/tests/unittests/test_variance_layer.py b/python/paddle/fluid/tests/unittests/test_variance_layer.py index b5bb3cc978..13e3cf4df1 100644 --- a/python/paddle/fluid/tests/unittests/test_variance_layer.py +++ b/python/paddle/fluid/tests/unittests/test_variance_layer.py @@ -44,7 +44,7 @@ class TestVarAPI(unittest.TestCase): def static(self): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.data('X', self.shape, self.dtype) + x = paddle.fluid.data('X', self.shape, self.dtype) out = paddle.var(x, self.axis, self.unbiased, self.keepdim) exe = paddle.static.Executor(self.place) res = exe.run(feed={'X': self.x}, fetch_list=[out]) @@ -111,7 +111,7 @@ class TestVarAPI_alias(unittest.TestCase): class TestVarError(unittest.TestCase): def test_error(self): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.data('X', [2, 3, 4], 'int32') + x = paddle.fluid.data('X', [2, 3, 4], 'int32') self.assertRaises(TypeError, paddle.var, x) diff --git a/python/paddle/fluid/tests/unittests/test_zeros_like_op.py b/python/paddle/fluid/tests/unittests/test_zeros_like_op.py index 2cea307280..6546d7b99f 100644 --- a/python/paddle/fluid/tests/unittests/test_zeros_like_op.py +++ b/python/paddle/fluid/tests/unittests/test_zeros_like_op.py @@ -25,7 +25,7 @@ from paddle.fluid import core, Program, program_guard class TestZerosLikeAPIError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): - x = paddle.data('x', [3, 4]) + x = paddle.fluid.data('x', [3, 4]) self.assertRaises(TypeError, zeros_like, x, 'int8') @@ -35,7 +35,7 @@ class TestZerosLikeAPI(unittest.TestCase): startup_program = Program() train_program = Program() with program_guard(train_program, startup_program): - x = paddle.data('X', shape) + x = paddle.fluid.data('X', shape) # 'bool', 'float32', 'float64', 'int32', 'int64' out1 = zeros_like(x) diff --git a/python/paddle/framework/__init__.py b/python/paddle/framework/__init__.py index c3a5e151f3..e52d9da99c 100644 --- a/python/paddle/framework/__init__.py +++ b/python/paddle/framework/__init__.py @@ -14,7 +14,7 @@ # TODO: import framework api under this directory __all__ = [ - 'create_global_var', 'create_parameter', 'ParamAttr', 'Variable', + 'create_parameter', 'ParamAttr', 'CPUPlace', 'CUDAPlace', 'CUDAPinnedPlace', 'get_default_dtype', 'set_default_dtype' ] @@ -29,10 +29,9 @@ from .random import manual_seed from .framework import get_default_dtype from .framework import set_default_dtype -from ..fluid.framework import Variable #DEFINE_ALIAS from ..fluid.framework import ComplexVariable #DEFINE_ALIAS from ..fluid.param_attr import ParamAttr #DEFINE_ALIAS -from ..fluid.layers.tensor import create_global_var #DEFINE_ALIAS +# from ..fluid.layers.tensor import create_global_var #DEFINE_ALIAS from ..fluid.layers.tensor import create_parameter #DEFINE_ALIAS from ..fluid.core import CPUPlace #DEFINE_ALIAS from ..fluid.core import CUDAPlace #DEFINE_ALIAS diff --git a/python/paddle/nn/__init__.py b/python/paddle/nn/__init__.py index b1f3737805..dd435f12e3 100644 --- a/python/paddle/nn/__init__.py +++ b/python/paddle/nn/__init__.py @@ -37,10 +37,10 @@ from .clip import ClipGradByValue #DEFINE_ALIAS # from .clip import set_gradient_clip #DEFINE_ALIAS from .clip import clip #DEFINE_ALIAS from .clip import clip_by_norm #DEFINE_ALIAS -from .control_flow import cond #DEFINE_ALIAS +# from .control_flow import cond 
#DEFINE_ALIAS # from .control_flow import DynamicRNN #DEFINE_ALIAS # from .control_flow import StaticRNN #DEFINE_ALIAS -from .control_flow import while_loop #DEFINE_ALIAS +# from .control_flow import while_loop #DEFINE_ALIAS # from .control_flow import rnn #DEFINE_ALIAS # from .decode import BeamSearchDecoder #DEFINE_ALIAS # from .decode import Decoder #DEFINE_ALIAS @@ -49,7 +49,7 @@ from .control_flow import while_loop #DEFINE_ALIAS # from .decode import crf_decoding #DEFINE_ALIAS # from .decode import ctc_greedy_decoder #DEFINE_ALIAS # from .decode import dynamic_decode #DEFINE_ALIAS -from .decode import gather_tree #DEFINE_ALIAS +# from .decode import gather_tree #DEFINE_ALIAS # from .input import Input #DEFINE_ALIAS from .layer.activation import ELU #DEFINE_ALIAS from .layer.activation import GELU #DEFINE_ALIAS @@ -74,9 +74,6 @@ from .layer.activation import Tanhshrink #DEFINE_ALIAS from .layer.activation import ThresholdedReLU #DEFINE_ALIAS from .layer.activation import LogSoftmax #DEFINE_ALIAS from .layer.activation import Maxout #DEFINE_ALIAS -from .layer.common import BilinearTensorProduct #DEFINE_ALIAS -from .layer.common import Pool2D #DEFINE_ALIAS -from .layer.common import Pad2D #DEFINE_ALIAS from .layer.common import ReflectionPad1d #DEFINE_ALIAS from .layer.common import ReplicationPad1d #DEFINE_ALIAS from .layer.common import ConstantPad1d #DEFINE_ALIAS diff --git a/python/paddle/nn/control_flow.py b/python/paddle/nn/control_flow.py deleted file mode 100644 index a78b65c3c6..0000000000 --- a/python/paddle/nn/control_flow.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# TODO: define the control flow api -from ..fluid.layers import cond #DEFINE_ALIAS -from ..fluid.layers import while_loop #DEFINE_ALIAS - -__all__ = [ - 'cond', - # 'DynamicRNN', - # 'StaticRNN', - 'while_loop', - # 'rnn' -] diff --git a/python/paddle/nn/decode.py b/python/paddle/nn/decode.py deleted file mode 100644 index 214744217e..0000000000 --- a/python/paddle/nn/decode.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
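# paddle.nn.cond and paddle.nn.while_loop are dropped here along with
# nn/control_flow.py; this patch re-exports both from paddle.static.nn instead
# (see the static/nn/__init__.py hunk further down). A minimal sketch of the
# new import path, assuming the rest of this patch is applied:
import paddle

paddle.enable_static()
a = paddle.fluid.layers.fill_constant(shape=[1], dtype='float32', value=1.0)
b = paddle.fluid.layers.fill_constant(shape=[1], dtype='float32', value=2.0)
pred = paddle.fluid.layers.less_than(a, b)
# picks the a + b branch because pred evaluates to True
out = paddle.static.nn.cond(pred, lambda: a + b, lambda: a - b)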
- -# TODO: define api to implement decoding algorithm -# from ..fluid.layers import beam_search #DEFINE_ALIAS -# from ..fluid.layers import beam_search_decode #DEFINE_ALIAS - -from ..fluid.layers import gather_tree #DEFINE_ALIAS - -__all__ = [ - # 'BeamSearchDecoder', - # 'Decoder', - # 'beam_search', - # 'beam_search_decode', - # 'crf_decoding', - # 'ctc_greedy_decoder', - # 'dynamic_decode', - 'gather_tree' -] diff --git a/python/paddle/nn/functional/__init__.py b/python/paddle/nn/functional/__init__.py index b95693f386..574721bd2b 100644 --- a/python/paddle/nn/functional/__init__.py +++ b/python/paddle/nn/functional/__init__.py @@ -30,7 +30,7 @@ __all__ += pooling.__all__ from . import loss __all__ += loss.__all__ from .activation import elu #DEFINE_ALIAS -from .activation import erf #DEFINE_ALIAS +# from .activation import erf #DEFINE_ALIAS from .activation import gelu #DEFINE_ALIAS from .activation import hardshrink #DEFINE_ALIAS from .activation import hardtanh #DEFINE_ALIAS @@ -44,7 +44,7 @@ from .activation import relu #DEFINE_ALIAS from .activation import relu6 #DEFINE_ALIAS from .activation import selu #DEFINE_ALIAS from .activation import sigmoid #DEFINE_ALIAS -from .activation import soft_relu #DEFINE_ALIAS +# from .activation import soft_relu #DEFINE_ALIAS from .activation import softmax #DEFINE_ALIAS from .activation import softplus #DEFINE_ALIAS from .activation import softshrink #DEFINE_ALIAS @@ -61,10 +61,10 @@ from .common import alpha_dropout #DEFINE_ALIAS # from .common import embedding #DEFINE_ALIAS # from .common import fc #DEFINE_ALIAS from .common import label_smooth -from .common import one_hot #DEFINE_ALIAS +# from .common import one_hot #DEFINE_ALIAS from .common import pad #DEFINE_ALIAS -from .common import pad_constant_like #DEFINE_ALIAS -from .common import pad2d #DEFINE_ALIAS +# from .common import pad_constant_like #DEFINE_ALIAS +# from .common import pad2d #DEFINE_ALIAS from .common import cosine_similarity #DEFINE_ALIAS from .common import unfold #DEFINE_ALIAS # from .common import bilinear_tensor_product #DEFINE_ALIAS @@ -79,21 +79,21 @@ from .conv import conv2d #DEFINE_ALIAS from .conv import conv_transpose2d #DEFINE_ALIAS from .conv import conv3d #DEFINE_ALIAS from .conv import conv_transpose3d #DEFINE_ALIAS -from .extension import add_position_encoding #DEFINE_ALIAS +# from .extension import add_position_encoding #DEFINE_ALIAS # from .extension import autoincreased_step_counter #DEFINE_ALIAS -from .extension import continuous_value_model #DEFINE_ALIAS -from .extension import filter_by_instag #DEFINE_ALIAS +# from .extension import continuous_value_model #DEFINE_ALIAS +# from .extension import filter_by_instag #DEFINE_ALIAS # from .extension import linear_chain_crf #DEFINE_ALIAS # from .extension import merge_selected_rows #DEFINE_ALIAS -from .extension import multiclass_nms #DEFINE_ALIAS -from .extension import polygon_box_transform #DEFINE_ALIAS -from .extension import random_crop #DEFINE_ALIAS -from .extension import row_conv #DEFINE_ALIAS -from .extension import rpn_target_assign #DEFINE_ALIAS -from .extension import similarity_focus #DEFINE_ALIAS -from .extension import target_assign #DEFINE_ALIAS -from .extension import temporal_shift #DEFINE_ALIAS -from .extension import warpctc #DEFINE_ALIAS +# from .extension import multiclass_nms #DEFINE_ALIAS +# from .extension import polygon_box_transform #DEFINE_ALIAS +# from .extension import random_crop #DEFINE_ALIAS +# from .extension import row_conv #DEFINE_ALIAS +# from .extension import 
rpn_target_assign #DEFINE_ALIAS +# from .extension import similarity_focus #DEFINE_ALIAS +# from .extension import target_assign #DEFINE_ALIAS +# from .extension import temporal_shift #DEFINE_ALIAS +# from .extension import warpctc #DEFINE_ALIAS from .extension import diag_embed #DEFINE_ALIAS # from .lod import sequence_concat #DEFINE_ALIAS # from .lod import sequence_conv #DEFINE_ALIAS @@ -115,7 +115,7 @@ from .extension import diag_embed #DEFINE_ALIAS # from .lod import array_read #DEFINE_ALIAS # from .lod import array_write #DEFINE_ALIAS # from .lod import create_array #DEFINE_ALIAS -from .lod import hash #DEFINE_ALIAS +# from .lod import hash #DEFINE_ALIAS # from .lod import im2sequence #DEFINE_ALIAS # from .lod import lod_append #DEFINE_ALIAS # from .lod import lod_reset #DEFINE_ALIAS @@ -126,11 +126,10 @@ from .lod import hash #DEFINE_ALIAS # from .lod import dynamic_lstmp #DEFINE_ALIAS from .loss import binary_cross_entropy #DEFINE_ALIAS from .loss import binary_cross_entropy_with_logits #DEFINE_ALIAS -from .loss import bpr_loss #DEFINE_ALIAS -from .loss import center_loss #DEFINE_ALIAS +# from .loss import bpr_loss #DEFINE_ALIAS +# from .loss import center_loss #DEFINE_ALIAS from .loss import cross_entropy #DEFINE_ALIAS from .loss import dice_loss #DEFINE_ALIAS -from .loss import edit_distance #DEFINE_ALIAS from .loss import hsigmoid_loss #DEFINE_ALIAS from .loss import iou_similarity #DEFINE_ALIAS from .loss import kl_div #DEFINE_ALIAS @@ -141,15 +140,13 @@ from .loss import mse_loss #DEFINE_ALIAS from .loss import nll_loss #DEFINE_ALIAS # from .loss import nce #DEFINE_ALIAS from .loss import npair_loss #DEFINE_ALIAS -from .loss import rank_loss #DEFINE_ALIAS -from .loss import sampled_softmax_with_cross_entropy #DEFINE_ALIAS from .loss import sigmoid_focal_loss #DEFINE_ALIAS -from .loss import smooth_l1 #DEFINE_ALIAS +# from .loss import smooth_l1 #DEFINE_ALIAS from .loss import smooth_l1_loss #DEFINE_ALIAS from .loss import softmax_with_cross_entropy #DEFINE_ALIAS from .loss import square_error_cost #DEFINE_ALIAS from .loss import ssd_loss #DEFINE_ALIAS -from .loss import teacher_student_sigmoid_loss #DEFINE_ALIAS +# from .loss import teacher_student_sigmoid_loss #DEFINE_ALIAS from .loss import ctc_loss #DEFINE_ALIAS # from .norm import data_norm #DEFINE_ALIAS # from .norm import group_norm #DEFINE_ALIAS @@ -159,8 +156,8 @@ from .norm import layer_norm #DEFINE_ALIAS from .norm import local_response_norm #DEFINE_ALIAS from .norm import normalize #DEFINE_ALIAS # from .norm import spectral_norm #DEFINE_ALIAS -from .pooling import pool2d #DEFINE_ALIAS -from .pooling import pool3d #DEFINE_ALIAS +# from .pooling import pool2d #DEFINE_ALIAS +# from .pooling import pool3d #DEFINE_ALIAS from .pooling import avg_pool1d #DEFINE_ALIAS from .pooling import avg_pool2d #DEFINE_ALIAS from .pooling import avg_pool3d #DEFINE_ALIAS @@ -175,43 +172,47 @@ from .pooling import adaptive_avg_pool1d #DEFINE_ALIAS from .pooling import adaptive_avg_pool2d #DEFINE_ALIAS from .pooling import adaptive_avg_pool3d #DEFINE_ALIAS -from .rnn import rnn #DEFINE_ALIAS -from .rnn import birnn #DEFINE_ALIAS +# from .rnn import rnn #DEFINE_ALIAS +# from .rnn import birnn #DEFINE_ALIAS # from .rnn import gru_unit #DEFINE_ALIAS # from .rnn import lstm #DEFINE_ALIAS # from .rnn import lstm_unit #DEFINE_ALIAS -from .vision import affine_channel #DEFINE_ALIAS +# from .vision import affine_channel #DEFINE_ALIAS from .vision import affine_grid #DEFINE_ALIAS -from .vision import anchor_generator #DEFINE_ALIAS -from .vision 
import bipartite_match #DEFINE_ALIAS -from .vision import box_clip #DEFINE_ALIAS -from .vision import box_coder #DEFINE_ALIAS -from .vision import box_decoder_and_assign #DEFINE_ALIAS -from .vision import collect_fpn_proposals #DEFINE_ALIAS +# from .vision import anchor_generator #DEFINE_ALIAS +# from .vision import bipartite_match #DEFINE_ALIAS +# from .vision import box_clip #DEFINE_ALIAS +# from .vision import box_coder #DEFINE_ALIAS +# from .vision import box_decoder_and_assign #DEFINE_ALIAS +# from .vision import collect_fpn_proposals #DEFINE_ALIAS # from .vision import deformable_conv #DEFINE_ALIAS -from .vision import deformable_roi_pooling #DEFINE_ALIAS -from .vision import density_prior_box #DEFINE_ALIAS -from .vision import detection_output #DEFINE_ALIAS -from .vision import distribute_fpn_proposals #DEFINE_ALIAS -from .vision import fsp_matrix #DEFINE_ALIAS -from .vision import generate_mask_labels #DEFINE_ALIAS -from .vision import generate_proposal_labels #DEFINE_ALIAS -from .vision import generate_proposals #DEFINE_ALIAS +# from .vision import deformable_roi_pooling #DEFINE_ALIAS +# from .vision import density_prior_box #DEFINE_ALIAS +# from .vision import detection_output #DEFINE_ALIAS +# from .vision import distribute_fpn_proposals #DEFINE_ALIAS +# from .vision import fsp_matrix #DEFINE_ALIAS +# from .vision import generate_mask_labels #DEFINE_ALIAS +# from .vision import generate_proposal_labels #DEFINE_ALIAS +# from .vision import generate_proposals #DEFINE_ALIAS from .vision import grid_sample #DEFINE_ALIAS -from .vision import image_resize_short #DEFINE_ALIAS +# from .vision import image_resize #DEFINE_ALIAS +# from .vision import image_resize_short #DEFINE_ALIAS # from .vision import multi_box_head #DEFINE_ALIAS from .vision import pixel_shuffle #DEFINE_ALIAS -from .vision import prior_box #DEFINE_ALIAS -from .vision import prroi_pool #DEFINE_ALIAS -from .vision import psroi_pool #DEFINE_ALIAS -from .vision import retinanet_detection_output #DEFINE_ALIAS -from .vision import retinanet_target_assign #DEFINE_ALIAS -from .vision import roi_align #DEFINE_ALIAS -from .vision import roi_perspective_transform #DEFINE_ALIAS -from .vision import roi_pool #DEFINE_ALIAS -from .vision import shuffle_channel #DEFINE_ALIAS -from .vision import space_to_depth #DEFINE_ALIAS -from .vision import yolo_box #DEFINE_ALIAS -from .vision import yolov3_loss #DEFINE_ALIAS +# from .vision import prior_box #DEFINE_ALIAS +# from .vision import prroi_pool #DEFINE_ALIAS +# from .vision import psroi_pool #DEFINE_ALIAS +# from .vision import resize_bilinear #DEFINE_ALIAS +# from .vision import resize_nearest #DEFINE_ALIAS +# from .vision import resize_trilinear #DEFINE_ALIAS +# from .vision import retinanet_detection_output #DEFINE_ALIAS +# from .vision import retinanet_target_assign #DEFINE_ALIAS +# from .vision import roi_align #DEFINE_ALIAS +# from .vision import roi_perspective_transform #DEFINE_ALIAS +# from .vision import roi_pool #DEFINE_ALIAS +# from .vision import shuffle_channel #DEFINE_ALIAS +# from .vision import space_to_depth #DEFINE_ALIAS +# from .vision import yolo_box #DEFINE_ALIAS +# from .vision import yolov3_loss #DEFINE_ALIAS from .input import one_hot #DEFINE_ALIAS from .input import embedding #DEFINE_ALIAS diff --git a/python/paddle/nn/functional/activation.py b/python/paddle/nn/functional/activation.py index 33ecd29162..0f79aa012c 100644 --- a/python/paddle/nn/functional/activation.py +++ b/python/paddle/nn/functional/activation.py @@ -13,14 +13,18 @@ # limitations under the 
License. # TODO: define activation functions of neural network -from ...fluid.layers import erf #DEFINE_ALIAS -from ...fluid.layers import soft_relu #DEFINE_ALIAS +from ...fluid.layers import brelu #DEFINE_ALIAS +# from ...fluid.layers import erf #DEFINE_ALIAS +from ...fluid.layers import hard_sigmoid #DEFINE_ALIAS +from ...fluid.layers import hard_swish #DEFINE_ALIAS +from ...fluid.layers import maxout #DEFINE_ALIAS +# from ...fluid.layers import soft_relu #DEFINE_ALIAS +from ...fluid.layers import swish #DEFINE_ALIAS from ...fluid.layers import sigmoid #DEFINE_ALIAS from ...tensor.math import tanh #DEFINE_ALIAS __all__ = [ 'elu', - 'erf', 'gelu', 'hardshrink', 'hardtanh', @@ -33,7 +37,6 @@ __all__ = [ 'relu', 'relu6', 'selu', - 'soft_relu', 'softmax', 'softplus', 'softshrink', diff --git a/python/paddle/nn/functional/common.py b/python/paddle/nn/functional/common.py index 81c38c0be6..0b18dec943 100644 --- a/python/paddle/nn/functional/common.py +++ b/python/paddle/nn/functional/common.py @@ -20,13 +20,12 @@ from paddle.fluid.layers.tensor import Variable, fill_constant, zeros, concat from ...fluid.layers import core from ...fluid import dygraph_utils # TODO: define the common functions to build a neural network -from ...fluid import one_hot #DEFINE_ALIAS -from ...fluid.layers import pad2d #DEFINE_ALIAS +# from ...fluid import one_hot #DEFINE_ALIAS +# from ...fluid.layers import pad2d #DEFINE_ALIAS from ...fluid.layers import unfold #DEFINE_ALIAS from ...fluid.layers import assign #DEFINE_ALIAS from ...fluid.layers import squeeze #DEFINE_ALIAS from ...fluid.layers import unsqueeze #DEFINE_ALIAS -from ...fluid.layers import elementwise_mul #DEFINE_ALIAS from ...tensor import clip from ...tensor import sum from ...tensor import sqrt @@ -36,7 +35,7 @@ from ...fluid.data_feeder import check_variable_and_dtype, check_dtype from ...fluid.framework import Variable, in_dygraph_mode, _varbase_creator #from ...fluid.layers import fc #DEFINE_ALIAS -from ...fluid.layers import pad_constant_like #DEFINE_ALIAS +# from ...fluid.layers import pad_constant_like #DEFINE_ALIAS from ...fluid.framework import in_dygraph_mode from ...fluid import core, dygraph_utils from ...fluid import core, layers @@ -51,10 +50,7 @@ __all__ = [ # 'fc', 'label_smooth', 'linear', - 'one_hot', 'pad', - 'pad_constant_like', - 'pad2d', 'unfold', # 'bilinear_tensor_product', 'assign', @@ -1395,9 +1391,9 @@ def cosine_similarity(x1, x2, axis=1, eps=1e-8): # [0.99806249 0.9817672 0.94987036] """ - w12 = sum(elementwise_mul(x1, x2), axis=axis) - w1 = sum(elementwise_mul(x1, x1), axis=axis) - w2 = sum(elementwise_mul(x2, x2), axis=axis) + w12 = sum(paddle.multiply(x1, x2), axis=axis) + w1 = sum(paddle.multiply(x1, x1), axis=axis) + w2 = sum(paddle.multiply(x2, x2), axis=axis) n12 = sqrt(clip(w1 * w2, min=eps * eps)) cos_sim = w12 / n12 return cos_sim diff --git a/python/paddle/nn/functional/extension.py b/python/paddle/nn/functional/extension.py index 87210b3832..4ec0f8407f 100644 --- a/python/paddle/nn/functional/extension.py +++ b/python/paddle/nn/functional/extension.py @@ -13,36 +13,10 @@ # limitations under the License. 
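# With the fluid elementwise_mul alias gone from nn/functional/common.py,
# cosine_similarity above now composes paddle.multiply. A short sketch of the
# unchanged public entry point (random inputs; dynamic mode, the 2.0 default,
# is assumed):
import numpy as np
import paddle
import paddle.nn.functional as F

x1 = paddle.to_tensor(np.random.rand(2, 3).astype('float32'))
x2 = paddle.to_tensor(np.random.rand(2, 3).astype('float32'))
sim = F.cosine_similarity(x1, x2, axis=1)  # reduces axis 1, leaving shape [2]
print(sim.numpy())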
# TODO: define the extention functions -from ...fluid.layers import add_position_encoding #DEFINE_ALIAS -from ...fluid.layers import multiclass_nms #DEFINE_ALIAS -from ...fluid.layers import target_assign #DEFINE_ALIAS -from ...fluid.layers import temporal_shift #DEFINE_ALIAS - -from ...fluid.layers import continuous_value_model #DEFINE_ALIAS -from ...fluid.layers import filter_by_instag #DEFINE_ALIAS -from ...fluid.layers import polygon_box_transform #DEFINE_ALIAS -from ...fluid.layers import random_crop #DEFINE_ALIAS -from ...fluid.layers import rpn_target_assign #DEFINE_ALIAS -from ...fluid.layers import similarity_focus #DEFINE_ALIAS -from ...fluid.layers import warpctc #DEFINE_ALIAS __all__ = [ - 'add_position_encoding', - # 'autoincreased_step_counter', - 'continuous_value_model', - 'filter_by_instag', - # 'linear_chain_crf', - # 'merge_selected_rows', - 'multiclass_nms', - 'polygon_box_transform', - 'random_crop', - 'row_conv', - 'rpn_target_assign', - 'similarity_focus', - 'target_assign', - 'temporal_shift', - 'warpctc', - 'diag_embed' + 'diag_embed', + 'row_conv' ] import numpy as np @@ -176,8 +150,6 @@ def diag_embed(input, offset=0, dim1=-2, dim2=-1): @templatedoc() def row_conv(input, weight, act=None): """ - :alias_main: paddle.nn.functional.row_conv - :alias: paddle.nn.functional.row_conv,paddle.nn.functional.extension.row_conv ${comment} @@ -217,7 +189,7 @@ def row_conv(input, weight, act=None): with dg.guard(place): x_var = dg.to_variable(x) w_var = dg.to_variable(weight) - y_var = F.row_conv(x_var, w_var) + y_var = F.extension.row_conv(x_var, w_var) y_np = y_var.numpy() print(y_np.shape) diff --git a/python/paddle/nn/functional/input.py b/python/paddle/nn/functional/input.py index 0794b95c80..b22a3bd1be 100644 --- a/python/paddle/nn/functional/input.py +++ b/python/paddle/nn/functional/input.py @@ -74,7 +74,7 @@ def one_hot(x, num_classes, name=None): import paddle # Correspond to the first example above, where label.shape is 4 and one_hot_label.shape is [4, 4]. - label = paddle.data(name="label", shape=[4, 1], dtype="int64") + label = paddle.fluid.data(name="label", shape=[4, 1], dtype="int64") # label.shape = [4] # label.data = [1, 1, 3, 0] one_hot_label = paddle.nn.functional.one_hot(x=label, num_classes=4) @@ -183,7 +183,7 @@ def embedding(x, weight, padding_idx=None, sparse=False, name=None): weight = prog.global_block().create_parameter( (128, 100), dtype="float32", default_initializer=Constant(1.0)) - label = paddle.data( + label = paddle.fluid.data( name="label", shape=[4], append_batch_size=False, diff --git a/python/paddle/nn/functional/lod.py b/python/paddle/nn/functional/lod.py deleted file mode 100644 index 266e3f9c71..0000000000 --- a/python/paddle/nn/functional/lod.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
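# row_conv survives only under the extension namespace, which is why the
# docstring and the RowConv layer elsewhere in this patch switch to
# F.extension.row_conv. A dygraph sketch along the same lines (random inputs,
# CPU place, and a future context of 2 steps assumed; the filter carries
# future_context + 1 rows):
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.dygraph as dg
import paddle.nn.functional as F

batch, steps, feats, future_context = 4, 8, 6, 2
x = np.random.randn(batch, steps, feats).astype(np.float32)
w = np.random.randn(future_context + 1, feats).astype(np.float32)
with dg.guard(fluid.CPUPlace()):
    y = F.extension.row_conv(dg.to_variable(x), dg.to_variable(w))
    print(y.numpy().shape)  # (4, 8, 6)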
- -# TODO: define functions which accept only LoDTensor as input -from ...fluid.layers import hash #DEFINE_ALIAS - -__all__ = [ - # 'sequence_concat', - # 'sequence_conv', - # 'sequence_enumerate', - # 'sequence_expand_as', - # 'sequence_expand', - # 'sequence_first_step', - # 'sequence_last_step', - # 'sequence_mask', - # 'sequence_pad', - # 'sequence_pool', - # 'sequence_reshape', - # 'sequence_reverse', - # 'sequence_scatter', - # 'sequence_slice', - # 'sequence_softmax', - # 'sequence_unpad', - # 'array_length', - # 'array_read', - # 'array_write', - # 'create_array', - 'hash', - # 'im2sequence', - # 'lod_append', - # 'lod_reset', - # 'reorder_lod_tensor_by_rank', - # 'tensor_array_to_tensor', - # 'dynamic_gru', - # 'dynamic_lstm', - # 'dynamic_lstmp' -] diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py index 4102ec3fa0..2ca2015b72 100644 --- a/python/paddle/nn/functional/loss.py +++ b/python/paddle/nn/functional/loss.py @@ -23,19 +23,14 @@ import paddle import paddle.fluid as fluid from ...fluid.framework import core, in_dygraph_mode from ...fluid.layers.nn import _elementwise_op_in_dygraph -from ...fluid.layers import bpr_loss #DEFINE_ALIAS -from ...fluid.layers import center_loss #DEFINE_ALIAS from ...fluid.layers import dice_loss #DEFINE_ALIAS from ...fluid.layers import iou_similarity #DEFINE_ALIAS from ...fluid.layers import log_loss #DEFINE_ALIAS from ...fluid.layers import npair_loss #DEFINE_ALIAS -from ...fluid.layers import rank_loss #DEFINE_ALIAS from ...fluid.layers import reshape -from ...fluid.layers import smooth_l1 #DEFINE_ALIAS from ...fluid.layers import softmax_with_cross_entropy #DEFINE_ALIAS from ...fluid.layers import square_error_cost #DEFINE_ALIAS from ...fluid.layers import ssd_loss #DEFINE_ALIAS -from ...fluid.layers import teacher_student_sigmoid_loss #DEFINE_ALIAS from ...fluid.layers import edit_distance #DEFINE_ALIAS from ...fluid.layers import sampled_softmax_with_cross_entropy #DEFINE_ALIAS @@ -48,11 +43,8 @@ from ...fluid.framework import Variable __all__ = [ 'binary_cross_entropy', 'binary_cross_entropy_with_logits', - 'bpr_loss', - 'center_loss', 'cross_entropy', 'dice_loss', - 'edit_distance', 'hsigmoid_loss', 'iou_similarity', 'kl_div', @@ -63,15 +55,11 @@ __all__ = [ # 'nce', 'nll_loss', 'npair_loss', - 'rank_loss', - 'sampled_softmax_with_cross_entropy', 'sigmoid_focal_loss', - 'smooth_l1', 'smooth_l1_loss', 'softmax_with_cross_entropy', 'square_error_cost', 'ssd_loss', - 'teacher_student_sigmoid_loss', 'ctc_loss', ] @@ -179,7 +167,7 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean', outputs={'Out': [out]}) if weight is not None: - if isinstance(weight, paddle.framework.Variable): + if isinstance(weight, paddle.static.Variable): weight_name = name if reduction is 'none' else None out = paddle.multiply(out, weight, axis=-1, name=weight_name) else: @@ -317,13 +305,13 @@ def binary_cross_entropy_with_logits(logit, out = paddle.fluid.layers.sigmoid_cross_entropy_with_logits( logit, label, name=sigmoid_name) - one = paddle.fill_constant(shape=[1], value=1.0, dtype=logit.dtype) + one = paddle.fluid.layers.fill_constant(shape=[1], value=1.0, dtype=logit.dtype) if pos_weight is not None: fluid.data_feeder.check_variable_and_dtype( pos_weight, 'pos_weight', ['float32', 'float64'], 'binary_cross_entropy_with_logits') log_weight = paddle.add( - paddle.multiply(label, paddle.elementwise_sub(pos_weight, one)), + paddle.multiply(label, paddle.fluid.layers.elementwise_sub(pos_weight, one)), one) 
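# The hunk above spells out the fluid originals for the removed
# paddle.fill_constant and paddle.elementwise_sub aliases. The log_weight term
# computes label * (pos_weight - 1) + 1; a standalone dygraph sketch of that
# expression with toy values (shapes chosen so the [1]-shaped constant
# broadcasts):
import paddle

label = paddle.to_tensor([1.0, 0.0, 1.0])
pos_weight = paddle.to_tensor([2.0, 2.0, 2.0])
one = paddle.fluid.layers.fill_constant(shape=[1], value=1.0, dtype='float32')
log_weight = paddle.add(
    paddle.multiply(label, paddle.fluid.layers.elementwise_sub(pos_weight, one)),
    one)
print(log_weight.numpy())  # [2. 1. 2.]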
pos_weight_name = name if reduction == 'none' and weight is None else None out = paddle.multiply(out, log_weight, name=pos_weight_name) @@ -625,12 +613,12 @@ def margin_ranking_loss(input, fluid.data_feeder.check_variable_and_dtype( label, 'label', ['float32', 'float64'], 'margin_rank_loss') - out = paddle.elementwise_sub(other, input) + out = paddle.fluid.layers.elementwise_sub(other, input) out = paddle.multiply(out, label) if margin != 0.0: margin_var = out.block.create_var(dtype=out.dtype) - paddle.fill_constant([1], out.dtype, margin, out=margin_var) + paddle.fluid.layers.fill_constant([1], out.dtype, margin, out=margin_var) out = paddle.add(out, margin_var) result_out = helper.create_variable_for_type_inference(input.dtype) @@ -735,13 +723,13 @@ def l1_loss(input, label, reduction='mean', name=None): label, 'label', ['float32', 'float64', 'int32', 'int64'], 'l1_loss') if reduction == 'sum': - unreduced = paddle.elementwise_sub(input, label, act='abs') + unreduced = paddle.fluid.layers.elementwise_sub(input, label, act='abs') return paddle.sum(unreduced, name=name) elif reduction == 'mean': - unreduced = paddle.elementwise_sub(input, label, act='abs') + unreduced = paddle.fluid.layers.elementwise_sub(input, label, act='abs') return paddle.mean(unreduced, name=name) else: - return paddle.elementwise_sub(input, label, act='abs', name=name) + return paddle.fluid.layers.elementwise_sub(input, label, act='abs', name=name) def nll_loss(input, @@ -1008,8 +996,8 @@ def mse_loss(input, label, reduction='mean', name=None): # static graph mode paddle.enable_static() mse_loss = paddle.nn.loss.MSELoss() - input = paddle.data(name="input", shape=[1]) - label = paddle.data(name="label", shape=[1]) + input = paddle.fluid.data(name="input", shape=[1]) + label = paddle.fluid.data(name="label", shape=[1]) place = paddle.CPUPlace() output = mse_loss(input,label) @@ -1354,7 +1342,7 @@ def sigmoid_focal_loss(logit, label = paddle.to_tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype='float32') one = paddle.to_tensor([1.], dtype='float32') fg_label = paddle.greater_equal(label, one) - fg_num = paddle.reduce_sum(paddle.cast(fg_label, dtype='float32')) + fg_num = paddle.fluid.layers.reduce_sum(paddle.cast(fg_label, dtype='float32')) output = paddle.nn.functional.sigmoid_focal_loss(logit, label, normalizer=fg_num) print(output.numpy()) # [0.65782464] diff --git a/python/paddle/nn/functional/norm.py b/python/paddle/nn/functional/norm.py index c2e01cb82f..4a6d4abfe8 100644 --- a/python/paddle/nn/functional/norm.py +++ b/python/paddle/nn/functional/norm.py @@ -109,8 +109,8 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None): helper.append_op( type='p_norm', inputs={'X': x}, outputs={'Out': out}, attrs=attrs) eps = out.block.create_var(dtype=out.dtype) - paddle.fill_constant([1], out.dtype, epsilon, out=eps) - return paddle.elementwise_div(x, paddle.maximum(out, eps), name=name) + paddle.fluid.layers.fill_constant([1], out.dtype, epsilon, out=eps) + return paddle.fluid.layers.elementwise_div(x, paddle.maximum(out, eps), name=name) def batch_norm(x, diff --git a/python/paddle/nn/functional/pooling.py b/python/paddle/nn/functional/pooling.py index 1b8e1fb576..73652ff126 100755 --- a/python/paddle/nn/functional/pooling.py +++ b/python/paddle/nn/functional/pooling.py @@ -13,16 +13,12 @@ # limitations under the License. 
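# pool2d and pool3d lose their functional aliases in the pooling.py hunk that
# follows; the shape-specific 2.0 entry points (avg_pool1d/2d/3d,
# max_pool1d/2d/3d, and the adaptive variants) stay exported. A dygraph sketch
# of the usual replacement for a pool2d call (random NCHW input assumed):
import paddle
import paddle.nn.functional as F

x = paddle.randn([1, 3, 32, 32])
y = F.avg_pool2d(x, kernel_size=2, stride=2)
print(y.shape)  # [1, 3, 16, 16]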
# TODO: define pooling functions -from ...fluid.layers import pool2d #DEFINE_ALIAS -from ...fluid.layers import pool3d #DEFINE_ALIAS from ...fluid import core from ...fluid.framework import in_dygraph_mode from ...fluid.layers import utils, LayerHelper, unsqueeze, squeeze from ...fluid.data_feeder import check_type, check_variable_and_dtype __all__ = [ - 'pool2d', - 'pool3d', 'avg_pool1d', 'avg_pool2d', 'avg_pool3d', diff --git a/python/paddle/nn/functional/rnn.py b/python/paddle/nn/functional/rnn.py deleted file mode 100644 index b7a97bc5aa..0000000000 --- a/python/paddle/nn/functional/rnn.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle.fluid.layers.rnn import rnn, birnn - -__all__ = ['rnn', 'birnn'] diff --git a/python/paddle/nn/functional/vision.py b/python/paddle/nn/functional/vision.py index 3d18281b60..5e1cb377bd 100644 --- a/python/paddle/nn/functional/vision.py +++ b/python/paddle/nn/functional/vision.py @@ -20,71 +20,44 @@ from ...fluid import dygraph_utils import numpy as np # TODO: define specitial functions used in computer vision task -from ...fluid.layers import affine_channel #DEFINE_ALIAS -from ...fluid.layers import anchor_generator #DEFINE_ALIAS -from ...fluid.layers import bipartite_match #DEFINE_ALIAS -from ...fluid.layers import box_clip #DEFINE_ALIAS -from ...fluid.layers import box_coder #DEFINE_ALIAS -from ...fluid.layers import box_decoder_and_assign #DEFINE_ALIAS -from ...fluid.layers import collect_fpn_proposals #DEFINE_ALIAS -from ...fluid.layers import deformable_roi_pooling #DEFINE_ALIAS -from ...fluid.layers import density_prior_box #DEFINE_ALIAS -from ...fluid.layers import detection_output #DEFINE_ALIAS -from ...fluid.layers import distribute_fpn_proposals #DEFINE_ALIAS -from ...fluid.layers import generate_mask_labels #DEFINE_ALIAS -from ...fluid.layers import generate_proposal_labels #DEFINE_ALIAS -from ...fluid.layers import generate_proposals #DEFINE_ALIAS -from ...fluid.layers import prior_box #DEFINE_ALIAS -from ...fluid.layers import prroi_pool #DEFINE_ALIAS -from ...fluid.layers import psroi_pool #DEFINE_ALIAS -from ...fluid.layers import roi_align #DEFINE_ALIAS -from ...fluid.layers import roi_pool #DEFINE_ALIAS -from ...fluid.layers import space_to_depth #DEFINE_ALIAS -from ...fluid.layers import yolo_box #DEFINE_ALIAS -from ...fluid.layers import yolov3_loss #DEFINE_ALIAS - -from ...fluid.layers import fsp_matrix #DEFINE_ALIAS -from ...fluid.layers import image_resize_short #DEFINE_ALIAS +# from ...fluid.layers import affine_channel #DEFINE_ALIAS +# from ...fluid.layers import anchor_generator #DEFINE_ALIAS +# from ...fluid.layers import bipartite_match #DEFINE_ALIAS +# from ...fluid.layers import box_clip #DEFINE_ALIAS +# from ...fluid.layers import box_coder #DEFINE_ALIAS +# from ...fluid.layers import box_decoder_and_assign #DEFINE_ALIAS +# from ...fluid.layers import collect_fpn_proposals #DEFINE_ALIAS +# from ...fluid.layers import 
deformable_roi_pooling #DEFINE_ALIAS +# from ...fluid.layers import density_prior_box #DEFINE_ALIAS +# from ...fluid.layers import detection_output #DEFINE_ALIAS +# from ...fluid.layers import distribute_fpn_proposals #DEFINE_ALIAS +# from ...fluid.layers import generate_mask_labels #DEFINE_ALIAS +# from ...fluid.layers import generate_proposal_labels #DEFINE_ALIAS +# from ...fluid.layers import generate_proposals #DEFINE_ALIAS +# from ...fluid.layers import image_resize #DEFINE_ALIAS +# from ...fluid.layers import prior_box #DEFINE_ALIAS +# from ...fluid.layers import prroi_pool #DEFINE_ALIAS +# from ...fluid.layers import psroi_pool #DEFINE_ALIAS +# from ...fluid.layers import resize_bilinear #DEFINE_ALIAS +# from ...fluid.layers import resize_nearest #DEFINE_ALIAS +# from ...fluid.layers import resize_trilinear #DEFINE_ALIAS +# from ...fluid.layers import roi_align #DEFINE_ALIAS +# from ...fluid.layers import roi_pool #DEFINE_ALIAS +# from ...fluid.layers import space_to_depth #DEFINE_ALIAS +# from ...fluid.layers import yolo_box #DEFINE_ALIAS +# from ...fluid.layers import yolov3_loss #DEFINE_ALIAS +# from ...fluid.layers import fsp_matrix #DEFINE_ALIAS +# from ...fluid.layers import image_resize_short #DEFINE_ALIAS # from ...fluid.layers import pixel_shuffle #DEFINE_ALIAS -from ...fluid.layers import retinanet_detection_output #DEFINE_ALIAS -from ...fluid.layers import retinanet_target_assign #DEFINE_ALIAS -from ...fluid.layers import roi_perspective_transform #DEFINE_ALIAS -from ...fluid.layers import shuffle_channel #DEFINE_ALIAS +# from ...fluid.layers import retinanet_detection_output #DEFINE_ALIAS +# from ...fluid.layers import retinanet_target_assign #DEFINE_ALIAS +# from ...fluid.layers import roi_perspective_transform #DEFINE_ALIAS +# from ...fluid.layers import shuffle_channel #DEFINE_ALIAS __all__ = [ - 'affine_channel', 'affine_grid', - 'anchor_generator', - 'bipartite_match', - 'box_clip', - 'box_coder', - 'box_decoder_and_assign', - 'collect_fpn_proposals', - # 'deformable_conv', - 'deformable_roi_pooling', - 'density_prior_box', - 'detection_output', - 'distribute_fpn_proposals', - 'fsp_matrix', - 'generate_mask_labels', - 'generate_proposal_labels', - 'generate_proposals', 'grid_sample', - 'image_resize_short', - # 'multi_box_head', - 'pixel_shuffle', - 'prior_box', - 'prroi_pool', - 'psroi_pool', - 'retinanet_detection_output', - 'retinanet_target_assign', - 'roi_align', - 'roi_perspective_transform', - 'roi_pool', - 'shuffle_channel', - 'space_to_depth', - 'yolo_box', - 'yolov3_loss' + 'pixel_shuffle' ] diff --git a/python/paddle/nn/layer/common.py b/python/paddle/nn/layer/common.py index 05cbd96863..bf2c58d45c 100644 --- a/python/paddle/nn/layer/common.py +++ b/python/paddle/nn/layer/common.py @@ -13,6 +13,7 @@ # limitations under the License. # TODO: define the common classes to build a neural network +import paddle from ...fluid.dygraph import BilinearTensorProduct #DEFINE_ALIAS from ...fluid.dygraph import Pool2D #DEFINE_ALIAS from ...fluid.dygraph import Flatten #DEFINE_ALIAS @@ -560,8 +561,6 @@ class UpsamplingBilinear2d(layers.Layer): class Pad2D(layers.Layer): """ - :alias_main: paddle.nn.Pad2D - :alias: paddle.nn.Pad2D,paddle.nn.layer.Pad2D,paddle.nn.layer.common.Pad2D This interface is used to construct a callable object of the ``Pad2D`` class. The Pad2D layer pads the input tensor boundaries according to 'paddings' and 'mode'. 
If mode is 'reflect', paddings[0] and paddings[1] must be no greater @@ -611,7 +610,7 @@ class Pad2D(layers.Layer): import paddle.nn as nn import numpy as np data = np.ones((2, 2, 2, 2)).astype('float32') - my_pad = nn.Pad2D(paddings=[1, 1, 1, 1]) + my_pad = nn.layer.Pad2D(paddings=[1, 1, 1, 1]) with fluid.dygraph.guard(): data = fluid.dygraph.to_variable(data) result = my_pad(data) @@ -630,7 +629,7 @@ class Pad2D(layers.Layer): int) else paddings def forward(self, input): - return F.pad2d( + return paddle.fluid.layers.pad2d( input, paddings=self._paddings, mode=self._mode, diff --git a/python/paddle/nn/layer/distance.py b/python/paddle/nn/layer/distance.py index 334b71151b..28b29a583d 100644 --- a/python/paddle/nn/layer/distance.py +++ b/python/paddle/nn/layer/distance.py @@ -86,7 +86,7 @@ class PairwiseDistance(layers.Layer): 'PairwiseDistance') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'PairwiseDistance') - sub = paddle.elementwise_sub(x, y) + sub = paddle.fluid.layers.elementwise_sub(x, y) helper = LayerHelper("PairwiseDistance", name=self.name) attrs = { diff --git a/python/paddle/nn/layer/extension.py b/python/paddle/nn/layer/extension.py index 01ca472315..3972a1b715 100644 --- a/python/paddle/nn/layer/extension.py +++ b/python/paddle/nn/layer/extension.py @@ -102,5 +102,5 @@ class RowConv(layers.Layer): filter_shape, attr=param_attr, dtype=dtype) def forward(self, input): - out = F.row_conv(input, self.weight, act=self._act) + out = F.extension.row_conv(input, self.weight, act=self._act) return out diff --git a/python/paddle/nn/layer/rnn.py b/python/paddle/nn/layer/rnn.py index 7e9c31f422..ebb29ccfbb 100644 --- a/python/paddle/nn/layer/rnn.py +++ b/python/paddle/nn/layer/rnn.py @@ -807,7 +807,7 @@ class RNN(Layer): initial_states=None, sequence_length=None, **kwargs): - final_outputs, final_states = F.rnn(self.cell, + final_outputs, final_states = paddle.fluid.layers.rnn(self.cell, inputs, initial_states=initial_states, sequence_length=sequence_length, @@ -909,7 +909,7 @@ class BiRNN(Layer): assert len(initial_states) == 2, \ "length of initial_states should be 2 when it is a list/tuple" - outputs, final_states = F.birnn(self.cell_fw, self.cell_bw, inputs, + outputs, final_states = paddle.fluid.layers.birnn(self.cell_fw, self.cell_bw, inputs, initial_states, sequence_length, self.time_major, **kwargs) return outputs, final_states diff --git a/python/paddle/optimizer/__init__.py b/python/paddle/optimizer/__init__.py index 1ca52a806d..e75fbb2f20 100644 --- a/python/paddle/optimizer/__init__.py +++ b/python/paddle/optimizer/__init__.py @@ -13,17 +13,14 @@ # limitations under the License. 
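# With F.rnn and F.birnn removed, the RNN and BiRNN layers above delegate to
# paddle.fluid.layers.rnn / birnn directly; user code keeps the layer API. A
# dygraph sketch with a SimpleRNNCell (hypothetical sizes):
import paddle

cell = paddle.nn.SimpleRNNCell(16, 32)
rnn = paddle.nn.RNN(cell)
inputs = paddle.randn((4, 23, 16))  # [batch, time, input_size]
outputs, final_state = rnn(inputs)
print(outputs.shape)  # [4, 23, 32]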
__all__ = [ - 'Adadelta', 'AdadeltaOptimizer', 'Adagrad', 'AdagradOptimizer', 'Adam', - 'Adamax', 'AdamW', 'DecayedAdagrad', 'DecayedAdagradOptimizer', 'Dpsgd', - 'DpsgdOptimizer', 'Ftrl', 'FtrlOptimizer', 'Momentum', 'MomentumOptimizer', - 'RMSProp', 'SGD', 'SGDOptimizer', 'Optimizer' + 'Adadelta', 'Adam', 'Adamax', 'AdamW', 'Momentum', 'MomentumOptimizer', + 'RMSProp', 'SGD', 'SGDOptimizer', 'Optimizer', '_LRScheduler', 'NoamLR', + 'PiecewiseLR', 'NaturalExpLR', 'InverseTimeLR', 'PolynomialLR', + 'LinearLrWarmup', 'ExponentialLR', 'MultiStepLR', 'StepLR', 'LambdaLR', + 'ReduceLROnPlateau', 'CosineAnnealingLR' ] -from ..fluid.optimizer import Momentum, Dpsgd, DecayedAdagrad, Ftrl,\ - AdagradOptimizer, DpsgdOptimizer, DecayedAdagradOptimizer, \ - FtrlOptimizer, AdadeltaOptimizer - from .optimizer import Optimizer from .adagrad import Adagrad from .adam import Adam diff --git a/python/paddle/optimizer/lr.py b/python/paddle/optimizer/lr.py index fc7752c444..ab2c0fe905 100644 --- a/python/paddle/optimizer/lr.py +++ b/python/paddle/optimizer/lr.py @@ -229,7 +229,7 @@ class NoamDecay(LRScheduler): for batch_id in range(2): x = paddle.uniform([10, 10]) out = linear(x) - loss = paddle.reduce_mean(out) + loss = paddle.fluid.layers.reduce_mean(out) loss.backward() sgd.step() sgd.clear_gradients() @@ -325,7 +325,7 @@ class PiecewiseDecay(LRScheduler): for batch_id in range(2): x = paddle.uniform([10, 10]) out = linear(x) - loss = paddle.reduce_mean(out) + loss = paddle.fluid.layers.reduce_mean(out) loss.backward() sgd.step() sgd.clear_gradients() @@ -407,7 +407,7 @@ class NaturalExpDecay(LRScheduler): for batch_id in range(2): x = paddle.uniform([10, 10]) out = linear(x) - loss = paddle.reduce_mean(out) + loss = paddle.fluid.layers.reduce_mean(out) loss.backward() sgd.step() sgd.clear_gradients() @@ -485,7 +485,7 @@ class InverseTimeDecay(LRScheduler): for batch_id in range(2): x = paddle.uniform([10, 10]) out = linear(x) - loss = paddle.reduce_mean(out) + loss = paddle.fluid.layers.reduce_mean(out) loss.backward() sgd.step() sgd.clear_gradients() @@ -580,7 +580,7 @@ class PolynomialDecay(LRScheduler): for batch_id in range(2): x = paddle.uniform([10, 10]) out = linear(x) - loss = paddle.reduce_mean(out) + loss = paddle.fluid.layers.reduce_mean(out) loss.backward() sgd.step() sgd.clear_gradients() @@ -695,7 +695,7 @@ class LinearWarmup(LRScheduler): for batch_id in range(2): x = paddle.uniform([10, 10]) out = linear(x) - loss = paddle.reduce_mean(out) + loss = paddle.fluid.layers.reduce_mean(out) loss.backward() sgd.step() sgd.clear_gradients() @@ -798,7 +798,7 @@ class ExponentialDecay(LRScheduler): for batch_id in range(2): x = paddle.uniform([10, 10]) out = linear(x) - loss = paddle.reduce_mean(out) + loss = paddle.fluid.layers.reduce_mean(out) loss.backward() sgd.step() sgd.clear_gradients() @@ -885,7 +885,7 @@ class MultiStepDecay(LRScheduler): for batch_id in range(2): x = paddle.uniform([10, 10]) out = linear(x) - loss = paddle.reduce_mean(out) + loss = paddle.fluid.layers.reduce_mean(out) loss.backward() sgd.step() sgd.clear_gradients() @@ -992,7 +992,7 @@ class StepDecay(LRScheduler): for batch_id in range(2): x = paddle.uniform([10, 10]) out = linear(x) - loss = paddle.reduce_mean(out) + loss = paddle.fluid.layers.reduce_mean(out) loss.backward() sgd.step() sgd.clear_gradients() @@ -1086,7 +1086,7 @@ class LambdaDecay(LRScheduler): for batch_id in range(2): x = paddle.uniform([10, 10]) out = linear(x) - loss = paddle.reduce_mean(out) + loss = paddle.fluid.layers.reduce_mean(out) 
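# paddle.reduce_mean is removed as a public alias by this patch, so these
# scheduler docstrings call the fluid original; paddle.mean, still exported
# from tensor/stat.py below, computes the same full reduction and works here
# too:
loss = paddle.mean(out)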
loss.backward() sgd.step() sgd.clear_gradients() @@ -1184,7 +1184,7 @@ class ReduceOnPlateau(LRScheduler): for batch_id in range(2): x = paddle.uniform([10, 10]) out = linear(x) - loss = paddle.reduce_mean(out) + loss = paddle.fluid.layers.reduce_mean(out) loss.backward() sgd.step() sgd.clear_gradients() @@ -1390,7 +1390,7 @@ class CosineAnnealingDecay(LRScheduler): for batch_id in range(2): x = paddle.uniform([10, 10]) out = linear(x) - loss = paddle.reduce_mean(out) + loss = paddle.fluid.layers.reduce_mean(out) loss.backward() sgd.step() sgd.clear_gradients() diff --git a/python/paddle/optimizer/optimizer.py b/python/paddle/optimizer/optimizer.py index 0f97955706..eeedbbdd1b 100644 --- a/python/paddle/optimizer/optimizer.py +++ b/python/paddle/optimizer/optimizer.py @@ -377,7 +377,7 @@ class Optimizer(object): linear = paddle.nn.Linear(10, 10) inp = paddle.to_tensor(inp) out = linear(inp) - loss = paddle.reduce_mean(out) + loss = paddle.fluid.layers.reduce_mean(out) bd = [2, 4, 6, 8] value = [0.2, 0.4, 0.6, 0.8, 1.0] diff --git a/python/paddle/static/__init__.py b/python/paddle/static/__init__.py index 909a1b6f39..a6ce437982 100644 --- a/python/paddle/static/__init__.py +++ b/python/paddle/static/__init__.py @@ -19,7 +19,7 @@ __all__ = [ 'name_scope', 'ParallelExecutor', 'program_guard', 'WeightNormParamAttr', 'default_main_program', 'default_startup_program', 'Program', 'data', 'InputSpec', 'save', 'load', 'save_inference_model', 'load_inference_model', - 'load_program_state', 'set_program_state', 'cpu_places', 'cuda_places' + 'load_program_state', 'set_program_state', 'cpu_places', 'cuda_places', 'Variable' ] from . import nn @@ -41,6 +41,7 @@ from ..fluid.framework import name_scope #DEFINE_ALIAS from ..fluid.framework import program_guard #DEFINE_ALIAS from ..fluid.framework import cpu_places #DEFINE_ALIAS from ..fluid.framework import cuda_places #DEFINE_ALIAS +from ..fluid.framework import Variable #DEFINE_ALIAS from ..fluid.layers.control_flow import Print #DEFINE_ALIAS from ..fluid.layers.nn import py_func #DEFINE_ALIAS from ..fluid.parallel_executor import ParallelExecutor #DEFINE_ALIAS diff --git a/python/paddle/static/nn/__init__.py b/python/paddle/static/nn/__init__.py index 3ae65e879f..9161bb7af4 100644 --- a/python/paddle/static/nn/__init__.py +++ b/python/paddle/static/nn/__init__.py @@ -18,6 +18,7 @@ __all__ = [ 'embedding', 'bilinear_tensor_product', 'case', + 'cond', 'conv2d', 'conv2d_transpose', 'conv3d', @@ -36,6 +37,7 @@ __all__ = [ 'row_conv', 'spectral_norm', 'switch_case', + 'while_loop', ] from .common import fc #DEFINE_ALIAS @@ -44,6 +46,7 @@ from .common import deform_conv2d #DEFINE_ALIAS from ...fluid.layers import batch_norm #DEFINE_ALIAS from ...fluid.layers import bilinear_tensor_product #DEFINE_ALIAS from ...fluid.layers import case #DEFINE_ALIAS +from ...fluid.layers import cond #DEFINE_ALIAS from ...fluid.layers import conv2d #DEFINE_ALIAS from ...fluid.layers import conv2d_transpose #DEFINE_ALIAS from ...fluid.layers import conv3d #DEFINE_ALIAS @@ -61,5 +64,6 @@ from ...fluid.layers import py_func #DEFINE_ALIAS from ...fluid.layers import row_conv #DEFINE_ALIAS from ...fluid.layers import spectral_norm #DEFINE_ALIAS from ...fluid.layers import switch_case #DEFINE_ALIAS +from ...fluid.layers import while_loop #DEFINE_ALIAS from ...fluid.input import embedding #DEFINE_ALIAS diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py index 3483bca7e1..773e6ebc7a 100755 --- a/python/paddle/tensor/__init__.py +++ 
b/python/paddle/tensor/__init__.py @@ -23,10 +23,9 @@ from .random import randperm from .attribute import rank #DEFINE_ALIAS from .attribute import shape #DEFINE_ALIAS from .creation import to_tensor #DEFINE_ALIAS -from .creation import crop_tensor #DEFINE_ALIAS from .creation import diag #DEFINE_ALIAS from .creation import eye #DEFINE_ALIAS -from .creation import fill_constant #DEFINE_ALIAS +# from .creation import fill_constant #DEFINE_ALIAS # from .creation import get_tensor_from_selected_rows #DEFINE_ALIAS from .creation import linspace #DEFINE_ALIAS from .creation import ones #DEFINE_ALIAS @@ -67,8 +66,8 @@ from .logic import logical_not #DEFINE_ALIAS from .logic import logical_or #DEFINE_ALIAS from .logic import logical_xor #DEFINE_ALIAS from .logic import not_equal #DEFINE_ALIAS -from .logic import reduce_all #DEFINE_ALIAS -from .logic import reduce_any #DEFINE_ALIAS +# from .logic import reduce_all #DEFINE_ALIAS +# from .logic import reduce_any #DEFINE_ALIAS from .logic import allclose #DEFINE_ALIAS from .logic import equal_all #DEFINE_ALIAS # from .logic import isnan #DEFINE_ALIAS @@ -108,13 +107,13 @@ from .math import ceil #DEFINE_ALIAS from .math import cos #DEFINE_ALIAS from .math import cosh #DEFINE_ALIAS from .math import cumsum #DEFINE_ALIAS -from .math import elementwise_add #DEFINE_ALIAS -from .math import elementwise_div #DEFINE_ALIAS -from .math import elementwise_floordiv #DEFINE_ALIAS -from .math import elementwise_mul #DEFINE_ALIAS -from .math import elementwise_mod #DEFINE_ALIAS -from .math import elementwise_pow #DEFINE_ALIAS -from .math import elementwise_sub #DEFINE_ALIAS +# from .math import elementwise_add #DEFINE_ALIAS +# from .math import elementwise_div #DEFINE_ALIAS +# from .math import elementwise_floordiv #DEFINE_ALIAS +# from .math import elementwise_mul #DEFINE_ALIAS +# from .math import elementwise_mod #DEFINE_ALIAS +# from .math import elementwise_pow #DEFINE_ALIAS +# from .math import elementwise_sub #DEFINE_ALIAS from .math import exp #DEFINE_ALIAS from .math import floor #DEFINE_ALIAS from .math import increment #DEFINE_ALIAS @@ -122,10 +121,10 @@ from .math import log #DEFINE_ALIAS from .math import multiplex #DEFINE_ALIAS from .math import pow #DEFINE_ALIAS from .math import reciprocal #DEFINE_ALIAS -from .math import reduce_max #DEFINE_ALIAS -from .math import reduce_min #DEFINE_ALIAS -from .math import reduce_prod #DEFINE_ALIAS -from .math import reduce_sum #DEFINE_ALIAS +# from .math import reduce_max #DEFINE_ALIAS +# from .math import reduce_min #DEFINE_ALIAS +# from .math import reduce_prod #DEFINE_ALIAS +# from .math import reduce_sum #DEFINE_ALIAS from .math import round #DEFINE_ALIAS from .math import rsqrt #DEFINE_ALIAS from .math import scale #DEFINE_ALIAS @@ -155,7 +154,7 @@ from .math import logsumexp #DEFINE_ALIAS from .math import inverse #DEFINE_ALIAS from .math import log1p #DEFINE_ALIAS from .math import erf #DEFINE_ALIAS -from .math import addcmul #DEFINE_ALIAS +# from .math import addcmul #DEFINE_ALIAS from .math import addmm #DEFINE_ALIAS from .math import clip #DEFINE_ALIAS from .math import trace #DEFINE_ALIAS @@ -175,8 +174,8 @@ from .random import randperm #DEFINE_ALIAS from .search import argmax #DEFINE_ALIAS from .search import argmin #DEFINE_ALIAS from .search import argsort #DEFINE_ALIAS -from .search import has_inf #DEFINE_ALIAS -from .search import has_nan #DEFINE_ALIAS +# from .search import has_inf #DEFINE_ALIAS +# from .search import has_nan #DEFINE_ALIAS # from .search import masked_select #DEFINE_ALIAS from 
.search import topk #DEFINE_ALIAS from .search import where #DEFINE_ALIAS @@ -186,7 +185,7 @@ from .search import sort #DEFINE_ALIAS from .search import index_sample #DEFINE_ALIAS from .search import masked_select #DEFINE_ALIAS from .stat import mean #DEFINE_ALIAS -from .stat import reduce_mean #DEFINE_ALIAS +# from .stat import reduce_mean #DEFINE_ALIAS from .stat import std #DEFINE_ALIAS from .stat import var #DEFINE_ALIAS from .stat import numel #DEFINE_ALIAS diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py index d783d6329e..d5592c7d69 100644 --- a/python/paddle/tensor/creation.py +++ b/python/paddle/tensor/creation.py @@ -24,20 +24,15 @@ from ..fluid.layers import core from ..fluid.layer_helper import LayerHelper from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype from ..fluid.framework import convert_np_dtype_to_dtype_, in_dygraph_mode, _varbase_creator, device_guard, OpProtoHolder -from ..fluid.layers import fill_constant from paddle.common_ops_import import * # TODO: define functions to get create a tensor -from ..fluid.layers import crop_tensor #DEFINE_ALIAS -from ..fluid.layers import fill_constant #DEFINE_ALIAS from ..fluid.layers import linspace #DEFINE_ALIAS import paddle __all__ = [ 'to_tensor', - 'crop_tensor', 'diag', - 'fill_constant', # 'get_tensor_from_selected_rows', 'linspace', 'ones', @@ -317,7 +312,7 @@ def ones(shape, dtype=None, name=None): # [1 1]] # shape is a Tensor - shape = paddle.fill_constant(shape=[2], dtype='int32', value=2) + shape = paddle.fluid.layers.fill_constant(shape=[2], dtype='int32', value=2) data3 = paddle.ones(shape=shape, dtype='int32') # [[1 1] # [1 1]] @@ -398,7 +393,7 @@ def zeros(shape, dtype=None, name=None): # [0. 0.]] # shape is a Tensor - shape = paddle.fill_constant(shape=[2], dtype='int32', value=2) + shape = paddle.fluid.layers.fill_constant(shape=[2], dtype='int32', value=2) data3 = paddle.zeros(shape=shape, dtype='int32') # [[0 0] # [0 0]] @@ -526,18 +521,18 @@ def full(shape, fill_value, dtype=None, name=None): # [0]] # attr shape is a list which contains Tensor. - positive_2 = paddle.fill_constant([1], "int32", 2) + positive_2 = paddle.fluid.layers.fill_constant([1], "int32", 2) data3 = paddle.full(shape=[1, positive_2], dtype='float32', fill_value=1.5) # [[1.5 1.5]] # attr shape is a Tensor. - shape = paddle.fill_constant([2], "int32", 2) + shape = paddle.fluid.layers.fill_constant([2], "int32", 2) data4 = paddle.full(shape=shape, dtype='bool', fill_value=True) # [[True True] # [True True]] # attr fill_value is a Tensor. 
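# fill_constant likewise keeps only its fluid spelling; the creation-op
# docstrings above build Tensor-valued shape and fill_value arguments through
# paddle.fluid.layers.fill_constant. The same pattern as a runnable dygraph
# sketch:
import paddle

shape = paddle.fluid.layers.fill_constant([2], "int32", 2)
data = paddle.full(shape=shape, dtype='float32', fill_value=1.5)
print(data.numpy())  # [[1.5 1.5]
                     #  [1.5 1.5]]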
- val = paddle.fill_constant([1], "float32", 2.0) + val = paddle.fluid.layers.fill_constant([1], "float32", 2.0) data5 = paddle.full(shape=[2,1], fill_value=val, dtype='float32') # [[2.0] # [2.0]] diff --git a/python/paddle/tensor/logic.py b/python/paddle/tensor/logic.py index 5fd714421c..7b0d210987 100644 --- a/python/paddle/tensor/logic.py +++ b/python/paddle/tensor/logic.py @@ -26,8 +26,6 @@ from ..fluid.layers import logical_and #DEFINE_ALIAS from ..fluid.layers import logical_not #DEFINE_ALIAS from ..fluid.layers import logical_or #DEFINE_ALIAS from ..fluid.layers import logical_xor #DEFINE_ALIAS -from ..fluid.layers import reduce_all #DEFINE_ALIAS -from ..fluid.layers import reduce_any #DEFINE_ALIAS __all__ = [ 'equal', @@ -43,8 +41,6 @@ __all__ = [ 'logical_or', 'logical_xor', 'not_equal', - 'reduce_all', - 'reduce_any', 'allclose', # 'isnan' ] diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index 8c588c1584..c773c1cc74 100755 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -35,21 +35,21 @@ from ..fluid.layers import ceil #DEFINE_ALIAS from ..fluid.layers import cos #DEFINE_ALIAS from ..fluid.layers import sinh #DEFINE_ALIAS from ..fluid.layers import cosh #DEFINE_ALIAS -from ..fluid.layers import elementwise_add #DEFINE_ALIAS -from ..fluid.layers import elementwise_div #DEFINE_ALIAS -from ..fluid.layers import elementwise_floordiv #DEFINE_ALIAS -from ..fluid.layers import elementwise_mod #DEFINE_ALIAS -from ..fluid.layers import elementwise_mul #DEFINE_ALIAS -from ..fluid.layers import elementwise_pow #DEFINE_ALIAS -from ..fluid.layers import elementwise_sub #DEFINE_ALIAS +# from ..fluid.layers import elementwise_add #DEFINE_ALIAS +# from ..fluid.layers import elementwise_div #DEFINE_ALIAS +# from ..fluid.layers import elementwise_floordiv #DEFINE_ALIAS +# from ..fluid.layers import elementwise_mod #DEFINE_ALIAS +# from ..fluid.layers import elementwise_mul #DEFINE_ALIAS +# from ..fluid.layers import elementwise_pow #DEFINE_ALIAS +# from ..fluid.layers import elementwise_sub #DEFINE_ALIAS from ..fluid.layers import exp #DEFINE_ALIAS from ..fluid.layers import floor #DEFINE_ALIAS from ..fluid.layers import log #DEFINE_ALIAS from ..fluid.layers import reciprocal #DEFINE_ALIAS -from ..fluid.layers import reduce_max #DEFINE_ALIAS -from ..fluid.layers import reduce_min #DEFINE_ALIAS -from ..fluid.layers import reduce_prod #DEFINE_ALIAS -from ..fluid.layers import reduce_sum #DEFINE_ALIAS +# from ..fluid.layers import reduce_max #DEFINE_ALIAS +# from ..fluid.layers import reduce_min #DEFINE_ALIAS +# from ..fluid.layers import reduce_prod #DEFINE_ALIAS +# from ..fluid.layers import reduce_sum #DEFINE_ALIAS from ..fluid.layers import round #DEFINE_ALIAS from ..fluid.layers import rsqrt #DEFINE_ALIAS from ..fluid.layers import scale #DEFINE_ALIAS @@ -73,12 +73,6 @@ __all__ = [ 'cos', 'cosh', 'cumsum', - 'elementwise_add', - 'elementwise_div', - 'elementwise_floordiv', - 'elementwise_mod', - 'elementwise_pow', - 'elementwise_sub', 'exp', 'floor', 'increment', @@ -89,10 +83,6 @@ __all__ = [ 'pow', 'prod', 'reciprocal', - 'reduce_max', - 'reduce_min', - 'reduce_prod', - 'reduce_sum', 'round', 'rsqrt', 'scale', @@ -180,7 +170,7 @@ def pow(x, y, name=None): print(res.numpy()) # [1 4 9] # example 2: y is a Tensor - y = paddle.fill_constant(shape=[1], value=2, dtype='float32') + y = paddle.fluid.layers.fill_constant(shape=[1], value=2, dtype='float32') res = paddle.pow(x, y) print(res.numpy()) # [1 4 9] @@ -1348,7 +1338,7 @@ def addcmul(input, 
tensor1, tensor2, value=1.0, name=None): input = paddle.ones([2,2]) tensor1 = paddle.ones([2,2]) tensor2 = paddle.ones([2,2]) - out = paddle.addcmul(input, tensor1, tensor2, value=0.5) + out = paddle.tensor.math.addcmul(input, tensor1, tensor2, value=0.5) print(out.numpy()) # [[1.5 1.5] # [1.5 1.5]] diff --git a/python/paddle/tensor/random.py b/python/paddle/tensor/random.py index a46946cea8..632cbbdd0e 100644 --- a/python/paddle/tensor/random.py +++ b/python/paddle/tensor/random.py @@ -431,8 +431,8 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None): # example 2: # attr shape is a list which contains Tensor. - dim_1 = paddle.fill_constant([1], "int64", 2) - dim_2 = paddle.fill_constant([1], "int32", 3) + dim_1 = paddle.fluid.layers.fill_constant([1], "int64", 2) + dim_2 = paddle.fluid.layers.fill_constant([1], "int32", 3) result_2 = paddle.tensor.random.uniform(shape=[dim_1, dim_2]) # [[-0.9951253, 0.30757582, 0.9899647 ], # [ 0.5864527, 0.6607096, -0.8886161 ]] diff --git a/python/paddle/tensor/search.py b/python/paddle/tensor/search.py index 7f722d1957..7adf1b7cc4 100644 --- a/python/paddle/tensor/search.py +++ b/python/paddle/tensor/search.py @@ -18,15 +18,13 @@ from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtyp from ..fluid import core, layers # TODO: define searching & indexing functions of a tensor -from ..fluid.layers import has_inf #DEFINE_ALIAS -from ..fluid.layers import has_nan #DEFINE_ALIAS +# from ..fluid.layers import has_inf #DEFINE_ALIAS +# from ..fluid.layers import has_nan #DEFINE_ALIAS __all__ = [ 'argmax', 'argmin', 'argsort', - 'has_inf', - 'has_nan', 'masked_select', 'topk', 'where', diff --git a/python/paddle/tensor/stat.py b/python/paddle/tensor/stat.py index d56dff5a81..24f62bfcd8 100644 --- a/python/paddle/tensor/stat.py +++ b/python/paddle/tensor/stat.py @@ -13,9 +13,8 @@ # limitations under the License. # TODO: define statistical functions of a tensor -from ..fluid.layers import reduce_mean #DEFINE_ALIAS -__all__ = ['mean', 'reduce_mean', 'std', 'var', 'numel'] +__all__ = ['mean', 'std', 'var', 'numel'] import numpy as np from ..fluid.framework import Variable diff --git a/python/paddle/tests/test_model.py b/python/paddle/tests/test_model.py index 56105b6d7f..46ea5ec995 100644 --- a/python/paddle/tests/test_model.py +++ b/python/paddle/tests/test_model.py @@ -25,7 +25,7 @@ import tempfile import paddle from paddle import fluid from paddle import to_tensor -from paddle.nn import Conv2d, Pool2D, Linear, ReLU, Sequential, Softmax +from paddle.nn import Conv2d, Linear, ReLU, Sequential, Softmax from paddle import Model from paddle.static import InputSpec @@ -47,11 +47,11 @@ class LeNetDygraph(paddle.nn.Layer): Conv2d( 1, 6, 3, stride=1, padding=1), ReLU(), - Pool2D(2, 'max', 2), + paddle.fluid.dygraph.Pool2D(2, 'max', 2), Conv2d( 6, 16, 5, stride=1, padding=0), ReLU(), - Pool2D(2, 'max', 2)) + paddle.fluid.dygraph.Pool2D(2, 'max', 2)) if num_classes > 0: self.fc = Sequential(