Remove and reorganize the alias of APIs (#27717)

* modify cond while_loop to paddle.static.nn.cond

* modify crop_tensor to paddle.crop

* modify Variable to paddle.static.Variable

* remove nn.beam_search, nn.beam_search_decode, nn.gather_tree

* remove bpr_loss, center_loss, rank_loss, smooth_l1, teacher_student_sigmoid_loss, edit_distance, sampled_softmax_with_cross_entropy in nn.functional

* remove apis in nn.functional.learn_rate.py

* remove pool2d, pool3d, adaptive_pool2d, adaptive_pool3d in nn.functional

* remove apis in nn.functional.vision

* remove erf, soft_relu in nn.functional.activation

* remove apis in nn.functional.extension

* remove nn.functional.rnn

* remove hash from nn.functional.lod

* remove row_conv from nn.functional.extension

* remove one_hot, pad2d, pad_constant_like from nn.functional.common

* remove nn.gather_tree, nn.BilinearTensorProduct, nn.Pool2D, nn.Pad2D

* remove apis from optimizer.__init

* remove tensor.creation.fill_constant

* remove elementwise_mul in nn.functional.common and modify to paddle.multiply

* remove tensor.stat.reduce_mean

* remove reduce_all, reduce_any in tensor.logic

* remove apis in tensor.math

* remove apis in tensor.__init__

* remove has_inf, has_nan in tensor.search

* remove apis in framework.__init__

* remove apis in paddle.__init__

* remove apis in nn.functional.__init__

* modify removed alias apis to raw api in doc and unittests

* fix remove grid_sample bug

* modify removed alias apis to raw api in doc and unittests

* modify removed alias apis to raw api in doc and unittests

* modify removed alias apis to raw api in doc and unittests

* modify removed alias apis to raw api in doc and unittests

* modify removed alias apis to raw api in doc and unittests

* modify removed alias apis to raw api in doc and unittests

* delete alias api relations in doc

* reserve paddle.compat, paddle.sysconfig

* remove unittest for paddle.reduce_all, paddle.reduce_any

* modify removed alias apis to raw api in doc and unittests

* recover paddle.save and paddle.load

* resolve conflicts

* fix sample code missing paddle.enable_static() bug

* fix sample code missing paddle.enable_static() bug

* fix to_string sample code error
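
The theme across these commits: 2.0-style aliases are deleted, and every doc or unittest that used one is pointed back at either the raw fluid API or the surviving 2.0 name. A minimal sketch of the pattern, assuming a Paddle 2.0rc build where paddle.multiply is the documented replacement for the removed elementwise_mul alias:

    import paddle

    x = paddle.ones([2, 3], dtype='float32')
    y = paddle.ones([2, 3], dtype='float32')

    # removed alias                -> what the updated docs/tests call instead
    # paddle.reduce_sum(t)         -> paddle.fluid.layers.reduce_sum(t)
    # paddle.reduce_all(t)         -> paddle.fluid.layers.reduce_all(t)
    # paddle.elementwise_mul(x, y) -> paddle.multiply(x, y)
    out = paddle.multiply(x, y)
    total = paddle.fluid.layers.reduce_sum(out)
    print(float(total))  # 6.0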
my_2.0rc
chentianyu03 committed 4 years ago via GitHub
parent 6e5034e248
commit d05058d268

@@ -712,7 +712,7 @@ void BindImperative(py::module *m_ptr) {
 tmp.stop_gradient=False
 inputs.append(tmp)
 ret = paddle.sums(inputs2)
-loss = paddle.reduce_sum(ret)
+loss = paddle.fluid.layers.reduce_sum(ret)
 loss.backward()
 print("Before clear_gradient {}".format(loss.grad))
 loss.clear_gradient()

@@ -59,10 +59,9 @@ from .tensor.random import bernoulli
 from .tensor.attribute import rank #DEFINE_ALIAS
 from .tensor.attribute import shape #DEFINE_ALIAS
 from .tensor.creation import to_tensor #DEFINE_ALIAS
-from .tensor.creation import crop_tensor #DEFINE_ALIAS
 from .tensor.creation import diag #DEFINE_ALIAS
 from .tensor.creation import eye #DEFINE_ALIAS
-from .tensor.creation import fill_constant #DEFINE_ALIAS
+# from .tensor.creation import fill_constant #DEFINE_ALIAS
 # from .tensor.creation import get_tensor_from_selected_rows #DEFINE_ALIAS
 from .tensor.creation import linspace #DEFINE_ALIAS
 from .tensor.creation import ones #DEFINE_ALIAS
@@ -103,8 +102,8 @@ from .tensor.logic import logical_not #DEFINE_ALIAS
 from .tensor.logic import logical_or #DEFINE_ALIAS
 from .tensor.logic import logical_xor #DEFINE_ALIAS
 from .tensor.logic import not_equal #DEFINE_ALIAS
-from .tensor.logic import reduce_all #DEFINE_ALIAS
-from .tensor.logic import reduce_any #DEFINE_ALIAS
+# from .tensor.logic import reduce_all #DEFINE_ALIAS
+# from .tensor.logic import reduce_any #DEFINE_ALIAS
 from .tensor.logic import allclose #DEFINE_ALIAS
 from .tensor.logic import equal_all #DEFINE_ALIAS
 # from .tensor.logic import isnan #DEFINE_ALIAS
@@ -144,12 +143,12 @@ from .tensor.math import ceil #DEFINE_ALIAS
 from .tensor.math import cos #DEFINE_ALIAS
 from .tensor.math import cosh #DEFINE_ALIAS
 from .tensor.math import cumsum #DEFINE_ALIAS
-from .tensor.math import elementwise_add #DEFINE_ALIAS
-from .tensor.math import elementwise_div #DEFINE_ALIAS
-from .tensor.math import elementwise_floordiv #DEFINE_ALIAS
-from .tensor.math import elementwise_mod #DEFINE_ALIAS
-from .tensor.math import elementwise_pow #DEFINE_ALIAS
-from .tensor.math import elementwise_sub #DEFINE_ALIAS
+# from .tensor.math import elementwise_add #DEFINE_ALIAS
+# from .tensor.math import elementwise_div #DEFINE_ALIAS
+# from .tensor.math import elementwise_floordiv #DEFINE_ALIAS
+# from .tensor.math import elementwise_mod #DEFINE_ALIAS
+# from .tensor.math import elementwise_pow #DEFINE_ALIAS
+# from .tensor.math import elementwise_sub #DEFINE_ALIAS
 from .tensor.math import exp #DEFINE_ALIAS
 from .tensor.math import floor #DEFINE_ALIAS
 from .tensor.math import increment #DEFINE_ALIAS
@@ -157,10 +156,10 @@ from .tensor.math import log #DEFINE_ALIAS
 from .tensor.math import multiplex #DEFINE_ALIAS
 from .tensor.math import pow #DEFINE_ALIAS
 from .tensor.math import reciprocal #DEFINE_ALIAS
-from .tensor.math import reduce_max #DEFINE_ALIAS
-from .tensor.math import reduce_min #DEFINE_ALIAS
-from .tensor.math import reduce_prod #DEFINE_ALIAS
-from .tensor.math import reduce_sum #DEFINE_ALIAS
+# from .tensor.math import reduce_max #DEFINE_ALIAS
+# from .tensor.math import reduce_min #DEFINE_ALIAS
+# from .tensor.math import reduce_prod #DEFINE_ALIAS
+# from .tensor.math import reduce_sum #DEFINE_ALIAS
 from .tensor.math import round #DEFINE_ALIAS
 from .tensor.math import rsqrt #DEFINE_ALIAS
 from .tensor.math import scale #DEFINE_ALIAS
@@ -190,7 +189,7 @@ from .tensor.math import logsumexp #DEFINE_ALIAS
 from .tensor.math import inverse #DEFINE_ALIAS
 from .tensor.math import log1p #DEFINE_ALIAS
 from .tensor.math import erf #DEFINE_ALIAS
-from .tensor.math import addcmul #DEFINE_ALIAS
+# from .tensor.math import addcmul #DEFINE_ALIAS
 from .tensor.math import addmm #DEFINE_ALIAS
 from .tensor.math import clip #DEFINE_ALIAS
 from .tensor.math import trace #DEFINE_ALIAS
@@ -210,8 +209,8 @@ from .tensor.random import randperm #DEFINE_ALIAS
 from .tensor.search import argmax #DEFINE_ALIAS
 from .tensor.search import argmin #DEFINE_ALIAS
 from .tensor.search import argsort #DEFINE_ALIAS
-from .tensor.search import has_inf #DEFINE_ALIAS
-from .tensor.search import has_nan #DEFINE_ALIAS
+# from .tensor.search import has_inf #DEFINE_ALIAS
+# from .tensor.search import has_nan #DEFINE_ALIAS
 from .tensor.search import masked_select #DEFINE_ALIAS
 from .tensor.search import topk #DEFINE_ALIAS
 from .tensor.search import where #DEFINE_ALIAS
@@ -224,9 +223,8 @@ from .tensor.to_string import set_printoptions
 from .framework.random import manual_seed #DEFINE_ALIAS
 from .framework.random import get_cuda_rng_state #DEFINE_ALIAS
 from .framework.random import set_cuda_rng_state #DEFINE_ALIAS
-from .framework import Variable #DEFINE_ALIAS
 from .framework import ParamAttr #DEFINE_ALIAS
-from .framework import create_global_var #DEFINE_ALIAS
+# from .framework import create_global_var #DEFINE_ALIAS
 from .framework import create_parameter #DEFINE_ALIAS
 from .framework import CPUPlace #DEFINE_ALIAS
 from .framework import CUDAPlace #DEFINE_ALIAS
@@ -243,10 +241,10 @@ from .framework import get_default_dtype #DEFINE_ALIAS
 from .tensor.search import index_sample #DEFINE_ALIAS
 from .tensor.stat import mean #DEFINE_ALIAS
-from .tensor.stat import reduce_mean #DEFINE_ALIAS
+# from .tensor.stat import reduce_mean #DEFINE_ALIAS
 from .tensor.stat import std #DEFINE_ALIAS
 from .tensor.stat import var #DEFINE_ALIAS
-from .fluid.data import data
+# from .fluid.data import data
 from .tensor.stat import numel #DEFINE_ALIAS
 from .device import get_cudnn_version
 from .device import set_device
@@ -262,6 +260,8 @@ from .fluid.dygraph.base import enable_dygraph as disable_static #DEFINE_ALIAS
 from .fluid.dygraph.base import disable_dygraph as enable_static #DEFINE_ALIAS
 from .fluid.framework import in_dygraph_mode as in_dynamic_mode #DEFINE_ALIAS
 from .fluid.dygraph.base import no_grad_ as no_grad #DEFINE_ALIAS
+from .fluid.layers import crop_tensor as crop #DEFINE_ALIAS
 from . import jit
 from . import static
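
The final hunk above is what keeps paddle.crop importable after tensor.creation.crop_tensor is dropped: the public name now aliases the fluid implementation directly. A quick sanity check of the new surface (shapes are illustrative, not taken from the PR):

    import paddle

    paddle.enable_static()
    x = paddle.fluid.data(name="x", shape=[3, 5], dtype="float32")
    # paddle.crop is now an alias of paddle.fluid.layers.crop_tensor
    cropped = paddle.crop(x, shape=[2, 3])
    print(cropped.shape)  # expected: (2, 3)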

@@ -56,7 +56,7 @@ class GradScaler(AmpScaler):
 data = paddle.rand([10, 3, 32, 32])
 with paddle.amp.auto_cast():
     conv = model(data)
-    loss = paddle.reduce_mean(conv)
+    loss = paddle.fluid.layers.reduce_mean(conv)
 scaled = scaler.scale(loss)  # scale the loss
 scaled.backward()  # do backward
 scaler.minimize(optimizer, scaled)  # update parameters
@@ -96,7 +96,7 @@ class GradScaler(AmpScaler):
 data = paddle.rand([10, 3, 32, 32])
 with paddle.amp.auto_cast():
     conv = model(data)
-    loss = paddle.reduce_mean(conv)
+    loss = paddle.fluid.layers.reduce_mean(conv)
 scaled = scaler.scale(loss)  # scale the loss
 scaled.backward()  # do backward
 scaler.minimize(optimizer, scaled)  # update parameters
@@ -128,7 +128,7 @@ class GradScaler(AmpScaler):
 data = paddle.rand([10, 3, 32, 32])
 with paddle.amp.auto_cast():
     conv = model(data)
-    loss = paddle.reduce_mean(conv)
+    loss = paddle.fluid.layers.reduce_mean(conv)
 scaled = scaler.scale(loss)  # scale the loss
 scaled.backward()  # do backward
 scaler.minimize(optimizer, scaled)  # update parameters

@@ -439,7 +439,7 @@ def barrier(group=0):
         paddle.distributed.barrier()
     """
     op_type = 'barrier'
-    temp = paddle.fill_constant([1], dtype="int32", value="1")
+    temp = fill_constant([1], dtype="int32", value="1")
     if in_dygraph_mode():
         return core.ops.barrier(temp, temp, 'ring_id', group)
     if not isinstance(group, int):
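
The bare fill_constant call above assumes the name is already imported at the top of the collective module; a sketch of that import (the exact import line is an assumption, it is not shown in this diff):

    # assumed module-level import backing the bare call above
    from paddle.fluid.layers import fill_constant

    temp = fill_constant(shape=[1], dtype="int32", value=1)
    print(temp.numpy())  # [1]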

@@ -25,9 +25,9 @@ from .fluid.layers import control_flow
 from .fluid.layers import tensor
 from .fluid.layers import ops
 from .fluid.layers import nn
+from .fluid.layers import elementwise_mul, elementwise_div, elementwise_add, elementwise_sub
 from .fluid import core
 from .fluid.framework import in_dygraph_mode
-from .tensor.math import elementwise_mul, elementwise_div, elementwise_add, elementwise_sub
 from .tensor import arange, gather_nd, concat, multinomial
 import math
 import numpy as np
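
With the tensor.math elementwise aliases commented out in paddle/__init__.py, the distribution code switches its import to fluid.layers, where the ops still live. A quick check of the surviving path (values illustrative):

    import paddle
    import paddle.fluid.layers as layers

    x = paddle.to_tensor([1.0, 2.0])
    y = paddle.to_tensor([3.0, 4.0])
    print(layers.elementwise_mul(x, y).numpy())  # [3. 8.]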

@@ -480,7 +480,7 @@ def grad(outputs,
 paddle.disable_static()

 def test_dygraph_grad(grad_outputs=None):
-    x = paddle.fill_constant(shape=[1], value=2.0, dtype='float32')
+    x = paddle.fluid.layers.fill_constant(shape=[1], value=2.0, dtype='float32')
     x.stop_gradient = False
     y1 = x * x
@@ -503,7 +503,7 @@ def grad(outputs,
     return dx.numpy()

-grad_value = paddle.fill_constant(shape=[1], value=4.0, dtype='float32')
+grad_value = paddle.fluid.layers.fill_constant(shape=[1], value=4.0, dtype='float32')
 # dy1 = [1], dy2 = [1]
 print(test_dygraph_grad(None))  # [7.]
@@ -515,7 +515,7 @@ def grad(outputs,
 print(test_dygraph_grad([grad_value, None]))  # [19.]

 # dy1 = [3], dy2 = [4]
-grad_y1 = paddle.fill_constant(shape=[1], value=3.0, dtype='float32')
+grad_y1 = paddle.fluid.layers.fill_constant(shape=[1], value=3.0, dtype='float32')
 print(test_dygraph_grad([grad_y1, grad_value]))  # [24.]
 '''

@@ -87,7 +87,7 @@ def create_static_variable_gast_node(name):
 def create_fill_constant_node(name, value):
-    func_code = "{} = paddle.fill_constant(shape=[1], ".format(name)
+    func_code = "{} = paddle.fluid.layers.fill_constant(shape=[1], ".format(name)
     if isinstance(value, bool):
         func_code += "dtype='bool', value={})".format(value)
         return gast.parse(func_code).body[0]

@@ -702,9 +702,6 @@ class Conv3DTranspose(layers.Layer):
 class Pool2D(layers.Layer):
     """
-    :alias_main: paddle.nn.Pool2D
-    :alias: paddle.nn.Pool2D,paddle.nn.layer.Pool2D,paddle.nn.layer.common.Pool2D
-    :old_api: paddle.fluid.dygraph.Pool2D

     This interface is used to construct a callable object of the ``Pool2D`` class.
     For more details, refer to code examples.
@@ -2354,9 +2351,6 @@ class PRelu(layers.Layer):
 class BilinearTensorProduct(layers.Layer):
     """
-    :alias_main: paddle.nn.BilinearTensorProduct
-    :alias: paddle.nn.BilinearTensorProduct,paddle.nn.layer.BilinearTensorProduct,paddle.nn.layer.common.BilinearTensorProduct
-    :old_api: paddle.fluid.dygraph.BilinearTensorProduct

     **Add Bilinear Tensor Product Layer**

@@ -163,7 +163,7 @@ def monkey_patch_varbase():
 tmp.stop_gradient=False
 inputs.append(tmp)
 ret = paddle.sums(inputs)
-loss = paddle.reduce_sum(ret)
+loss = paddle.fluid.layers.reduce_sum(ret)
 loss.backward()
 """

@@ -543,7 +543,7 @@ def name_scope(prefix=None):
 import paddle
 paddle.enable_static()
 with paddle.static.name_scope("s1"):
-    a = paddle.data(name='data', shape=[None, 1], dtype='int32')
+    a = paddle.fluid.data(name='data', shape=[None, 1], dtype='int32')
     b = a + 1
     with paddle.static.name_scope("s2"):
         c = b * 1
@@ -1193,7 +1193,7 @@ class Variable(object):
 tmp.stop_gradient=False
 inputs.append(tmp)
 ret = paddle.sums(inputs)
-loss = paddle.reduce_sum(ret)
+loss = paddle.fluid.layers.reduce_sum(ret)
 loss.backward()
 """
@@ -1343,7 +1343,9 @@ class Variable(object):
 .. code-block:: python

 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()

 cur_program = fluid.Program()
 cur_block = cur_program.current_block()
 new_variable = cur_block.create_var(name="X",
@@ -5355,8 +5357,8 @@ def default_startup_program():
 main_program = paddle.static.Program()
 startup_program = paddle.static.Program()
 with paddle.static.program_guard(main_program=main_program, startup_program=startup_program):
-    x = paddle.data(name="x", shape=[-1, 784], dtype='float32')
-    y = paddle.data(name="y", shape=[-1, 1], dtype='int32')
+    x = paddle.fluid.data(name="x", shape=[-1, 784], dtype='float32')
+    y = paddle.fluid.data(name="y", shape=[-1, 1], dtype='int32')
     z = paddle.static.nn.fc(name="fc", x=x, size=10, activation="relu")

 print("main program is: {}".format(paddle.static.default_main_program()))
@@ -5370,7 +5372,7 @@ def default_main_program():
 This API can be used to get ``default main program`` which store the
 descriptions of Ops and tensors.
-For example ``z = paddle.elementwise_add(x, y)`` will create a new ``elementwise_add``
+For example ``z = paddle.fluid.layers.elementwise_add(x, y)`` will create a new ``elementwise_add``
 Op and a new ``z`` tensor, and they will be recorded in ``default main program`` .
 The ``default main program`` is the default value for ``Program`` parameter in
@@ -5389,15 +5391,15 @@ def default_main_program():
 paddle.enable_static()
 # Sample Network:
-data = paddle.data(name='image', shape=[None, 3, 224, 224], dtype='float32')
-label = paddle.data(name='label', shape=[None, 1], dtype='int64')
+data = paddle.fluid.data(name='image', shape=[None, 3, 224, 224], dtype='float32')
+label = paddle.fluid.data(name='label', shape=[None, 1], dtype='int64')
 conv1 = paddle.static.nn.conv2d(data, 4, 5, 1, act=None)
 bn1 = paddle.static.nn.batch_norm(conv1, act='relu')
-pool1 = paddle.nn.functional.pool2d(bn1, 2, 'max', 2)
+pool1 = paddle.fluid.layers.pool2d(bn1, 2, 'max', 2)
 conv2 = paddle.static.nn.conv2d(pool1, 16, 5, 1, act=None)
 bn2 = paddle.static.nn.batch_norm(conv2, act='relu')
-pool2 = paddle.nn.functional.pool2d(bn2, 2, 'max', 2)
+pool2 = paddle.fluid.layers.pool2d(bn2, 2, 'max', 2)
 fc1 = paddle.static.nn.fc(x=pool2, size=50, activation='relu')
 fc2 = paddle.static.nn.fc(x=fc1, size=102, activation='softmax')

@@ -1110,9 +1110,6 @@ def assign_skip_lod_tensor_array(input, output):
 def while_loop(cond, body, loop_vars, is_test=False, name=None):
     """
     :api_attr: Static Graph
-    :alias_main: paddle.nn.while_loop
-    :alias: paddle.nn.while_loop,paddle.nn.control_flow.while_loop
-    :old_api: paddle.fluid.layers.while_loop

     while_loop is one of the control flows. Repeats while_loop `body` until `cond` returns False.
@@ -1151,6 +1148,9 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
 import paddle.fluid as fluid
 import paddle.fluid.layers as layers
+import paddle

+paddle.enable_static()

 def cond(i, ten):
     return i < ten
@@ -2506,21 +2506,21 @@ def case(pred_fn_pairs, default=None, name=None):
 paddle.enable_static()

 def fn_1():
-    return paddle.fill_constant(shape=[1, 2], dtype='float32', value=1)
+    return paddle.fluid.layers.fill_constant(shape=[1, 2], dtype='float32', value=1)

 def fn_2():
-    return paddle.fill_constant(shape=[2, 2], dtype='int32', value=2)
+    return paddle.fluid.layers.fill_constant(shape=[2, 2], dtype='int32', value=2)

 def fn_3():
-    return paddle.fill_constant(shape=[3], dtype='int32', value=3)
+    return paddle.fluid.layers.fill_constant(shape=[3], dtype='int32', value=3)

 main_program = paddle.static.default_startup_program()
 startup_program = paddle.static.default_main_program()
 with paddle.static.program_guard(main_program, startup_program):
-    x = paddle.fill_constant(shape=[1], dtype='float32', value=0.3)
-    y = paddle.fill_constant(shape=[1], dtype='float32', value=0.1)
-    z = paddle.fill_constant(shape=[1], dtype='float32', value=0.2)
+    x = paddle.fluid.layers.fill_constant(shape=[1], dtype='float32', value=0.3)
+    y = paddle.fluid.layers.fill_constant(shape=[1], dtype='float32', value=0.1)
+    z = paddle.fluid.layers.fill_constant(shape=[1], dtype='float32', value=0.2)

     pred_1 = paddle.less_than(z, x)  # true: 0.2 < 0.3
     pred_2 = paddle.less_than(x, y)  # false: 0.3 < 0.1
@@ -3626,19 +3626,19 @@ def switch_case(branch_index, branch_fns, default=None, name=None):
 paddle.enable_static()

 def fn_1():
-    return paddle.fill_constant(shape=[1, 2], dtype='float32', value=1)
+    return paddle.fluid.layers.fill_constant(shape=[1, 2], dtype='float32', value=1)

 def fn_2():
-    return paddle.fill_constant(shape=[2, 2], dtype='int32', value=2)
+    return paddle.fluid.layers.fill_constant(shape=[2, 2], dtype='int32', value=2)

 def fn_3():
-    return paddle.fill_constant(shape=[3], dtype='int32', value=3)
+    return paddle.fluid.layers.fill_constant(shape=[3], dtype='int32', value=3)

 main_program = paddle.static.default_startup_program()
 startup_program = paddle.static.default_main_program()
 with paddle.static.program_guard(main_program, startup_program):
-    index_1 = paddle.fill_constant(shape=[1], dtype='int32', value=1)
-    index_2 = paddle.fill_constant(shape=[1], dtype='int32', value=2)
+    index_1 = paddle.fluid.layers.fill_constant(shape=[1], dtype='int32', value=1)
+    index_2 = paddle.fluid.layers.fill_constant(shape=[1], dtype='int32', value=2)

     out_1 = paddle.static.nn.switch_case(
         branch_index=index_1,

File diff suppressed because it is too large.

@@ -52,9 +52,6 @@ def _decay_step_counter(begin=0):
 def noam_decay(d_model, warmup_steps, learning_rate=1.0):
     """
-    :alias_main: paddle.nn.functional.noam_decay
-    :alias: paddle.nn.functional.noam_decay,paddle.nn.functional.learning_rate.noam_decay
-    :old_api: paddle.fluid.layers.noam_decay

     Noam decay method. The numpy implementation of noam decay as follows.
@@ -115,9 +112,6 @@ def noam_decay(d_model, warmup_steps, learning_rate=1.0):
 def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
     """
-    :alias_main: paddle.nn.functional.exponential_decay
-    :alias: paddle.nn.functional.exponential_decay,paddle.nn.functional.learning_rate.exponential_decay
-    :old_api: paddle.fluid.layers.exponential_decay

     Applies exponential decay to the learning rate.
@@ -149,6 +143,9 @@ def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
 .. code-block:: python

 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()

 base_lr = 0.1
 sgd_optimizer = fluid.optimizer.SGD(
     learning_rate=fluid.layers.exponential_decay(
@@ -176,9 +173,6 @@ def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
 def natural_exp_decay(learning_rate, decay_steps, decay_rate, staircase=False):
     """
-    :alias_main: paddle.nn.functional.natural_exp_decay
-    :alias: paddle.nn.functional.natural_exp_decay,paddle.nn.functional.learning_rate.natural_exp_decay
-    :old_api: paddle.fluid.layers.natural_exp_decay

     Applies natural exponential decay to the initial learning rate.
@@ -210,6 +204,9 @@ Applies natural exponential decay to the initial learning rate.
 .. code-block:: python

 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()

 base_lr = 0.1
 sgd_optimizer = fluid.optimizer.SGD(
     learning_rate=fluid.layers.natural_exp_decay(
@@ -237,9 +234,6 @@ Applies natural exponential decay to the initial learning rate.
 def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False):
     """
-    :alias_main: paddle.nn.functional.inverse_time_decay
-    :alias: paddle.nn.functional.inverse_time_decay,paddle.nn.functional.learning_rate.inverse_time_decay
-    :old_api: paddle.fluid.layers.inverse_time_decay

     Applies inverse time decay to the initial learning rate.
@@ -271,6 +265,8 @@ def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False):
 .. code-block:: python

 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()

 base_lr = 0.1
 sgd_optimizer = fluid.optimizer.SGD(
     learning_rate=fluid.layers.inverse_time_decay(
@@ -302,10 +298,6 @@ def polynomial_decay(learning_rate,
                      power=1.0,
                      cycle=False):
     """
-    :alias_main: paddle.nn.functional.polynomial_decay
-    :alias: paddle.nn.functional.polynomial_decay,paddle.nn.functional.learning_rate.polynomial_decay
-    :old_api: paddle.fluid.layers.polynomial_decay

     Applies polynomial decay to the initial learning rate.

     .. code-block:: text
@@ -371,9 +363,6 @@ def polynomial_decay(learning_rate,
 def piecewise_decay(boundaries, values):
     """
-    :alias_main: paddle.nn.functional.piecewise_decay
-    :alias: paddle.nn.functional.piecewise_decay,paddle.nn.functional.learning_rate.piecewise_decay
-    :old_api: paddle.fluid.layers.piecewise_decay

     Applies piecewise decay to the initial learning rate.
@@ -401,6 +390,8 @@ Applies piecewise decay to the initial learning rate.
 .. code-block:: python

 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()

 boundaries = [10000, 20000]
 values = [1.0, 0.5, 0.1]
 optimizer = fluid.optimizer.Momentum(
@@ -450,9 +441,6 @@ Applies piecewise decay to the initial learning rate.
 def cosine_decay(learning_rate, step_each_epoch, epochs):
     """
-    :alias_main: paddle.nn.functional.cosine_decay
-    :alias: paddle.nn.functional.cosine_decay,paddle.nn.functional.learning_rate.cosine_decay
-    :old_api: paddle.fluid.layers.cosine_decay

     Applies cosine decay to the learning rate.
@@ -499,9 +487,6 @@ def cosine_decay(learning_rate, step_each_epoch, epochs):
 def linear_lr_warmup(learning_rate, warmup_steps, start_lr, end_lr):
     """
-    :alias_main: paddle.nn.functional.linear_lr_warmup
-    :alias: paddle.nn.functional.linear_lr_warmup,paddle.nn.functional.learning_rate.linear_lr_warmup
-    :old_api: paddle.fluid.layers.linear_lr_warmup

     This operator use the linear learning rate warm up strategy to adjust the learning rate preliminarily before the normal learning rate scheduling.
     For more information, please refer to `Bag of Tricks for Image Classification with Convolutional Neural Networks <https://arxiv.org/abs/1812.01187>`_
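
These schedules lose only their paddle.nn.functional.* aliases; the fluid entry points above still work. Code targeting the 2.0 API is expected to move to the scheduler classes instead — a hedged sketch, assuming the paddle.optimizer.lr module of the 2.0 release line (not part of this diff):

    import paddle

    # NoamDecay here stands in for fluid.layers.noam_decay; the class name
    # and module are an assumption about the 2.0 scheduler API
    scheduler = paddle.optimizer.lr.NoamDecay(d_model=512, warmup_steps=4000)
    linear = paddle.nn.Linear(10, 10)
    sgd = paddle.optimizer.SGD(learning_rate=scheduler,
                               parameters=linear.parameters())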

@@ -59,9 +59,6 @@ def center_loss(input,
                 update_center=True):
     """
     :api_attr: Static Graph
-    :alias_main: paddle.nn.functional.center_loss
-    :alias: paddle.nn.functional.center_loss,paddle.nn.functional.loss.center_loss
-    :old_api: paddle.fluid.layers.center_loss

     **Center loss Cost layer**
@@ -92,6 +89,8 @@ def center_loss(input,
 .. code-block:: python

 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()

 input = fluid.data(name='x',shape=[20,30],dtype='float32')
 label = fluid.data(name='y',shape=[20,1],dtype='int64')
@@ -153,9 +152,6 @@ def center_loss(input,
 def bpr_loss(input, label, name=None):
     """
-    :alias_main: paddle.nn.functional.bpr_loss
-    :alias: paddle.nn.functional.bpr_loss,paddle.nn.functional.loss.bpr_loss
-    :old_api: paddle.fluid.layers.bpr_loss

     **Bayesian Personalized Ranking Loss Operator**
@@ -183,6 +179,9 @@ def bpr_loss(input, label, name=None):
 .. code-block:: python

 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()

 neg_size = 10
 label = fluid.data(
@@ -1309,9 +1308,6 @@ def softmax_with_cross_entropy(logits,
 def rank_loss(label, left, right, name=None):
     """
-    :alias_main: paddle.nn.functional.rank_loss
-    :alias: paddle.nn.functional.rank_loss,paddle.nn.functional.loss.rank_loss
-    :old_api: paddle.fluid.layers.rank_loss

     This operator implements the sort loss layer in the RankNet model. RankNet is a pairwise ranking model
     with a training sample consisting of a pair of documents (A and B), The label (P)
@@ -1349,6 +1345,8 @@ def rank_loss(label, left, right, name=None):
 .. code-block:: python

 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()

 label = fluid.data(name="label", shape=[-1, 1], dtype="float32")
 left = fluid.data(name="left", shape=[-1, 1], dtype="float32")
 right = fluid.data(name="right", shape=[-1, 1], dtype="float32")
@@ -1491,9 +1489,6 @@ def teacher_student_sigmoid_loss(input,
                                  soft_max_up_bound=15.0,
                                  soft_max_lower_bound=-15.0):
     """
-    :alias_main: paddle.nn.functional.teacher_student_sigmoid_loss
-    :alias: paddle.nn.functional.teacher_student_sigmoid_loss,paddle.nn.functional.loss.teacher_student_sigmoid_loss
-    :old_api: paddle.fluid.layers.teacher_student_sigmoid_loss

     **Teacher Student Log Loss Layer**
@@ -1521,7 +1516,8 @@ def teacher_student_sigmoid_loss(input,
 .. code-block:: python

 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()

 batch_size = 64
 label = fluid.data(
     name="label", shape=[batch_size, 1], dtype="int64")

File diff suppressed because it is too large.

@@ -488,7 +488,7 @@ def rnn(cell,
 inputs = paddle.rand((4, 23, 16))
 prev_h = paddle.randn((4, 32))
-outputs, final_states = paddle.nn.functional.rnn(cell, inputs, prev_h)
+outputs, final_states = paddle.fluid.layers.rnn(cell, inputs, prev_h)

 """
 if in_dygraph_mode():
@@ -711,7 +711,7 @@ def birnn(cell_fw,
 hf, cf = paddle.rand((4, 32)), paddle.rand((4, 32))
 hb, cb = paddle.rand((4, 32)), paddle.rand((4, 32))
 initial_states = ((hf, cf), (hb, cb))
-outputs, final_states = paddle.nn.functional.birnn(
+outputs, final_states = paddle.fluid.layers.birnn(
     cell_fw, cell_bw, inputs, initial_states)

 """
@@ -3046,9 +3046,6 @@ def beam_search(pre_ids,
                 name=None,
                 return_parent_idx=False):
     """
-    :alias_main: paddle.nn.beam_search
-    :alias: paddle.nn.beam_search,paddle.nn.decode.beam_search
-    :old_api: paddle.fluid.layers.beam_search

     Beam search is a classical algorithm for selecting candidate words in a
     machine translation task.
@@ -3126,6 +3123,8 @@ def beam_search(pre_ids,
 .. code-block:: python

 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()

 # Suppose `probs` contains predicted results from the computation
 # cell and `pre_ids` and `pre_scores` is the output of beam_search
@@ -3197,9 +3196,6 @@ def beam_search(pre_ids,
 def beam_search_decode(ids, scores, beam_size, end_id, name=None):
     """
-    :alias_main: paddle.nn.beam_search_decode
-    :alias: paddle.nn.beam_search_decode,paddle.nn.decode.beam_search_decode
-    :old_api: paddle.fluid.layers.beam_search_decode

     This operator is used after beam search has completed. It constructs the
     full predicted sequences for each sample by walking back along the search
@@ -3246,7 +3242,8 @@ def beam_search_decode(ids, scores, beam_size, end_id, name=None):
 .. code-block:: python

 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()

 # Suppose `ids` and `scores` are LodTensorArray variables reserving
 # the selected ids and scores of all steps
 ids = fluid.layers.create_array(dtype='int64')

@@ -605,8 +605,6 @@ def assign(input, output=None):
 def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
     """
-    :alias_main: paddle.fill_constant
-    :alias: paddle.tensor.fill_constant, paddle.tensor.creation.fill_constant

     This OP creates a Tensor with specified `shape` and `dtype`, and
     initializes it with a constant specified by `value`.
@@ -715,7 +713,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
     return out

-@deprecated(since='1.8.0', update_to="paddle.fill_constant")
+@deprecated(since='1.8.0', update_to="paddle.fluid.layers.fill_constant")
 @templatedoc()
 def fill_constant_batch_size_like(input,
                                   shape,
@@ -1228,7 +1226,7 @@ def has_inf(x):
 import paddle
 data = paddle.randn(shape=[4, 32, 32], dtype="float32")
-res = paddle.has_inf(data)
+res = paddle.fluid.layers.has_inf(data)
 # [False]
 """
@@ -1257,7 +1255,7 @@ def has_nan(x):
 import paddle
 data = paddle.randn(shape=[2,3], dtype="float32")
-res = paddle.has_nan(data)
+res = paddle.fluid.layers.has_nan(data)
 # [False]
 """

@@ -851,6 +851,9 @@ class DetectionMAP(object):
 import paddle.fluid as fluid
+import paddle
+paddle.enable_static()

 batch_size = None # can be any size
 image_boxs_num = 10
 bounding_bboxes_num = 21

@@ -105,7 +105,7 @@ class ReduceMeanLayer(object):
     """
     operation
     """
-    mean = paddle.reduce_mean(input)
+    mean = paddle.fluid.layers.reduce_mean(input)
     return mean
@@ -181,7 +181,7 @@ class ElementwiseSubLayer(object):
     """
     operation
     """
-    sub = paddle.elementwise_sub(x, y)
+    sub = paddle.fluid.layers.elementwise_sub(x, y)
     return sub
@@ -203,7 +203,7 @@ class ConstantLayer(object):
     shape = list(shape)
     input_shape = paddle.shape(input)
     shape[0] = input_shape[0]
-    constant = paddle.fill_constant(shape, dtype, value)
+    constant = paddle.fluid.layers.fill_constant(shape, dtype, value)
     return constant
@@ -473,8 +473,8 @@ class BOW(paddle.nn.Layer):
     right_emb = paddle.reshape(
         right_emb, shape=[-1, self.seq_len, self.bow_dim])
-    bow_left = paddle.reduce_sum(left_emb, dim=1)
-    bow_right = paddle.reduce_sum(right_emb, dim=1)
+    bow_left = paddle.fluid.layers.reduce_sum(left_emb, dim=1)
+    bow_right = paddle.fluid.layers.reduce_sum(right_emb, dim=1)
     softsign_layer = SoftsignLayer()
     left_soft = softsign_layer.ops(bow_left)
     right_soft = softsign_layer.ops(bow_right)

@@ -64,9 +64,9 @@ def get_source_code(func):
 class StaticCode1():
     # TODO: Transform return statement
     def dyfunc_with_if_else(x_v, label=None):
-        __return_1 = paddle.fill_constant(shape=[1], dtype='bool', value=False)
-        __return_0 = paddle.fill_constant(shape=[1], dtype='bool', value=False)
-        __return_value_init_0 = paddle.fill_constant(
+        __return_1 = paddle.fluid.layers.fill_constant(shape=[1], dtype='bool', value=False)
+        __return_0 = paddle.fluid.layers.fill_constant(shape=[1], dtype='bool', value=False)
+        __return_value_init_0 = paddle.fluid.layers.fill_constant(
             shape=[1], dtype='float64', value=0.0)
         __return_value_0 = __return_value_init_0
@@ -84,7 +84,7 @@ class StaticCode1():
         def true_fn_1(__return_0, __return_value_0, label, x_v):
             loss = fluid.layers.cross_entropy(x_v, label)
-            __return_0 = paddle.fill_constant(
+            __return_0 = paddle.fluid.layers.fill_constant(
                 shape=[1], dtype='bool', value=True)
             __return_value_0 = loss
             return __return_0, __return_value_0
@@ -98,7 +98,7 @@ class StaticCode1():
             (__return_0, __return_value_0), (__return_0, __return_value_0)))

         def true_fn_2(__return_1, __return_value_0, x_v):
-            __return_1 = paddle.fill_constant(
+            __return_1 = paddle.fluid.layers.fill_constant(
                 shape=[1], dtype='bool', value=True)
             __return_value_0 = x_v
             return __return_1, __return_value_0
@@ -116,9 +116,9 @@ class StaticCode1():
 class StaticCode2():
     # TODO: Transform return statement
     def dyfunc_with_if_else(x_v, label=None):
-        __return_3 = paddle.fill_constant(shape=[1], dtype='bool', value=False)
-        __return_2 = paddle.fill_constant(shape=[1], dtype='bool', value=False)
-        __return_value_init_1 = paddle.fill_constant(
+        __return_3 = paddle.fluid.layers.fill_constant(shape=[1], dtype='bool', value=False)
+        __return_2 = paddle.fluid.layers.fill_constant(shape=[1], dtype='bool', value=False)
+        __return_value_init_1 = paddle.fluid.layers.fill_constant(
             shape=[1], dtype='float64', value=0.0)
         __return_value_1 = __return_value_init_1
@@ -136,7 +136,7 @@ class StaticCode2():
         def true_fn_4(__return_2, __return_value_1, label, x_v):
             loss = fluid.layers.cross_entropy(x_v, label)
-            __return_2 = paddle.fill_constant(
+            __return_2 = paddle.fluid.layers.fill_constant(
                 shape=[1], dtype='bool', value=True)
             __return_value_1 = loss
             return __return_2, __return_value_1
@@ -150,7 +150,7 @@ class StaticCode2():
             (__return_2, __return_value_1), (__return_2, __return_value_1)))

         def true_fn_5(__return_3, __return_value_1, x_v):
-            __return_3 = paddle.fill_constant(
+            __return_3 = paddle.fluid.layers.fill_constant(
                 shape=[1], dtype='bool', value=True)
             __return_value_1 = x_v
             return __return_3, __return_value_1

@@ -187,8 +187,8 @@ class PtbModel(paddle.nn.Layer):
     loss = paddle.nn.functional.softmax_with_cross_entropy(
         logits=projection, label=label, soft_label=False)
     loss = paddle.reshape(loss, shape=[-1, self.num_steps])
-    loss = paddle.reduce_mean(loss, dim=[0])
-    loss = paddle.reduce_sum(loss)
+    loss = paddle.fluid.layers.reduce_mean(loss, dim=[0])
+    loss = paddle.fluid.layers.reduce_sum(loss)

     return loss, last_hidden, last_cell

@@ -153,7 +153,7 @@ class ResNet(paddle.nn.Layer):
         self.conv = ConvBNLayer(
             num_channels=3, num_filters=64, filter_size=7, stride=2, act='relu')
-        self.pool2d_max = paddle.nn.Pool2D(
+        self.pool2d_max = paddle.fluid.dygraph.Pool2D(
            pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')

         self.bottleneck_block_list = []
@@ -171,7 +171,7 @@ class ResNet(paddle.nn.Layer):
             self.bottleneck_block_list.append(bottleneck_block)
             shortcut = True

-        self.pool2d_avg = paddle.nn.Pool2D(
+        self.pool2d_avg = paddle.fluid.dygraph.Pool2D(
             pool_size=7, pool_type='avg', global_pooling=True)

         self.pool2d_avg_output = num_filters[len(num_filters) - 1] * 4 * 1 * 1
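
paddle.nn.Pool2D is among the removed re-exports, so this model falls back to the dygraph class; the layer itself is unchanged. A sketch of the fallback in isolation, with the constructor arguments copied from the hunk (input shape is illustrative):

    import paddle

    pool2d_max = paddle.fluid.dygraph.Pool2D(
        pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
    x = paddle.rand([1, 64, 56, 56])
    y = pool2d_max(x)
    print(y.shape)  # [1, 64, 28, 28]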

@@ -51,24 +51,24 @@ class TestDataLayerNotCheck(unittest.TestCase):
 class TestVariableTransFunc(unittest.TestCase):
     def test_create_fill_constant_node(self):
         node = create_fill_constant_node("a", 1.0)
-        source = "a = paddle.fill_constant(shape=[1], dtype='float64', value=1.0)"
+        source = "a = paddle.fluid.layers.fill_constant(shape=[1], dtype='float64', value=1.0)"
         self.assertEqual(ast_to_source_code(node).strip(), source)

         node = create_fill_constant_node("b", True)
-        source = "b = paddle.fill_constant(shape=[1], dtype='bool', value=True)"
+        source = "b = paddle.fluid.layers.fill_constant(shape=[1], dtype='bool', value=True)"
         self.assertEqual(ast_to_source_code(node).strip(), source)

         if six.PY2:
             node = create_fill_constant_node("c", 214)
-            source = "c = paddle.fill_constant(shape=[1], dtype='int32', value=214)"
+            source = "c = paddle.fluid.layers.fill_constant(shape=[1], dtype='int32', value=214)"
             self.assertEqual(ast_to_source_code(node).strip(), source)

             node = create_fill_constant_node("d", long(10086))
-            source = "d = paddle.fill_constant(shape=[1], dtype='int64', value=10086)"
+            source = "d = paddle.fluid.layers.fill_constant(shape=[1], dtype='int64', value=10086)"
             self.assertEqual(ast_to_source_code(node).strip(), source)
         else:
             node = create_fill_constant_node("c", 4293)
-            source = "c = paddle.fill_constant(shape=[1], dtype='int64', value=4293)"
+            source = "c = paddle.fluid.layers.fill_constant(shape=[1], dtype='int64', value=4293)"
             self.assertEqual(ast_to_source_code(node).strip(), source)

         self.assertIsNone(create_fill_constant_node("e", None))

@@ -40,9 +40,9 @@ class SquaredMatSubFusePassTest(InferencePassTest):
         matmul_ab_square = paddle.square(matmul_ab)
         matmul_square_ab = paddle.matmul(data_a_square, data_b_square)

-        scale = paddle.fill_constant(shape=[1], value=0.5, dtype='float32')
+        scale = paddle.fluid.layers.fill_constant(shape=[1], value=0.5, dtype='float32')

-        sub_val = paddle.elementwise_sub(matmul_ab_square, matmul_square_ab)
+        sub_val = paddle.fluid.layers.elementwise_sub(matmul_ab_square, matmul_square_ab)
         squared_mat_sub_out = fluid.layers.elementwise_mul(sub_val, scale)

         self.feeds = {

@@ -26,7 +26,7 @@ import paddle.fluid as fluid
 import paddle.fluid.dygraph as dygraph
 from paddle.fluid import core
 from paddle.fluid.optimizer import SGDOptimizer
-from paddle.nn import Conv2d, Pool2D, Linear, SyncBatchNorm
+from paddle.nn import Conv2d, Linear, SyncBatchNorm
 from paddle.fluid.dygraph.base import to_variable
 from test_dist_base import runtime_main, TestParallelDyGraphRunnerBase

Some files were not shown because too many files have changed in this diff.
