[API 2.0] Update 2.0 API from fluid to paddle. (#27598)

Branch: my_2.0rc
Wilber authored 4 years ago, committed by GitHub
parent 7f9b198d59
commit 488152a6d0

@@ -94,12 +94,13 @@ def scope_guard(scope):
     Examples:
         .. code-block:: python
-            import paddle.fluid as fluid
+            import paddle
             import numpy
+            paddle.enable_static()
-            new_scope = fluid.Scope()
-            with fluid.scope_guard(new_scope):
-                fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
+            new_scope = paddle.static.Scope()
+            with paddle.static.scope_guard(new_scope):
+                paddle.static.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), paddle.CPUPlace())
             numpy.array(new_scope.find_var("data").get_tensor())
     """

@@ -13546,15 +13546,15 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
     """
     :api_attr: Static Graph
-    This OP is used to register customized Python OP to Paddle Fluid. The design
-    principe of py_func is that LodTensor and numpy array can be converted to each
+    This OP is used to register customized Python OP to Paddle. The design
+    principe of py_func is that Tensor and numpy array can be converted to each
     other easily. So you can use Python and numpy API to register a python OP.
     The forward function of the registered OP is ``func`` and the backward function
     of that is ``backward_func``. Paddle will call ``func`` at forward runtime and
     call ``backward_func`` at backward runtime(if ``backward_func`` is not None).
-    ``x`` is the input of ``func``, whose type must be LoDTensor; ``out`` is
-    the output of ``func``, whose type can be either LoDTensor or numpy array.
+    ``x`` is the input of ``func``, whose type must be Tensor; ``out`` is
+    the output of ``func``, whose type can be either Tensor or numpy array.
     The input of the backward function ``backward_func`` is ``x``, ``out`` and
     the gradient of ``out``. If some variables of ``out`` have no gradient, the
@@ -13572,14 +13572,14 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
         func (callable): The forward function of the registered OP. When the network
             is running, the forward output ``out`` will be calculated according to this
             function and the forward input ``x``. In ``func`` , it's suggested that we
-            actively convert LoDTensor into a numpy array, so that we can use Python and
+            actively convert Tensor into a numpy array, so that we can use Python and
             numpy API arbitrarily. If not, some operations of numpy may not be compatible.
         x (Variable|tuple(Variale)|list[Variale]): The input of the forward function ``func``.
-            It can be Variable|tuple(Variale)|list[Variale], where Variable is LoDTensor or
+            It can be Variable|tuple(Variale)|list[Variale], where Variable is Tensor or
             Tenosor. In addition, Multiple Variable should be passed in the form of tuple(Variale)
             or list[Variale].
         out (Variable|tuple(Variale)|list[Variale]): The output of the forward function ``func``,
-            it can be Variable|tuple(Variale)|list[Variale], where Variable can be either LoDTensor
+            it can be Variable|tuple(Variale)|list[Variale], where Variable can be either Tensor
             or numpy array. Since Paddle cannot automatically infer the shape and type of ``out``,
             you must create ``out`` in advance.
         backward_func (callable, optional): The backward function of the registered OP.
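
A minimal sketch of the contract these arguments describe, written against the 2.0 names used in this commit (the function name `double`, the variable names, and the shapes are illustrative, not from the source): ``out`` is pre-created because py_func cannot infer its shape or dtype.

    import numpy as np
    import paddle

    paddle.enable_static()

    def double(x):
        # Convert the input Tensor to a numpy array before applying numpy ops.
        return np.array(x) * 2

    x = paddle.static.data(name='x', shape=[2, 3], dtype='float32')

    # py_func cannot infer the shape/dtype of the output, so create it in advance.
    out = paddle.static.default_main_program().current_block().create_var(
        name='double_out', dtype='float32', shape=[2, 3])

    paddle.static.nn.py_func(func=double, x=x, out=out)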
@@ -13600,16 +13600,18 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
         .. code-block:: python
             # example 1:
-            import paddle.fluid as fluid
+            import paddle
             import six
-            # Creates a forward function, LodTensor can be input directly without
+            paddle.enable_static()
+            # Creates a forward function, Tensor can be input directly without
             # being converted into numpy array.
             def tanh(x):
                 return np.tanh(x)
             # Skip x in backward function and return the gradient of x
-            # LodTensor must be actively converted to numpy array, otherwise,
+            # Tensor must be actively converted to numpy array, otherwise,
             # operations such as +/- can't be used.
             def tanh_grad(y, dy):
                 return np.array(dy) * (1 - np.square(np.array(y)))
@@ -13619,36 +13621,38 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
                 print(x)
             def create_tmp_var(name, dtype, shape):
-                return fluid.default_main_program().current_block().create_var(
+                return paddle.static.default_main_program().current_block().create_var(
                     name=name, dtype=dtype, shape=shape)
             def simple_net(img, label):
                 hidden = img
                 for idx in six.moves.range(4):
-                    hidden = fluid.layers.fc(hidden, size=200)
+                    hidden = paddle.static.nn.fc(hidden, size=200)
                     new_hidden = create_tmp_var(name='hidden_{}'.format(idx),
                         dtype=hidden.dtype, shape=hidden.shape)
                     # User-defined forward and backward
-                    hidden = fluid.layers.py_func(func=tanh, x=hidden,
+                    hidden = paddle.static.nn.py_func(func=tanh, x=hidden,
                         out=new_hidden, backward_func=tanh_grad,
                         skip_vars_in_backward_input=hidden)
-                # User-defined debug functions that print out the input LodTensor
-                fluid.layers.py_func(func=debug_func, x=hidden, out=None)
-                prediction = fluid.layers.fc(hidden, size=10, act='softmax')
-                loss = fluid.layers.cross_entropy(input=prediction, label=label)
-                return fluid.layers.mean(loss)
+                # User-defined debug functions that print out the input Tensor
+                paddle.static.nn.py_func(func=debug_func, x=hidden, out=None)
+                prediction = paddle.static.nn.fc(hidden, size=10, act='softmax')
+                loss = paddle.static.nn.cross_entropy(input=prediction, label=label)
+                return paddle.mean(loss)
             # example 2:
-            # This example shows how to turn LoDTensor into numpy array and
+            # This example shows how to turn Tensor into numpy array and
             # use numpy API to register an Python OP
-            import paddle.fluid as fluid
+            import paddle
             import numpy as np
+            paddle.enable_static()
             def element_wise_add(x, y):
-                # LodTensor must be actively converted to numpy array, otherwise,
+                # Tensor must be actively converted to numpy array, otherwise,
                 # numpy.shape can't be used.
                 x = np.array(x)
                 y = np.array(y)
@@ -13664,24 +13668,24 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
                 return result
             def create_tmp_var(name, dtype, shape):
-                return fluid.default_main_program().current_block().create_var(
+                return paddle.static.default_main_program().current_block().create_var(
                     name=name, dtype=dtype, shape=shape)
             def py_func_demo():
-                start_program = fluid.default_startup_program()
-                main_program = fluid.default_main_program()
+                start_program = paddle.static.default_startup_program()
+                main_program = paddle.static.default_main_program()
                 # Input of the forward function
-                x = fluid.data(name='x', shape=[2,3], dtype='int32')
-                y = fluid.data(name='y', shape=[2,3], dtype='int32')
+                x = paddle.static.data(name='x', shape=[2,3], dtype='int32')
+                y = paddle.static.data(name='y', shape=[2,3], dtype='int32')
                 # Output of the forward function, name/dtype/shape must be specified
                 output = create_tmp_var('output','int32', [3,1])
                 # Multiple Variable should be passed in the form of tuple(Variale) or list[Variale]
-                fluid.layers.py_func(func=element_wise_add, x=[x,y], out=output)
+                paddle.static.nn.py_func(func=element_wise_add, x=[x,y], out=output)
-                exe=fluid.Executor(fluid.CPUPlace())
+                exe=paddle.static.Executor(paddle.CPUPlace())
                 exe.run(start_program)
                 # Feed numpy array to main_program
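
The hunk is truncated after the "Feed numpy array" comment. A hedged completion of example 2's remaining steps (the random inputs and the run/fetch call are assumptions inferred from the shapes declared above, not part of the shown diff):

    # Hypothetical continuation: feed numpy arrays and fetch the py_func output.
    input1 = np.random.randint(1, 10, size=[2, 3], dtype='int32')
    input2 = np.random.randint(1, 10, size=[2, 3], dtype='int32')
    out = exe.run(main_program,
                  feed={'x': input1, 'y': input2},
                  fetch_list=[output])
    print(out[0])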

@@ -103,9 +103,9 @@ def create_parameter(shape,
     Examples:
         .. code-block:: python
-            import paddle.fluid as fluid
-            import paddle.fluid.layers as layers
-            W = layers.create_parameter(shape=[784, 200], dtype='float32')
+            import paddle
+            paddle.enable_static()
+            W = paddle.static.create_parameter(shape=[784, 200], dtype='float32')
     """
     check_type(shape, 'shape', (list, tuple, numpy.ndarray), 'create_parameter')
     for item in shape:
@@ -161,9 +161,9 @@ def create_global_var(shape,
     Examples:
         .. code-block:: python
-            import paddle.fluid as fluid
-            import paddle.fluid.layers as layers
-            var = layers.create_global_var(shape=[2,3], value=1.0, dtype='float32',
+            import paddle
+            paddle.enable_static()
+            var = paddle.static.create_global_var(shape=[2,3], value=1.0, dtype='float32',
                                            persistable=True, force_cpu=True, name='new_var')
     """
     check_type(shape, 'shape', (list, tuple, numpy.ndarray),
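
Assembled from the new-side lines of these two hunks, a minimal sketch exercising both migrated entry points in one script (assuming static graph mode, as the examples enable):

    import paddle

    paddle.enable_static()

    # A trainable parameter and a persistable global variable, created through
    # the paddle.static aliases this commit exposes.
    W = paddle.static.create_parameter(shape=[784, 200], dtype='float32')
    var = paddle.static.create_global_var(shape=[2, 3], value=1.0, dtype='float32',
                                          persistable=True, force_cpu=True,
                                          name='new_var')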

@@ -61,15 +61,15 @@ class ParamAttr(object):
     Examples:
         .. code-block:: python
-            import paddle.fluid as fluid
+            import paddle
+            paddle.enable_static()
-            w_param_attrs = fluid.ParamAttr(name="fc_weight",
+            weight_attr = paddle.ParamAttr(name="weight",
                                            learning_rate=0.5,
-                                           regularizer=fluid.regularizer.L2Decay(1.0),
+                                           regularizer=paddle.regularizer.L2Decay(1.0),
                                            trainable=True)
-            print(w_param_attrs.name) # "fc_weight"
-            x = fluid.data(name='X', shape=[None, 1], dtype='float32')
-            y_predict = fluid.layers.fc(input=x, size=10, param_attr=w_param_attrs)
+            print(weight_attr.name) # "weight"
+            paddle.nn.Linear(3, 4, weight_attr=weight_attr)
     """
     def __init__(self,
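
The migrated ParamAttr example, assembled into one runnable snippet (a minimal sketch; binding the attribute to a Linear layer follows the new side of the hunk, and the `linear` variable name is illustrative):

    import paddle

    paddle.enable_static()

    # weight_attr controls how the Linear weight parameter is created,
    # regularized, and scaled during optimization.
    weight_attr = paddle.ParamAttr(name="weight",
                                   learning_rate=0.5,
                                   regularizer=paddle.regularizer.L2Decay(1.0),
                                   trainable=True)
    print(weight_attr.name)  # "weight"
    linear = paddle.nn.Linear(3, 4, weight_attr=weight_attr)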

@@ -23,6 +23,7 @@ __all__ = [
 ]
 from . import nn
+from ..fluid import Scope #DEFINE_ALIAS
 from .input import data #DEFINE_ALIAS
 from .input import InputSpec #DEFINE_ALIAS
 from ..fluid.executor import Executor #DEFINE_ALIAS
@@ -50,3 +51,5 @@ from ..fluid.io import save_inference_model #DEFINE_ALIAS
 from ..fluid.io import load_inference_model #DEFINE_ALIAS
 from ..fluid.io import load_program_state #DEFINE_ALIAS
 from ..fluid.io import set_program_state #DEFINE_ALIAS
+from ..fluid.layers import create_parameter #DEFINE_ALIAS
+from ..fluid.layers import create_global_var #DEFINE_ALIAS

@@ -33,6 +33,7 @@ __all__ = [
     'multi_box_head',
     'nce',
     'prelu',
+    'py_func',
     'row_conv',
     'spectral_norm',
     'switch_case',
@@ -57,6 +58,7 @@ from ...fluid.layers import layer_norm #DEFINE_ALIAS
 from ...fluid.layers import multi_box_head #DEFINE_ALIAS
 from ...fluid.layers import nce #DEFINE_ALIAS
 from ...fluid.layers import prelu #DEFINE_ALIAS
+from ...fluid.layers import py_func #DEFINE_ALIAS
 from ...fluid.layers import row_conv #DEFINE_ALIAS
 from ...fluid.layers import spectral_norm #DEFINE_ALIAS
 from ...fluid.layers import switch_case #DEFINE_ALIAS

@@ -279,7 +279,6 @@
     "thresholded_relu",
     "group_norm",
     "random_crop",
-    "py_func",
     "row_conv",
     "hard_shrink",
     "ssd_loss",
