update document

wangkuiyi-patch-1
qiaolongfei 7 years ago
parent 76129f0331
commit 0ae6709174

@@ -76,13 +76,13 @@ def split_lod_tensor(input, mask, level=0):
Examples:
.. code-block:: python
- x = layers.data(name='x', shape=[1])
+ x = fluid.layers.data(name='x', shape=[1])
x.persistable = True
- y = layers.data(name='y', shape=[1])
+ y = fluid.layers.data(name='y', shape=[1])
y.persistable = True
- out_true, out_false = layers.split_lod_tensor(
+ out_true, out_false = fluid.layers.split_lod_tensor(
input=x, mask=y, level=level)
"""
@@ -990,6 +990,7 @@ def array_read(array, i):
Variable: The tensor type variable that has the data written to it.
Examples:
.. code-block:: python
+ tmp = fluid.layers.zeros(shape=[10], dtype='int32')
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
arr = layers.array_read(tmp, i=i)
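Note the last line of the example still calls `layers.array_read` without the `fluid.` prefix. A sketch of how the pieces fit together, with the prefix applied throughout and, as an assumption on my part, the array built via `array_write` before being read back:

.. code-block:: python

    import paddle.fluid as fluid

    tmp = fluid.layers.zeros(shape=[10], dtype='int32')
    i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
    arr = fluid.layers.array_write(tmp, i=i)  # arr is a LoDTensorArray
    item = fluid.layers.array_read(arr, i=i)  # read element i back out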
@@ -1027,7 +1028,7 @@ def shrink_memory(x, i, table):
def array_length(array):
"""
- **Get the length of Input LoDTensorArray**
+ **Get the Length of Input LoDTensorArray**
This function performs the operation to find the length of the input
LOD_TENSOR_ARRAY.
@@ -1048,6 +1049,7 @@ def array_length(array):
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
arr = fluid.layers.array_write(tmp, i=i)
arr_len = fluid.layers.array_length(arr)
"""
helper = LayerHelper('array_length', **locals())
tmp = helper.create_tmp_variable(dtype='int64')
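Assembled for reference, assuming the same `tmp = fluid.layers.zeros(...)` definition that the neighboring `array_read` example uses (this hunk does not show it):

.. code-block:: python

    import paddle.fluid as fluid

    tmp = fluid.layers.zeros(shape=[10], dtype='int32')
    i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
    arr = fluid.layers.array_write(tmp, i=i)
    arr_len = fluid.layers.array_length(arr)  # length of the LoDTensorArray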

@@ -163,7 +163,7 @@ def polynomial_decay(learning_rate,
power=1.0,
cycle=False):
"""
- **polynomial_decay**
+ **Polynomial Decay**
Applies polynomial decay to the initial learning rate.
@@ -180,9 +180,9 @@ def polynomial_decay(learning_rate,
learning_rate(Variable|float32): A scalar float32 value or a Variable. This
will be the initial learning rate during training
decay_steps(int32): A Python `int32` number.
- end_learning_rate(float): A Python `float` number.
- power(float): A Python `float` number
- cycle(bool, Default False): Boolean. If set true, decay the learning rate every decay_steps.
+ end_learning_rate(float, Default: 0.0001): A Python `float` number.
+ power(float, Default: 1.0): A Python `float` number
+ cycle(bool, Default: False): Boolean. If set true, decay the learning rate every decay_steps.
Returns:
The decayed learning rate
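To make the documented defaults concrete, a plain-Python sketch of polynomial decay as it is conventionally defined; this is my paraphrase of the schedule, not code quoted from the patch:

.. code-block:: python

    import math

    def polynomial_decay(learning_rate, global_step, decay_steps,
                         end_learning_rate=0.0001, power=1.0, cycle=False):
        if cycle:
            # Restart the schedule every decay_steps (avoid a zero divisor at step 0).
            decay_steps *= max(1.0, math.ceil(global_step / float(decay_steps)))
        else:
            # Clamp so the rate settles at end_learning_rate after decay_steps.
            global_step = min(global_step, decay_steps)
        frac = 1.0 - float(global_step) / decay_steps
        return (learning_rate - end_learning_rate) * frac ** power + end_learning_rate

    print(polynomial_decay(0.1, 500, 1000))  # ~0.05005 with the new defaults

With `power=1.0` the schedule is a straight line from `learning_rate` down to `end_learning_rate`, which is why the new `Default: 1.0` annotation is useful to readers tuning it.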

@@ -1615,7 +1615,9 @@ def batch_norm(input,
Can be used as a normalizer function for conv2d and fully_connected operations.
The required data format for this layer is one of the following:
+ 1. NHWC `[batch, in_height, in_width, in_channels]`
+ 2. NCHW `[batch, in_channels, in_height, in_width]`
Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`_
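To make the two layouts concrete, a hedged usage sketch; `data_layout` is the fluid parameter for selecting between them, with NCHW being the default to the best of my knowledge:

.. code-block:: python

    import paddle.fluid as fluid

    # NCHW: channels immediately after the batch dimension (fluid's default).
    img = fluid.layers.data(name='img', shape=[3, 32, 32], dtype='float32')
    conv = fluid.layers.conv2d(input=img, num_filters=16, filter_size=3)
    out = fluid.layers.batch_norm(input=conv, data_layout='NCHW')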
