Dev/fix doc of some api (#28785)

* refine doc of bernoulli

* fix some problems

* fix unsqueeze

* fix squeeze

* fix doc
Leo Chen 4 years ago committed by GitHub
parent f77a78cdee
commit 98adc8f054

@@ -54,9 +54,11 @@ class GradScaler(AmpScaler):
    optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
    scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
    data = paddle.rand([10, 3, 32, 32])
    with paddle.amp.auto_cast():
        conv = model(data)
        loss = paddle.mean(conv)
    scaled = scaler.scale(loss)  # scale the loss
    scaled.backward()  # do backward
    scaler.minimize(optimizer, scaled)  # update parameters
@@ -86,6 +88,7 @@ class GradScaler(AmpScaler):
    The scaled tensor or original tensor.
    Examples:
        .. code-block:: python

            import paddle
@@ -94,9 +97,11 @@ class GradScaler(AmpScaler):
    optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
    scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
    data = paddle.rand([10, 3, 32, 32])
    with paddle.amp.auto_cast():
        conv = model(data)
        loss = paddle.mean(conv)
    scaled = scaler.scale(loss)  # scale the loss
    scaled.backward()  # do backward
    scaler.minimize(optimizer, scaled)  # update parameters
@@ -118,6 +123,7 @@ class GradScaler(AmpScaler):
    kwargs: Keyword arguments, which will be forwarded to `optimizer.minimize()`.
    Examples:
        .. code-block:: python

            import paddle
@@ -126,9 +132,11 @@ class GradScaler(AmpScaler):
    optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
    scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
    data = paddle.rand([10, 3, 32, 32])
    with paddle.amp.auto_cast():
        conv = model(data)
        loss = paddle.mean(conv)
    scaled = scaler.scale(loss)  # scale the loss
    scaled.backward()  # do backward
    scaler.minimize(optimizer, scaled)  # update parameters
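
Taken together, the three hunks above document one training-step pattern. For reference, a runnable end-to-end sketch of that pattern (the Conv2D toy model is an assumption for illustration; any dygraph Layer works):

    import paddle

    model = paddle.nn.Conv2D(3, 2, 3)  # toy model, assumed for illustration
    optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
    scaler = paddle.amp.GradScaler(init_loss_scaling=1024)

    data = paddle.rand([10, 3, 32, 32])
    with paddle.amp.auto_cast():        # forward pass in mixed precision
        loss = paddle.mean(model(data))
    scaled = scaler.scale(loss)         # scale loss to avoid fp16 gradient underflow
    scaled.backward()                   # backward on the scaled loss
    scaler.minimize(optimizer, scaled)  # unscale gradients and apply the update
    optimizer.clear_grad()              # reset gradients before the next step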

@@ -491,29 +491,27 @@ class L1Loss(fluid.dygraph.Layer):
    If `reduction` is ``'mean'`` or ``'sum'``, the shape of output loss is [1].
    Examples:
        .. code-block:: python

            import paddle
-           import numpy as np
-           paddle.disable_static()
-           input_data = np.array([[1.5, 0.8], [0.2, 1.3]]).astype("float32")
-           label_data = np.array([[1.7, 1], [0.4, 0.5]]).astype("float32")
-           input = paddle.to_tensor(input_data)
-           label = paddle.to_tensor(label_data)
+           input = paddle.to_tensor([[1.5, 0.8], [0.2, 1.3]])
+           label = paddle.to_tensor([[1.7, 1.0], [0.4, 0.5]])

            l1_loss = paddle.nn.loss.L1Loss()
            output = l1_loss(input, label)
-           print(output.numpy())
+           print(output)
            # [0.35]

            l1_loss = paddle.nn.loss.L1Loss(reduction='sum')
            output = l1_loss(input, label)
-           print(output.numpy())
+           print(output)
            # [1.4]

            l1_loss = paddle.nn.loss.L1Loss(reduction='none')
            output = l1_loss(input, label)
-           print(output.numpy())
+           print(output)
            # [[0.20000005 0.19999999]
            #  [0.2        0.79999995]]
    """
@@ -1001,7 +999,7 @@ class SmoothL1Loss(fluid.dygraph.Layer):
    is the same as the shape of input.
    Returns:
-       The tensor variable storing the smooth_l1_loss of input and label.
+       The tensor storing the smooth_l1_loss of input and label.
    Return type: Tensor.
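
For reference, a minimal usage sketch of the class whose return description is reworded above (the input values are illustrative; default reduction is 'mean'):

    import paddle

    input = paddle.to_tensor([[1.5, 0.8], [0.2, 1.3]])
    label = paddle.to_tensor([[1.7, 1.0], [0.4, 0.5]])
    loss = paddle.nn.SmoothL1Loss()
    print(loss(input, label))  # a single scalar Tensor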

@@ -354,9 +354,6 @@ def roll(x, shifts, axis=None, name=None):
    def stack(x, axis=0, name=None):
        """
-       :alias_main: paddle.stack
-       :alias: paddle.stack, paddle.tensor.stack, paddle.tensor.manipulation.stack
        This OP stacks all the input tensors ``x`` along ``axis`` dimension.
        All tensors must be of the same shape and same dtype.
@@ -423,13 +420,12 @@ def stack(x, axis=0, name=None):
            import paddle

-           paddle.disable_static()
            x1 = paddle.to_tensor([[1.0, 2.0]])
            x2 = paddle.to_tensor([[3.0, 4.0]])
            x3 = paddle.to_tensor([[5.0, 6.0]])

            out = paddle.stack([x1, x2, x3], axis=0)
            print(out.shape)  # [3, 1, 2]
-           print(out.numpy())
+           print(out)
            # [[[1., 2.]],
            #  [[3., 4.]],
            #  [[5., 6.]]]
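
A small companion sketch to the example above, assuming the same x1/x2/x3: a negative axis counts from the end of the output shape.

    import paddle

    x1 = paddle.to_tensor([[1.0, 2.0]])
    x2 = paddle.to_tensor([[3.0, 4.0]])
    x3 = paddle.to_tensor([[5.0, 6.0]])

    # axis=-1 inserts the new dimension last: three [1, 2] tensors -> [1, 2, 3]
    out = paddle.stack([x1, x2, x3], axis=-1)
    print(out.shape)  # [1, 2, 3]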
@@ -459,34 +455,31 @@ def split(x, num_or_sections, axis=0, name=None):
    Example:
        .. code-block:: python

-           import numpy as np
            import paddle

-           # x is a Tensor which shape is [3, 9, 5]
-           x_np = np.random.random([3, 9, 5]).astype("int32")
-           x = paddle.to_tensor(x_np)
+           # x is a Tensor of shape [3, 9, 5]
+           x = paddle.rand([3, 9, 5])

-           out0, out1, out22 = paddle.split(x, num_or_sections=3, axis=1)
-           # out0.shape [3, 3, 5]
-           # out1.shape [3, 3, 5]
-           # out2.shape [3, 3, 5]
+           out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=1)
+           print(out0.shape)  # [3, 3, 5]
+           print(out1.shape)  # [3, 3, 5]
+           print(out2.shape)  # [3, 3, 5]

            out0, out1, out2 = paddle.split(x, num_or_sections=[2, 3, 4], axis=1)
-           # out0.shape [3, 2, 5]
-           # out1.shape [3, 3, 5]
-           # out2.shape [3, 4, 5]
+           print(out0.shape)  # [3, 2, 5]
+           print(out1.shape)  # [3, 3, 5]
+           print(out2.shape)  # [3, 4, 5]

            out0, out1, out2 = paddle.split(x, num_or_sections=[2, 3, -1], axis=1)
-           # out0.shape [3, 2, 5]
-           # out1.shape [3, 3, 5]
-           # out2.shape [3, 4, 5]
+           print(out0.shape)  # [3, 2, 5]
+           print(out1.shape)  # [3, 3, 5]
+           print(out2.shape)  # [3, 4, 5]

-           # axis is negative, the real axis is (rank(x) + axis) which real
-           # value is 1.
+           # axis is negative, the real axis is (rank(x) + axis) = 1
            out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=-2)
-           # out0.shape [3, 3, 5]
-           # out1.shape [3, 3, 5]
-           # out2.shape [3, 3, 5]
+           print(out0.shape)  # [3, 3, 5]
+           print(out1.shape)  # [3, 3, 5]
+           print(out2.shape)  # [3, 3, 5]
    """
    return paddle.fluid.layers.split(
        input=x, num_or_sections=num_or_sections, dim=axis, name=name)
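
One detail worth spelling out from the example above: a -1 entry in num_or_sections is inferred from whatever remains along the split axis, so with a dimension of 9 and sections [2, 3, -1] the -1 resolves to 9 - 2 - 3 = 4. A quick sketch:

    import paddle

    x = paddle.rand([3, 9, 5])
    # the -1 section is inferred as 9 - 2 - 3 = 4 along axis 1
    a, b, c = paddle.split(x, num_or_sections=[2, 3, -1], axis=1)
    print(c.shape)  # [3, 4, 5]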
@@ -494,9 +487,6 @@ def split(x, num_or_sections, axis=0, name=None):
    def squeeze(x, axis=None, name=None):
        """
-       :alias_main: paddle.squeeze
-       :alias: paddle.squeeze, paddle.tensor.squeeze, paddle.tensor.manipulation.squeeze
        This OP will squeeze the dimension(s) of size 1 of input tensor x's shape.
        If axis is provided, it will remove the dimension(s) of size 1 at the given axis.
@@ -553,11 +543,9 @@ def squeeze(x, axis=None, name=None):
            import paddle

-           paddle.disable_static()
            x = paddle.rand([5, 1, 10])
            output = paddle.squeeze(x, axis=1)
-           # output.shape [5, 10]
+           print(output.shape)  # [5, 10]
    """
    if axis is None:
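
A companion sketch for the axis=None branch shown above: when no axis is given, every dimension of size 1 is removed.

    import paddle

    x = paddle.rand([5, 1, 10, 1])
    print(paddle.squeeze(x).shape)          # [5, 10] -- all size-1 dims removed
    print(paddle.squeeze(x, axis=1).shape)  # [5, 10, 1] -- only axis 1 removed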
@@ -695,9 +683,6 @@ def unique(x,
    def unsqueeze(x, axis, name=None):
        """
-       :alias_main: paddle.unsqueeze
-       :alias: paddle.unsqueeze, paddle.tensor.unsqueeze, paddle.tensor.manipulation.unsqueeze
        Insert single-dimensional entries to the shape of input Tensor ``x``. Takes one
        required argument axis, a dimension or list of dimensions that will be inserted.
        Dimension indices in axis are as seen in the output tensor.
@@ -718,7 +703,6 @@ def unsqueeze(x, axis, name=None):
            import paddle

-           paddle.disable_static()
            x = paddle.rand([5, 10])
            print(x.shape)  # [5, 10]
@@ -728,7 +712,7 @@ def unsqueeze(x, axis, name=None):
            out2 = paddle.unsqueeze(x, axis=[0, 2])
            print(out2.shape)  # [1, 5, 1, 10]

-           axis = paddle.fluid.dygraph.to_variable([0, 1, 2])
+           axis = paddle.to_tensor([0, 1, 2])
            out3 = paddle.unsqueeze(x, axis=axis)
            print(out3.shape)  # [1, 1, 1, 5, 10]

@@ -59,17 +59,18 @@ def bernoulli(x, name=None):
            import paddle

-           paddle.seed(100)  # on CPU device
+           paddle.set_device('cpu')  # on CPU device
+           paddle.seed(100)
            x = paddle.rand([2,3])
-           print(x.numpy())
-           # [[0.5535528  0.20714243 0.01162981]
-           #  [0.51577556 0.36369765 0.2609165 ]]
+           print(x)
+           # [[0.55355281, 0.20714243, 0.01162981],
+           #  [0.51577556, 0.36369765, 0.26091650]]

-           paddle.seed(200)  # on CPU device
            out = paddle.bernoulli(x)
-           print(out.numpy())
-           # [[0. 0. 0.]
-           #  [1. 1. 0.]]
+           print(out)
+           # [[1., 0., 1.],
+           #  [0., 1., 0.]]
    """
