|
|
|
|
@ -7068,9 +7068,6 @@ def roi_align(input,
|
|
|
|
|
|
|
|
|
|
def dice_loss(input, label, epsilon=0.00001, name=None):
|
|
|
|
|
"""
|
|
|
|
|
:alias_main: paddle.nn.functional.dice_loss
|
|
|
|
|
:alias: paddle.nn.functional.dice_loss,paddle.nn.functional.loss.dice_loss
|
|
|
|
|
:old_api: paddle.fluid.layers.dice_loss
|
|
|
|
|
|
|
|
|
|
Dice loss for comparing the similarity between the input predictions and the label.
|
|
|
|
|
This implementation is for binary classification, where the input is sigmoid
|
|
|
|
|
@ -7106,7 +7103,6 @@ def dice_loss(input, label, epsilon=0.00001, name=None):
|
|
|
|
|
import paddle
|
|
|
|
|
import paddle.nn.functional as F
|
|
|
|
|
|
|
|
|
|
paddle.disable_static()
|
|
|
|
|
x = paddle.randn((3,224,224,2))
|
|
|
|
|
label = paddle.randint(high=2, shape=(3,224,224,1))
|
|
|
|
|
predictions = F.softmax(x)
|
|
|
|
|
@ -13039,9 +13035,6 @@ def grid_sampler(x, grid, name=None):
|
|
|
|
|
|
|
|
|
|
def log_loss(input, label, epsilon=1e-4, name=None):
|
|
|
|
|
"""
|
|
|
|
|
:alias_main: paddle.nn.functional.log_loss
|
|
|
|
|
:alias: paddle.nn.functional.log_loss,paddle.nn.functional.loss.log_loss
|
|
|
|
|
:old_api: paddle.fluid.layers.log_loss
|
|
|
|
|
|
|
|
|
|
**Negative Log Loss Layer**
|
|
|
|
|
|
|
|
|
|
@ -13073,7 +13066,6 @@ def log_loss(input, label, epsilon=1e-4, name=None):
|
|
|
|
|
import paddle
|
|
|
|
|
import paddle.nn.functional as F
|
|
|
|
|
|
|
|
|
|
paddle.disable_static()
|
|
|
|
|
label = paddle.randn((10,1))
|
|
|
|
|
prob = paddle.randn((10,1))
|
|
|
|
|
cost = F.log_loss(input=prob, label=label)
|
|
|
|
|
@ -14462,9 +14454,6 @@ def deformable_conv(input,
|
|
|
|
|
|
|
|
|
|
def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
|
|
|
|
|
"""
|
|
|
|
|
:alias_main: paddle.nn.functional.unfold
|
|
|
|
|
:alias: paddle.nn.functional.unfold,paddle.nn.functional.common.unfold
|
|
|
|
|
:old_api: paddle.fluid.layers.unfold
|
|
|
|
|
|
|
|
|
|
This op returns a col buffer of sliding local blocks of input x, also known
|
|
|
|
|
as im2col for batched 2D image tensors. For each block under the convolution filter,
|
|
|
|
|
@ -14490,7 +14479,7 @@ def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Parameters:
|
|
|
|
|
x(Varaible): 4-D Tensor, input tensor of format [N, C, H, W],
|
|
|
|
|
x(Tensor): 4-D Tensor, input tensor of format [N, C, H, W],
|
|
|
|
|
data type can be float32 or float64
|
|
|
|
|
kernel_sizes(int|list): The size of convolution kernel, should be [k_h, k_w]
|
|
|
|
|
or an integer k treated as [k, k].
|
|
|
|
|
@ -14513,22 +14502,24 @@ def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
|
The tensor variable corresponding to the sliding local blocks.
|
|
|
|
|
The tensor corresponding to the sliding local blocks.
|
|
|
|
|
The output shape is [N, Cout, Lout] as described above.
|
|
|
|
|
Cout is the total number of values within each block,
|
|
|
|
|
and Lout is the total number of such blocks.
|
|
|
|
|
The data type of output is the same as the input :math:`x`
|
|
|
|
|
|
|
|
|
|
Return Type:
|
|
|
|
|
Variable
|
|
|
|
|
Tensor
|
|
|
|
|
|
|
|
|
|
Examples:
|
|
|
|
|
|
|
|
|
|
.. code-block:: python
|
|
|
|
|
|
|
|
|
|
import paddle.fluid as fluid
|
|
|
|
|
x = fluid.data(name = 'data', shape = [100, 3, 224, 224], dtype = 'float32')
|
|
|
|
|
y = fluid.layers.unfold(x, [3, 3], 1, 1, 1)
|
|
|
|
|
import paddle
|
|
|
|
|
import paddle.nn.functional as F
|
|
|
|
|
|
|
|
|
|
x = paddle.randn((100,3,224,224))
|
|
|
|
|
y = F.unfold(x, [3, 3], 1, 1, 1)
|
|
|
|
|
"""
|
|
|
|
|
|
|
|
|
|
helper = LayerHelper("unfold", **locals())
|
|
|
|
|
|