@@ -377,9 +377,7 @@ def edit_distance(input,
So the edit distance between A and B is 3.
The input is a LoDTensor or Tensor.
If it is a LoDTensor, the separation is specified by the LoD information.
If it is a Tensor, input_length and label_length should be provided.
The input is a Tensor; input_length and label_length should be provided.
The `batch_size` of labels should be the same as that of `input`.
@@ -388,59 +386,36 @@ def edit_distance(input,
the edit distance value will be divided by the length of label.
Parameters:
input (Variable): The input variable which is a tensor or LoDTensor, its rank should be equal to 2 and its data type should be int64.
label (Variable): The label variable which is a tensor or LoDTensor, its rank should be equal to 2 and its data type should be int64.
input (Tensor): The input tensor, its rank should be equal to 2 and its data type should be int64.
label (Tensor): The label tensor, its rank should be equal to 2 and its data type should be int64.
normalized (bool, default True): Indicates whether to normalize the edit distance.
ignored_tokens (list<int>, default None): Tokens that will be removed before
calculating edit distance.
input_length (Variable): The length for each sequence in `input` if it's of Tensor type; it should have shape `(batch_size,)` and its data type should be int64.
label_length (Variable): The length for each sequence in `label` if it's of Tensor type; it should have shape `(batch_size,)` and its data type should be int64.
input_length (Tensor): The length for each sequence in `input` if it's of Tensor type; it should have shape `(batch_size,)` and its data type should be int64.
label_length (Tensor): The length for each sequence in `label` if it's of Tensor type; it should have shape `(batch_size,)` and its data type should be int64.
NOTE: To avoid unexpected results, the value of every element in input_length and label_length should be equal to the value of the second dimension of input and label. For example, given the input [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], the shape of input is [3, 4] and input_length should be [4, 4, 4].
NOTE: This API is different from fluid.metrics.EditDistance.
Returns:
Tuple:
distance (Variable): edit distance result, its data type is float32, and its shape is (batch_size, 1).
sequence_num (Variable): sequence number, its data type is float32, and its shape is (1,).
distance (Tensor): edit distance result, its data type is float32, and its shape is (batch_size, 1).
sequence_num (Tensor): sequence number, its data type is float32, and its shape is (1,).
Examples:
.. code-block:: python

    import paddle.fluid as fluid
    import numpy as np

    # using LoDTensor
    x_lod = fluid.data(name='x_lod', shape=[None, 1], dtype='int64', lod_level=1)
    y_lod = fluid.data(name='y_lod', shape=[None, 1], dtype='int64', lod_level=1)
    distance_lod, seq_num_lod = fluid.layers.edit_distance(input=x_lod, label=y_lod)

    # using Tensor
    input_data = np.array([[1, 2, 3], [4, 5, 6], [4, 4, 4], [1, 1, 1]]).astype('int64')
    label_data = np.array([[1, 3, 4, 1], [4, 5, 8, 1], [7, 7, 7, 1], [1, 1, 1, 1]]).astype('int64')
    input_len = np.array([3, 3, 3, 3]).astype('int64')
    label_len = np.array([4, 4, 4, 4]).astype('int64')

    input_t = fluid.data(name='input', shape=[None, 3], dtype='int64')
    label_t = fluid.data(name='label', shape=[None, 4], dtype='int64')
    input_len_t = fluid.data(name='input_length', shape=[None], dtype='int64')
    label_len_t = fluid.data(name='label_length', shape=[None], dtype='int64')

    import paddle
    import paddle.nn.functional as F

    distance, sequence_num = fluid.layers.edit_distance(input=input_t, label=label_t, input_length=input_len_t, label_length=label_len_t, normalized=False)

    input = paddle.to_tensor([[1, 2, 3], [4, 5, 6], [4, 4, 4], [1, 1, 1]], dtype='int64')
    label = paddle.to_tensor([[1, 3, 4, 1], [4, 5, 8, 1], [7, 7, 7, 1], [1, 1, 1, 1]], dtype='int64')
    input_len = paddle.to_tensor([3, 3, 3, 3], dtype='int64')
    label_len = paddle.to_tensor([4, 4, 4, 4], dtype='int64')

    # print(input_data.shape, label_data.shape)
    # ((4,3), (4,4))

    distance, sequence_num = F.loss.edit_distance(input=input, label=label, input_length=input_len, label_length=label_len, normalized=False)

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
    dis, seq_num = exe.run(fluid.default_main_program(),
                           feed={"input": input_data,
                                 "label": label_data,
                                 "input_length": input_len,
                                 "label_length": label_len},
                           fetch_list=[distance, sequence_num])
    # print(dis)
    # print(distance)
    # [[3.]
    #  [2.]
    #  [4.]
@@ -451,7 +426,7 @@ def edit_distance(input,
# [1. ]
# [0.25]
#
# print(seq_num)
# print(sequence_num)
# [4]
"""
@@ -1434,18 +1409,15 @@ def sigmoid_cross_entropy_with_logits(x,
name=None,
normalize=False):
"""
:alias_main: paddle.nn.functional.sigmoid_cross_entropy_with_logits
:alias: paddle.nn.functional.sigmoid_cross_entropy_with_logits, paddle.nn.functional.loss.sigmoid_cross_entropy_with_logits
:old_api: paddle.fluid.layers.sigmoid_cross_entropy_with_logits
${comment}
Args:
x (Variable): a 2-D tensor with shape N x D, where N is the batch size and
x (Tensor): a 2-D tensor with shape N x D, where N is the batch size and
D is the number of classes. This input is a tensor of logits computed
by the previous operator. Logits are unscaled log probabilities given
as log(p/(1-p)). The data type should be float32 or float64.
label (Variable): a 2-D tensor of the same type and shape as X.
label (Tensor): a 2-D tensor of the same type and shape as X.
This input is a tensor of probabilistic labels for each logit.
ignore_index (int): Specifies a target value that is ignored and
does not contribute to the input gradient.
@@ -1456,22 +1428,19 @@ def sigmoid_cross_entropy_with_logits(x,
targets != ignore_index.
Returns:
out (${out_type}): ${out_comment}
out (Tensor): ${out_comment}
Examples:
.. code-block:: python

    import paddle.fluid as fluid
    input = fluid.data(
        name='data', shape=[10], dtype='float32')
    label = fluid.data(
        name='label', shape=[10], dtype='float32')
    loss = fluid.layers.sigmoid_cross_entropy_with_logits(
        x=input,
        label=label,
        ignore_index=-1,
        normalize=True)  # or False
    # loss = fluid.layers.reduce_sum(loss)  # summation of loss

    import paddle
    input = paddle.rand(shape=[10], dtype='float32')
    label = paddle.rand(shape=[10], dtype='float32')
    loss = paddle.fluid.layers.sigmoid_cross_entropy_with_logits(input, label,
                                                                 ignore_index=-1, normalize=True)
    print(loss)
"""
check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'],
                         'sigmoid_cross_entropy_with_logits')
@@ -1619,47 +1588,44 @@ def huber_loss(input, label, delta):
@templatedoc()
def kldiv_loss(x, target, reduction='mean', name=None):
"""
:alias_main: paddle.nn.functional.kldiv_loss
:alias: paddle.nn.functional.kldiv_loss, paddle.nn.functional.loss.kldiv_loss
:old_api: paddle.fluid.layers.kldiv_loss
${comment}
Args:
x (Variable): ${x_comment}
target (Variable): ${target_comment}
reduction (Variable): ${reduction_comment}
x (Tensor): ${x_comment}
target (Tensor): ${target_comment}
reduction (str): ${reduction_comment}
name (str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name does not need to be set and
it is None by default.
Returns:
Variable(Tensor): The KL divergence loss. The data type is same as input tensor
Tensor: The KL divergence loss. The data type is the same as the input tensor.
Examples:
.. code-block:: python

    import paddle
    import paddle.fluid as fluid

    # 'batchmean' reduction, loss shape will be [N]
    x = fluid.data(name='x', shape=[None, 4, 2, 2], dtype='float32')  # shape=[-1, 4, 2, 2]
    target = fluid.layers.data(name='target', shape=[4, 2, 2], dtype='float32')
    loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='batchmean')  # shape=[-1]

    x = paddle.rand(shape=[3, 4, 2, 2], dtype='float32')
    target = paddle.rand(shape=[3, 4, 2, 2], dtype='float32')

    # 'batchmean' reduction, loss shape will be [1]
    loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='batchmean')
    print(loss.shape)  # shape=[1]

    # 'mean' reduction, loss shape will be [1]
    x = fluid.data(name='x', shape=[None, 4, 2, 2], dtype='float32')  # shape=[-1, 4, 2, 2]
    target = fluid.layers.data(name='target', shape=[4, 2, 2], dtype='float32')
    loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='mean')  # shape=[1]
    loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='mean')
    print(loss.shape)  # shape=[1]

    # 'sum' reduction, loss shape will be [1]
    x = fluid.data(name='x', shape=[None, 4, 2, 2], dtype='float32')  # shape=[-1, 4, 2, 2]
    target = fluid.layers.data(name='target', shape=[4, 2, 2], dtype='float32')
    loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='sum')  # shape=[1]
    loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='sum')
    print(loss.shape)  # shape=[1]

    # 'none' reduction, loss shape is the same as the input shape
    x = fluid.data(name='x', shape=[None, 4, 2, 2], dtype='float32')  # shape=[-1, 4, 2, 2]
    target = fluid.layers.data(name='target', shape=[4, 2, 2], dtype='float32')
    loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='none')  # shape=[-1, 4, 2, 2]
    loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='none')
    print(loss.shape)  # shape=[3, 4, 2, 2]
"""
helper = LayerHelper('kldiv_loss', **locals())
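For reference, the reductions shown in the example can be sketched in NumPy as follows, assuming the PyTorch-style definition loss = target * (log(target) - x), with x given in log-space and target strictly positive (`kldiv_loss_ref` is a hypothetical helper, not a Paddle API):

.. code-block:: python

    import numpy as np

    def kldiv_loss_ref(x, target, reduction='mean'):
        loss = target * (np.log(target) - x)   # element-wise KL term
        if reduction == 'batchmean':
            return loss.sum() / x.shape[0]     # sum divided by batch size -> scalar
        if reduction == 'mean':
            return loss.mean()                 # scalar
        if reduction == 'sum':
            return loss.sum()                  # scalar
        return loss                            # 'none': same shape as x

    x = np.random.rand(3, 4, 2, 2).astype('float32')
    target = np.random.rand(3, 4, 2, 2).astype('float32')
    print(kldiv_loss_ref(x, target, 'none').shape)  # (3, 4, 2, 2)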