@ -250,27 +250,24 @@ class MSELoss(fluid.dygraph.layers.Layer):
class L1Loss(fluid.dygraph.Layer):
    r"""
    :alias_main: paddle.nn.L1Loss
    :alias: paddle.nn.L1Loss, paddle.nn.layer.L1Loss, paddle.nn.layer.loss.L1Loss

    This interface is used to construct a callable object of the ``L1Loss`` class.
    The L1Loss layer calculates the L1 Loss of ``x`` and ``label`` as follows.

    If :attr:`reduction` set to ``'none'``, the unreduced loss is:

    .. math::
        Out = \lvert x - label \rvert

    If :attr:`reduction` set to ``'mean'``, the loss is:

    .. math::
        Out = MEAN(\lvert x - label \rvert)

    If :attr:`reduction` set to ``'sum'``, the loss is:

    .. math::
        Out = SUM(\lvert x - label \rvert)

    Parameters:
        reduction (str, optional): Indicate the reduction to apply to the loss.
            If :attr:`reduction` is ``'none'``, the unreduced loss is returned.
            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned.
            If :attr:`reduction` is ``'sum'``, the reduced sum loss is returned.
            Default is ``'mean'``.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Shape:
        x (Tensor): The input tensor. The shapes is [N, *], where N is batch size
            and `*` means any number of additional dimensions. It's data type
            should be float32, float64, int32, int64.
        label (Tensor): label. The shapes is [N, *], same shape as ``x``. It's
            data type should be float32, float64, int32, int64.
        output (Tensor): The L1 Loss of ``x`` and ``label``.
            If :attr:`reduction` is ``'none'``, the shape of output loss is
            [N, *], the same as ``x``.
            If :attr:`reduction` is ``'mean'`` or ``'sum'``, the shape of output
            loss is [1], which means the output is a scalar.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            paddle.disable_static()
            x_data = np.array([[1.5, 0.8], [0.2, 1.3]]).astype("float32")
            label_data = np.array([[1.7, 1], [0.4, 0.5]]).astype("float32")
            x = paddle.to_variable(x_data)
            label = paddle.to_variable(label_data)

            l1_loss = paddle.nn.loss.L1Loss()
            output = l1_loss(x, label)
            print(output.numpy())
            # [0.35]

            l1_loss = paddle.nn.loss.L1Loss(reduction='sum')
            output = l1_loss(x, label)
            print(output.numpy())
            # [1.4]

            l1_loss = paddle.nn.loss.L1Loss(reduction='none')
            output = l1_loss(x, label)
            print(output.numpy())
            # [[0.20000005 0.19999999]
            # [0.2        0.79999995]]
    """

    def __init__(self, reduction='mean', name=None):
        # Validate eagerly so a bad `reduction` fails at construction time,
        # not on the first forward pass.
        if reduction not in ['sum', 'mean', 'none']:
            raise ValueError(
                "The value of 'reduction' in L1Loss should be 'sum', 'mean' or 'none', but "
                "received %s, which is not allowed." % reduction)
        super(L1Loss, self).__init__()
        self.reduction = reduction
        self.name = name

    def forward(self, x, label):
        # Delegate to the functional API so layer and functional forms share
        # one implementation (dtype checks and reduction happen there).
        return paddle.nn.functional.l1_loss(
            x, label, self.reduction, name=self.name)
class BCELoss ( fluid . dygraph . Layer ) :