@@ -767,23 +767,20 @@ def nll_loss(input,
     Examples:
         .. code-block:: python

                 import paddle
-                import numpy as np
                 from paddle.nn.functional import nll_loss
                 log_softmax = paddle.nn.LogSoftmax(axis=1)

-                input_np = np.array([[0.88103855, 0.9908683 , 0.6226845 ],
+                input = paddle.to_tensor([[0.88103855, 0.9908683 , 0.6226845 ],
                          [0.53331435, 0.07999352, 0.8549948 ],
                          [0.25879037, 0.39530203, 0.698465 ],
                          [0.73427284, 0.63575995, 0.18827209],
-                         [0.05689114, 0.0862954 , 0.6325046 ]]).astype(np.float32)
-                label_np = np.array([0, 2, 1, 1, 0]).astype(np.int64)
-                input = paddle.to_tensor(input_np)
+                         [0.05689114, 0.0862954 , 0.6325046 ]], "float32")
                 log_out = log_softmax(input)
-                label = paddle.to_tensor(label_np)
+                label = paddle.to_tensor([0, 2, 1, 1, 0], "int64")
                 result = nll_loss(log_out, label)
-                print(result) # [1.0720209]
+                print(result) # Tensor(shape=[1], dtype=float32, place=CPUPlace, stop_gradient=True, [1.07202101])

     """
     if reduction not in ['sum', 'mean', 'none']:
         raise ValueError(
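Note for reviewers (not part of the diff): with the default reduction='mean', nll_loss averages the negative log-probabilities selected at each sample's label index, which is where the ~1.072 in the updated comment comes from. A minimal sanity-check sketch, assuming a Paddle 2.x environment:

    import paddle
    import paddle.nn.functional as F

    # Same data as the updated docstring example.
    x = paddle.to_tensor([[0.88103855, 0.9908683 , 0.6226845 ],
                          [0.53331435, 0.07999352, 0.8549948 ],
                          [0.25879037, 0.39530203, 0.698465  ],
                          [0.73427284, 0.63575995, 0.18827209],
                          [0.05689114, 0.0862954 , 0.6325046 ]], "float32")
    label = paddle.to_tensor([0, 2, 1, 1, 0], "int64")

    log_probs = F.log_softmax(x, axis=1)

    # With reduction='mean', nll_loss averages the negative log-probability
    # at each sample's label index; reproduce that with plain indexing.
    picked = [float(log_probs[i, int(label[i])]) for i in range(5)]
    manual = -sum(picked) / len(picked)

    builtin = F.nll_loss(log_probs, label)
    print(manual, float(builtin))  # both should be close to 1.072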