Remove paddle.incubate.hapi.loss and reuse paddle.nn.layer.loss in high level API (#25590)
* Remove paddle.incubate.hapi.loss and reuse the paddle.nn.layer.loss in high level API
parent 2a191d8fa5
commit af74675b5a
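The removed classes below are replaced by reusing the loss layers from paddle.nn.layer.loss directly in the high level API. A minimal usage sketch of the intended pattern follows; nn.CrossEntropyLoss and the loss_function argument of Model.prepare are taken from the removed docstrings and my reading of paddle.nn, not from this diff itself, so verify them against the installed release:

# Illustrative sketch only: pass a paddle.nn.layer.loss class to the
# high level API instead of the removed paddle.incubate.hapi.loss classes.
import paddle.fluid as fluid
import paddle.incubate.hapi as hapi
import paddle.nn as nn

fluid.enable_dygraph()

model = hapi.Model(hapi.vision.LeNet(classifier_activation=None))
# nn.CrossEntropyLoss plays the role that hapi.loss.SoftmaxWithCrossEntropy
# played before this commit (assumed name and default mean reduction).
model.prepare(loss_function=nn.CrossEntropyLoss())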
@@ -1,140 +0,0 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

from paddle import fluid
from paddle.fluid.framework import in_dygraph_mode, Variable
from paddle.fluid.dygraph.base import to_variable

from .utils import to_list

__all__ = ['Loss', 'CrossEntropy', 'SoftmaxWithCrossEntropy']

class Loss(object):
    """
    Base class for loss, encapsulates loss logic and APIs

    Usage:
        custom_loss = CustomLoss()
        loss = custom_loss(inputs, labels)

    Examples:
        .. code-block:: python

            from paddle.incubate.hapi.loss import Loss
            from paddle import fluid

            class SoftmaxWithCrossEntropy(Loss):
                def __init__(self, average=True):
                    super(SoftmaxWithCrossEntropy, self).__init__(average)

                def forward(self, outputs, labels):
                    return [
                        fluid.layers.softmax_with_cross_entropy(
                            o, l, return_softmax=False) for o, l in zip(outputs, labels)
                    ]

    """

    def __init__(self, average=True):
        super(Loss, self).__init__()
        self.average = average

    def forward(self, outputs, labels):
        raise NotImplementedError()

    def __call__(self, outputs, labels=None):
        labels = to_list(labels)
        if in_dygraph_mode() and labels:
            labels = [to_variable(l) for l in labels]
        losses = to_list(self.forward(to_list(outputs), labels))
        if self.average:
            losses = [fluid.layers.reduce_mean(l) for l in losses]
        else:
            losses = [fluid.layers.reduce_sum(l) for l in losses]
        return losses


class CrossEntropy(Loss):
    """
    Args:
        input (list[Variable]): Input tensor, the data type is float32,
            float64, int32, int64.
        label (list[Variable]): Label tensor, the data type is float32,
            float64, int32, int64.
        average (bool, optional): Indicate whether to average the loss. Default: True.
    Returns:
        list[Variable]: The tensor variable storing the cross_entropy_loss of inputs and labels.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.incubate.hapi as hapi

            fluid.enable_dygraph()

            model = hapi.Model(hapi.vision.LeNet())
            model.prepare(loss_function=hapi.loss.CrossEntropy())

    """

    def __init__(self, average=True):
        super(CrossEntropy, self).__init__(average)

    def forward(self, outputs, labels):
        return [
            fluid.layers.cross_entropy(o, l) for o, l in zip(outputs, labels)
        ]


class SoftmaxWithCrossEntropy(Loss):
    """
    This operator combines softmax and cross entropy.

    Args:
        input (list[Variable]): Input tensor, the data type is float32,
            float64, int32, int64.
        label (list[Variable]): Label tensor, the data type is float32,
            float64, int32, int64.
        average (bool, optional): Indicate whether to average the loss. Default: True.
    Returns:
        list[Variable]: The tensor variable storing the cross_entropy_loss of inputs and labels.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.incubate.hapi as hapi

            fluid.enable_dygraph()

            model = hapi.Model(hapi.vision.LeNet(classifier_activation=None))
            loss = hapi.loss.SoftmaxWithCrossEntropy()
            model.prepare(loss_function=loss)

    """

    def __init__(self, average=True):
        super(SoftmaxWithCrossEntropy, self).__init__(average)

    def forward(self, outputs, labels):
        return [
            fluid.layers.softmax_with_cross_entropy(
                o, l, return_softmax=False) for o, l in zip(outputs, labels)
        ]
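To make the base class above concrete: Loss.__call__ converts outputs and labels to lists, turns numpy labels into variables in dygraph mode, calls the subclass forward, and reduces every returned loss with reduce_mean when average=True (the default) or reduce_sum otherwise. A minimal dygraph sketch of how the removed CrossEntropy class behaved before this commit:

import numpy as np
import paddle.fluid as fluid
from paddle.incubate.hapi.loss import CrossEntropy

fluid.enable_dygraph()

# Row-normalized probabilities and integer labels, matching what
# fluid.layers.cross_entropy expects.
prob = np.random.uniform(0.1, 1.0, (8, 10)).astype('float32')
prob /= prob.sum(axis=1, keepdims=True)
label = np.random.randint(0, 10, (8, 1)).astype('int64')

# __call__ accepts numpy labels in dygraph mode and converts them itself;
# with average=True each per-output loss is reduced with reduce_mean.
losses = CrossEntropy()(fluid.dygraph.to_variable(prob), label)
print([l.numpy() for l in losses])  # one scalar per model output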
File diff suppressed because it is too large
@@ -1,111 +0,0 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import division
from __future__ import print_function

import unittest
import os
import six
import numpy as np
import shutil
import copy

import paddle
from paddle import fluid

from paddle.incubate.hapi.model import Model, Input
from paddle.incubate.hapi.loss import CrossEntropy, SoftmaxWithCrossEntropy


def stable_softmax(x):
    """Compute the softmax of vector x in a numerically stable way."""
    # clip to shiftx, otherwise, when calc loss with
    # log(exp(shiftx)), may get log(0)=INF
    shiftx = (x - np.max(x)).clip(-64.)
    exps = np.exp(shiftx)
    return exps / np.sum(exps)


def randomize_probability(batch_size, class_num, dtype='float32'):
    prob = np.random.uniform(
        0.1, 1.0, size=(batch_size, class_num)).astype(dtype)
    prob_sum = prob.sum(axis=1)
    for i in six.moves.xrange(len(prob)):
        prob[i] /= prob_sum[i]
    return prob


def numpy_ce(x, label):
    return np.asmatrix(
        [[-np.log(x[i][label[i][0]])] for i in range(x.shape[0])],
        dtype="float32").mean()


class TestLoss(unittest.TestCase):
    def test_cross_entropy(self):
        class_num = 100
        batch_size = 128
        inputs = [randomize_probability(128, class_num) for _ in range(2)]

        labels = [
            np.random.randint(
                0, class_num, (batch_size, 1), dtype="int64") for _ in range(2)
        ]

        gt_out = [numpy_ce(inputs[i], labels[i]) for i in range(2)]

        fluid.enable_dygraph()
        cross_entropy = CrossEntropy()
        out = cross_entropy(
            [fluid.dygraph.to_variable(x) for x in inputs],
            [fluid.dygraph.to_variable(label) for label in labels])
        out = [o.numpy() for o in out]

        for o, g in zip(out, gt_out):
            np.testing.assert_allclose(o, g, atol=1e-5)

    def test_soft_cross_entronpy(self):
        class_num = 100
        batch_size = 128

        inputs = [randomize_probability(128, class_num) for _ in range(2)]

        labels = [
            np.random.randint(
                0, class_num, (batch_size, 1), dtype="int64") for _ in range(2)
        ]

        fluid.enable_dygraph()
        softmax_cross_entropy = SoftmaxWithCrossEntropy()

        softmax_cross_entropy(
            [fluid.dygraph.to_variable(x) for x in inputs],
            [fluid.dygraph.to_variable(label) for label in labels])

        softmax_cross_entropy = SoftmaxWithCrossEntropy(average=False)

        inputs = [randomize_probability(128, class_num)]

        labels = [
            np.random.randint(
                0, class_num, (batch_size, 1), dtype="int64")
        ]

        softmax_cross_entropy([fluid.dygraph.to_variable(x) for x in inputs],
                              fluid.dygraph.to_variable(labels[0]))


if __name__ == '__main__':
    unittest.main()
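The deleted numpy helpers above (stable_softmax, numpy_ce) implement softmax followed by the mean negative log-likelihood. If the same check were ported to the reused paddle.nn.layer.loss classes, it could look roughly like the sketch below; the CrossEntropyLoss name, its internal softmax, its default mean reduction, and the accepted label shape are assumptions about paddle.nn rather than facts stated in this diff.

# Hypothetical port of the deleted check to paddle.nn.layer.loss.
import numpy as np
import paddle.fluid as fluid
import paddle.nn as nn

fluid.enable_dygraph()

class_num, batch_size = 100, 128
logits = np.random.uniform(0.1, 1.0,
                           (batch_size, class_num)).astype('float32')
label = np.random.randint(0, class_num, (batch_size, 1), dtype='int64')

# numpy reference: stable softmax, then mean negative log-likelihood.
shift = logits - logits.max(axis=1, keepdims=True)
softmax = np.exp(shift) / np.exp(shift).sum(axis=1, keepdims=True)
expected = -np.log(softmax[np.arange(batch_size), label[:, 0]]).mean()

# Assumed API: CrossEntropyLoss applies softmax internally and averages.
loss_fn = nn.CrossEntropyLoss()
out = loss_fn(fluid.dygraph.to_variable(logits),
              fluid.dygraph.to_variable(label))
np.testing.assert_allclose(out.numpy(), expected, atol=1e-5)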