Remove paddle.incubate.hapi.loss and reuse paddle.nn.layer.loss in high level API (#25590)

* Remove paddle.incubate.hapi.loss and reuse the paddle.nn.layer.loss in high level API
fix_copy_if_different
qingqing01 5 years ago committed by GitHub
parent 2a191d8fa5
commit af74675b5a
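A minimal migration sketch, assembled from the documentation examples updated in this commit (the MNIST/LeNet setup mirrors the hapi.metrics.Accuracy example below; the explicit fluid.enable_dygraph() call is an assumption added for a self-contained dygraph run):

    import paddle
    import paddle.fluid as fluid
    import paddle.incubate.hapi as hapi

    fluid.enable_dygraph()
    train_dataset = hapi.datasets.MNIST(mode='train')

    # The classifier now emits raw logits (classifier_activation=None),
    # since paddle.nn.CrossEntropyLoss applies softmax internally.
    model = hapi.Model(hapi.vision.LeNet(classifier_activation=None))
    optim = fluid.optimizer.Adam(learning_rate=0.001,
                                 parameter_list=model.parameters())

    # Old (removed in this commit): loss_function=hapi.loss.CrossEntropy()
    # New: reuse the loss layer from paddle.nn.layer.loss.
    model.prepare(optim,
                  loss_function=paddle.nn.CrossEntropyLoss(),
                  metrics=hapi.metrics.Accuracy())
    model.fit(train_dataset, batch_size=64)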

@@ -21,7 +21,6 @@ from . import model
from .model import *
from . import metrics
from . import loss
from . import datasets
from . import distributed
from . import vision
@@ -40,7 +39,6 @@ __all__ = [
'distributed',
'download',
'metrics',
'loss',
'vision',
'text',
] + model.__all__ + device.__all__

@@ -291,6 +291,7 @@ class ProgBarLogger(Callback):
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import paddle.incubate.hapi as hapi
@@ -299,11 +300,12 @@ class ProgBarLogger(Callback):
train_dataset = hapi.datasets.MNIST(mode='train')
model = hapi.Model(hapi.vision.LeNet(), inputs, labels)
model = hapi.Model(hapi.vision.LeNet(classifier_activation=None),
inputs, labels)
optim = fluid.optimizer.Adam(0.001)
model.prepare(optimizer=optim,
loss_function=hapi.loss.CrossEntropy(),
loss_function=paddle.nn.CrossEntropyLoss(),
metrics=hapi.metrics.Accuracy())
callback = hapi.callbacks.ProgBarLogger(log_freq=10)
@@ -425,6 +427,7 @@ class ModelCheckpoint(Callback):
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import paddle.incubate.hapi as hapi
@@ -433,11 +436,12 @@ class ModelCheckpoint(Callback):
train_dataset = hapi.datasets.MNIST(mode='train')
model = hapi.Model(hapi.vision.LeNet(), inputs, labels)
model = hapi.Model(hapi.vision.LeNet(classifier_activation=None),
inputs, labels)
optim = fluid.optimizer.Adam(0.001)
model.prepare(optimizer=optim,
loss_function=hapi.loss.CrossEntropy(),
loss_function=paddle.nn.CrossEntropyLoss(),
metrics=hapi.metrics.Accuracy())
callback = hapi.callbacks.ModelCheckpoint(save_dir='./temp')

@@ -1,140 +0,0 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from paddle import fluid
from paddle.fluid.framework import in_dygraph_mode, Variable
from paddle.fluid.dygraph.base import to_variable
from .utils import to_list
__all__ = ['Loss', 'CrossEntropy', 'SoftmaxWithCrossEntropy']
class Loss(object):
"""
Base class for loss, encapsulates loss logic and APIs
Usage:
custom_loss = CustomLoss()
loss = custom_loss(inputs, labels)
Examples:
.. code-block:: python
from paddle.incubate.hapi.loss import Loss
from paddle import fluid
class SoftmaxWithCrossEntropy(Loss):
def __init__(self, average=True):
super(SoftmaxWithCrossEntropy, self).__init__(average)
def forward(self, outputs, labels):
return [
fluid.layers.softmax_with_cross_entropy(
o, l, return_softmax=False) for o, l in zip(outputs, labels)
]
"""
def __init__(self, average=True):
super(Loss, self).__init__()
self.average = average
def forward(self, outputs, labels):
raise NotImplementedError()
def __call__(self, outputs, labels=None):
labels = to_list(labels)
if in_dygraph_mode() and labels:
labels = [to_variable(l) for l in labels]
losses = to_list(self.forward(to_list(outputs), labels))
if self.average:
losses = [fluid.layers.reduce_mean(l) for l in losses]
else:
losses = [fluid.layers.reduce_sum(l) for l in losses]
return losses
class CrossEntropy(Loss):
"""
Args:
input (list[Variable]): Input tensor, the data type is float32,
float64, int32, int64.
label (list[Variable]): Label tensor, the data type is float32,
float64, int32, int64.
average (bool, optional): Indicates whether to average the loss. Default: True.
Returns:
list[Variable]: The tensor variable storing the cross_entropy_loss of inputs and labels.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.incubate.hapi as hapi
fluid.enable_dygraph()
model = hapi.Model(hapi.vision.LeNet())
model.prepare(loss_function=hapi.loss.CrossEntropy())
"""
def __init__(self, average=True):
super(CrossEntropy, self).__init__(average)
def forward(self, outputs, labels):
return [
fluid.layers.cross_entropy(o, l) for o, l in zip(outputs, labels)
]
class SoftmaxWithCrossEntropy(Loss):
"""
This operator combines softmax and cross entropy.
Args:
input (list[Variable]): Input tensor, the data type is float32,
float64, int32, int64.
label (list[Variable]): Label tensor, the data type is float32,
float64, int32, int64.
average (bool, optional): Indicates whether to average the loss. Default: True.
Returns:
list[Variable]: The tensor variable storing the cross_entropy_loss of inputs and labels.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.incubate.hapi as hapi
fluid.enable_dygraph()
model = hapi.Model(hapi.vision.LeNet(classifier_activation=None))
loss = hapi.loss.SoftmaxWithCrossEntropy()
model.prepare(loss_function=loss)
"""
def __init__(self, average=True):
super(SoftmaxWithCrossEntropy, self).__init__(average)
def forward(self, outputs, labels):
return [
fluid.layers.softmax_with_cross_entropy(
o, l, return_softmax=False) for o, l in zip(outputs, labels)
]
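With this module removed, the average flag has no direct counterpart; in the tests updated below, CrossEntropy(average=False) becomes CrossEntropyLoss(reduction="sum"), and the old default average=True maps to the new default reduction='mean'. A small illustrative sketch of that correspondence (the random logits/labels tensors are placeholders, not part of this commit):

    import numpy as np
    import paddle.fluid as fluid
    from paddle.nn.layer.loss import CrossEntropyLoss

    fluid.enable_dygraph()
    # Placeholder batch: raw logits (no softmax) and integer class labels.
    logits = fluid.dygraph.to_variable(
        np.random.randn(4, 10).astype('float32'))
    labels = fluid.dygraph.to_variable(
        np.random.randint(0, 10, (4, 1)).astype('int64'))

    # average=True (old default)  ->  reduction='mean' (new default)
    mean_loss = CrossEntropyLoss()(logits, labels)
    # average=False               ->  reduction='sum'
    sum_loss = CrossEntropyLoss(reduction='sum')(logits, labels)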

@@ -170,6 +170,7 @@ class Accuracy(Metric):
.. code-block:: python
import paddle
import paddle.fluid as fluid
import paddle.incubate.hapi as hapi
@@ -177,12 +178,12 @@ class Accuracy(Metric):
train_dataset = hapi.datasets.MNIST(mode='train')
model = hapi.Model(hapi.vision.LeNet())
model = hapi.Model(hapi.vision.LeNet(classifier_activation=None))
optim = fluid.optimizer.Adam(
learning_rate=0.001, parameter_list=model.parameters())
model.prepare(
optim,
loss_function=hapi.loss.CrossEntropy(average=False),
loss_function=paddle.nn.CrossEntropyLoss(),
metrics=hapi.metrics.Accuracy())
model.fit(train_dataset, batch_size=64)

File diff suppressed because it is too large

@@ -23,7 +23,7 @@ import contextlib
from paddle import fluid
from paddle.incubate.hapi import Model, Input, set_device
from paddle.incubate.hapi.loss import CrossEntropy
from paddle.nn.layer.loss import CrossEntropyLoss
from paddle.incubate.hapi.vision.models import LeNet
from paddle.incubate.hapi.metrics import Accuracy
from paddle.incubate.hapi.callbacks import ProgBarLogger
@@ -67,10 +67,10 @@ class TestDistTraning(unittest.TestCase):
inputs = [Input('image', im_shape, 'float32')]
labels = [Input('label', [None, 1], 'int64')]
model = Model(LeNet(), inputs, labels)
model = Model(LeNet(classifier_activation=None), inputs, labels)
optim = fluid.optimizer.Momentum(
learning_rate=0.001, momentum=.9, parameter_list=model.parameters())
model.prepare(optim, CrossEntropy(), Accuracy())
model.prepare(optim, CrossEntropyLoss(), Accuracy())
train_dataset = MnistDataset(mode='train')
val_dataset = MnistDataset(mode='test')

@@ -23,7 +23,7 @@ import contextlib
from paddle import fluid
from paddle.incubate.hapi import Model, Input, set_device
from paddle.incubate.hapi.loss import CrossEntropy
from paddle.nn.layer.loss import CrossEntropyLoss
from paddle.incubate.hapi.vision.models import LeNet
from paddle.incubate.hapi.metrics import Accuracy
from paddle.incubate.hapi.callbacks import ProgBarLogger
@@ -66,10 +66,10 @@ class TestDistTraning(unittest.TestCase):
inputs = [Input('image', im_shape, 'float32')]
labels = [Input('label', [None, 1], 'int64')]
model = Model(LeNet(), inputs, labels)
model = Model(LeNet(classifier_activation=None), inputs, labels)
optim = fluid.optimizer.Momentum(
learning_rate=0.001, momentum=.9, parameter_list=model.parameters())
model.prepare(optim, CrossEntropy(), Accuracy())
model.prepare(optim, CrossEntropyLoss(), Accuracy())
train_dataset = MnistDataset(mode='train')
val_dataset = MnistDataset(mode='test')

@@ -1,111 +0,0 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import print_function
import unittest
import os
import six
import numpy as np
import shutil
import copy
import paddle
from paddle import fluid
from paddle.incubate.hapi.model import Model, Input
from paddle.incubate.hapi.loss import CrossEntropy, SoftmaxWithCrossEntropy
def stable_softmax(x):
"""Compute the softmax of vector x in a numerically stable way."""
# clip to shiftx, otherwise, when calc loss with
# log(exp(shiftx)), may get log(0)=INF
shiftx = (x - np.max(x)).clip(-64.)
exps = np.exp(shiftx)
return exps / np.sum(exps)
def randomize_probability(batch_size, class_num, dtype='float32'):
prob = np.random.uniform(
0.1, 1.0, size=(batch_size, class_num)).astype(dtype)
prob_sum = prob.sum(axis=1)
for i in six.moves.xrange(len(prob)):
prob[i] /= prob_sum[i]
return prob
def numpy_ce(x, label):
return np.asmatrix(
[[-np.log(x[i][label[i][0]])] for i in range(x.shape[0])],
dtype="float32").mean()
class TestLoss(unittest.TestCase):
def test_cross_entropy(self):
class_num = 100
batch_size = 128
inputs = [randomize_probability(128, class_num) for _ in range(2)]
labels = [
np.random.randint(
0, class_num, (batch_size, 1), dtype="int64") for _ in range(2)
]
gt_out = [numpy_ce(inputs[i], labels[i]) for i in range(2)]
fluid.enable_dygraph()
cross_entropy = CrossEntropy()
out = cross_entropy(
[fluid.dygraph.to_variable(x) for x in inputs],
[fluid.dygraph.to_variable(label) for label in labels])
out = [o.numpy() for o in out]
for o, g in zip(out, gt_out):
np.testing.assert_allclose(o, g, atol=1e-5)
def test_soft_cross_entropy(self):
class_num = 100
batch_size = 128
inputs = [randomize_probability(128, class_num) for _ in range(2)]
labels = [
np.random.randint(
0, class_num, (batch_size, 1), dtype="int64") for _ in range(2)
]
fluid.enable_dygraph()
softmax_cross_entropy = SoftmaxWithCrossEntropy()
softmax_cross_entropy(
[fluid.dygraph.to_variable(x) for x in inputs],
[fluid.dygraph.to_variable(label) for label in labels])
softmax_cross_entropy = SoftmaxWithCrossEntropy(average=False)
inputs = [randomize_probability(128, class_num)]
labels = [
np.random.randint(
0, class_num, (batch_size, 1), dtype="int64")
]
softmax_cross_entropy([fluid.dygraph.to_variable(x) for x in inputs],
fluid.dygraph.to_variable(labels[0]))
if __name__ == '__main__':
unittest.main()

@@ -28,7 +28,7 @@ from paddle.fluid.dygraph.base import to_variable
import paddle.incubate.hapi as hapi
from paddle.incubate.hapi import Model, Input
from paddle.incubate.hapi.loss import CrossEntropy
from paddle.nn.layer.loss import CrossEntropyLoss
from paddle.incubate.hapi.metrics import Accuracy
from paddle.incubate.hapi.datasets import MNIST
from paddle.incubate.hapi.vision.models import LeNet
@@ -36,7 +36,7 @@ from paddle.incubate.hapi.distributed import DistributedBatchSampler, prepare_di
class LeNetDygraph(fluid.dygraph.Layer):
def __init__(self, num_classes=10, classifier_activation='softmax'):
def __init__(self, num_classes=10, classifier_activation=None):
super(LeNetDygraph, self).__init__()
self.num_classes = num_classes
self.features = Sequential(
@@ -97,7 +97,7 @@ def dynamic_train(model, dataloader):
model.train()
for inputs, labels in dataloader:
outputs = model(inputs)
loss = fluid.layers.cross_entropy(outputs, labels)
loss = CrossEntropyLoss(reduction="sum")(outputs, labels)
avg_loss = fluid.layers.reduce_sum(loss)
avg_loss.backward()
optim.minimize(avg_loss)
@@ -190,13 +190,13 @@ class TestModel(unittest.TestCase):
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
net = LeNet()
net = LeNet(classifier_activation=None)
optim_new = fluid.optimizer.Adam(
learning_rate=0.001, parameter_list=net.parameters())
model = Model(net, inputs=self.inputs, labels=self.labels)
model.prepare(
optim_new,
loss_function=CrossEntropy(average=False),
loss_function=CrossEntropyLoss(reduction="sum"),
metrics=Accuracy())
model.fit(self.train_dataset, batch_size=64, shuffle=False)
@@ -271,9 +271,9 @@ class TestModel(unittest.TestCase):
class MyModel(fluid.dygraph.Layer):
def __init__(self):
def __init__(self, classifier_activation='softmax'):
super(MyModel, self).__init__()
self._fc = Linear(20, 10, act='softmax')
self._fc = Linear(20, 10, act=classifier_activation)
def forward(self, x):
y = self._fc(x)
@@ -293,13 +293,12 @@ class TestModelFunction(unittest.TestCase):
def get_expect():
fluid.enable_dygraph(fluid.CPUPlace())
self.set_seed()
m = MyModel()
m = MyModel(classifier_activation=None)
optim = fluid.optimizer.SGD(learning_rate=0.001,
parameter_list=m.parameters())
m.train()
output = m(to_variable(data))
l = to_variable(label)
loss = fluid.layers.cross_entropy(output, l)
loss = CrossEntropyLoss(reduction='sum')(output, to_variable(label))
avg_loss = fluid.layers.reduce_sum(loss)
avg_loss.backward()
optim.minimize(avg_loss)
@@ -313,14 +312,15 @@ class TestModelFunction(unittest.TestCase):
fluid.enable_dygraph(device) if dynamic else None
self.set_seed()
net = MyModel()
net = MyModel(classifier_activation=None)
optim2 = fluid.optimizer.SGD(learning_rate=0.001,
parameter_list=net.parameters())
inputs = [Input('x', [None, dim], 'float32')]
labels = [Input('label', [None, 1], 'int64')]
model = Model(net, inputs, labels)
model.prepare(optim2, loss_function=CrossEntropy(average=False))
model.prepare(
optim2, loss_function=CrossEntropyLoss(reduction="sum"))
loss, = model.train_batch([data], [label])
np.testing.assert_allclose(loss.flatten(), ref.flatten())
@@ -358,14 +358,15 @@ class TestModelFunction(unittest.TestCase):
for dynamic in [True, False]:
device = hapi.set_device('cpu')
fluid.enable_dygraph(device) if dynamic else None
net = MyModel()
net = MyModel(classifier_activation=None)
inputs = [Input('x', [None, 20], 'float32')]
labels = [Input('label', [None, 1], 'int64')]
optim = fluid.optimizer.SGD(learning_rate=0.001,
parameter_list=net.parameters())
model = Model(net, inputs, labels)
model.prepare(
optimizer=optim, loss_function=CrossEntropy(average=False))
optimizer=optim,
loss_function=CrossEntropyLoss(reduction="sum"))
model.save(path + '/test')
model.load(path + '/test')
shutil.rmtree(path)
@@ -376,48 +377,48 @@ class TestModelFunction(unittest.TestCase):
# dynamic saving
device = hapi.set_device('cpu')
fluid.enable_dygraph(device)
model = Model(MyModel())
model = Model(MyModel(classifier_activation=None))
optim = fluid.optimizer.SGD(learning_rate=0.001,
parameter_list=model.parameters())
model.prepare(
optimizer=optim, loss_function=CrossEntropy(average=False))
optimizer=optim, loss_function=CrossEntropyLoss(reduction="sum"))
model.save(path + '/test')
fluid.disable_dygraph()
inputs = [Input('x', [None, 20], 'float32')]
labels = [Input('label', [None, 1], 'int64')]
model = Model(MyModel(), inputs, labels)
model = Model(MyModel(classifier_activation=None), inputs, labels)
optim = fluid.optimizer.SGD(learning_rate=0.001,
parameter_list=model.parameters())
model.prepare(
optimizer=optim, loss_function=CrossEntropy(average=False))
optimizer=optim, loss_function=CrossEntropyLoss(reduction="sum"))
model.load(path + '/test')
shutil.rmtree(path)
def test_static_save_dynamic_load(self):
path = tempfile.mkdtemp()
net = MyModel()
net = MyModel(classifier_activation=None)
inputs = [Input('x', [None, 20], 'float32')]
labels = [Input('label', [None, 1], 'int64')]
optim = fluid.optimizer.SGD(learning_rate=0.001,
parameter_list=net.parameters())
model = Model(net, inputs, labels)
model.prepare(
optimizer=optim, loss_function=CrossEntropy(average=False))
optimizer=optim, loss_function=CrossEntropyLoss(reduction="sum"))
model.save(path + '/test')
device = hapi.set_device('cpu')
fluid.enable_dygraph(device) #if dynamic else None
net = MyModel()
net = MyModel(classifier_activation=None)
inputs = [Input('x', [None, 20], 'float32')]
labels = [Input('label', [None, 1], 'int64')]
optim = fluid.optimizer.SGD(learning_rate=0.001,
parameter_list=net.parameters())
model = Model(net, inputs, labels)
model.prepare(
optimizer=optim, loss_function=CrossEntropy(average=False))
optimizer=optim, loss_function=CrossEntropyLoss(reduction="sum"))
model.load(path + '/test')
shutil.rmtree(path)
fluid.disable_dygraph()
