【paddle.fleet】add lamb to fleet meta optimizer (#26025)
add lamb to fleet meta optimizer
parent 1be6bf45ae
commit 54003b873e
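With this change, LAMB can be switched on through the fleet `DistributedStrategy` instead of being constructed by hand. A minimal usage sketch, mirroring the unit test added below (the tiny network and the collective role-maker setup are illustrative, not part of this commit):

# Minimal sketch, assuming the collective role-maker environment variables
# (POD_IP, PADDLE_TRAINER_ENDPOINTS, ...) are set as in the unit test below.
import paddle
import paddle.fleet as fleet
import paddle.fluid as fluid
import paddle.fluid.incubate.fleet.base.role_maker as role_maker

fleet.init(role_maker.PaddleCloudRoleMaker(is_collective=True))

# Illustrative network; any program built from fluid layers works the same way.
input_x = fluid.layers.data(name="x", shape=[32], dtype='float32')
input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
prediction = fluid.layers.fc(input=input_x, size=2, act='softmax')
cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
avg_cost = fluid.layers.mean(x=cost)

strategy = paddle.fleet.DistributedStrategy()
strategy.lamb = True
strategy.lamb_configs = {
    'lamb_weight_decay': 0.01,
    'exclude_from_weight_decay': ['.b_0'],  # skip weight decay for biases
}

# The inner optimizer must be Adam; the meta optimizer replaces it with LAMB.
optimizer = paddle.optimizer.Adam(learning_rate=0.01)
optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
optimizer.minimize(avg_cost)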
@@ -0,0 +1,99 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle.fluid.optimizer import AdamOptimizer
from paddle.fluid.optimizer import LambOptimizer as LAMB
from .meta_optimizer_base import MetaOptimizerBase
import logging

__all__ = ["LambOptimizer"]


class LambOptimizer(MetaOptimizerBase):
    def __init__(self, optimizer):
        super(LambOptimizer, self).__init__(optimizer)
        self.inner_opt = optimizer
        self.lamb_opt = None
        # we do not allow meta optimizer to be inner optimizer currently
        self.meta_optimizers_white_list = []

    def _set_basic_info(self, loss, role_maker, user_defined_optimizer,
                        user_defined_strategy):
        super(LambOptimizer, self)._set_basic_info(
            loss, role_maker, user_defined_optimizer, user_defined_strategy)

        opt = self.inner_opt
        if not isinstance(opt, AdamOptimizer):
            return

        configs = self.user_defined_strategy.lamb_configs
        if len(configs['exclude_from_weight_decay']) == 0:
            _exclude_from_weight_decay_fn = None
        else:

            def exclude_fn(param):
                # exclude a parameter from weight decay if its name ends
                # with any of the configured suffixes
                exclude_list = configs['exclude_from_weight_decay']
                for name in exclude_list:
                    if param.name.endswith(name):
                        return True
                return False

            _exclude_from_weight_decay_fn = exclude_fn

        # rebuild the inner Adam optimizer as LAMB with the same
        # hyper-parameters plus the LAMB-specific weight decay settings
        self.lamb_opt = LAMB(
            learning_rate=opt._learning_rate,
            lamb_weight_decay=configs['lamb_weight_decay'],
            beta1=opt._beta1,
            beta2=opt._beta2,
            epsilon=opt._epsilon,
            parameter_list=opt._parameter_list,
            regularization=opt.regularization,
            grad_clip=opt._grad_clip,
            exclude_from_weight_decay_fn=_exclude_from_weight_decay_fn,
            name=opt._name)

    def _can_apply(self):
        if self.user_defined_strategy.lamb:
            if not isinstance(self.inner_opt, AdamOptimizer):
                logging.warn(
                    "lamb needs the inner optimizer to be AdamOptimizer but got {}.".
                    format(self.inner_opt.type))
                return False
            return True
        return False

    def _disable_strategy(self, dist_strategy):
        dist_strategy.lamb = False
        dist_strategy.lamb_configs = {
            'lamb_weight_decay': 0.01,
            'exclude_from_weight_decay': [],
        }

    def backward(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None,
                 callbacks=None):
        return self.lamb_opt.backward(loss, startup_program, parameter_list,
                                      no_grad_set, callbacks)

    def minimize_impl(self,
                      loss,
                      startup_program=None,
                      parameter_list=None,
                      no_grad_set=None):
        optimize_ops, params_grads = \
            self.lamb_opt.minimize(loss, startup_program,
                                   parameter_list, no_grad_set)
        return optimize_ops, params_grads
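The `exclude_from_weight_decay` entries are matched as parameter-name suffixes: `exclude_fn` returns True for any parameter whose name ends with one of them, and LAMB then applies no weight decay to that parameter. A small standalone sketch of that matching rule (`FakeParam` is a stand-in introduced here only for illustration):

# Standalone sketch of the suffix matching used by exclude_from_weight_decay_fn.
# FakeParam stands in for a Paddle parameter; only its name matters here.
class FakeParam(object):
    def __init__(self, name):
        self.name = name

def exclude_fn(param, exclude_list=('.b_0', )):
    # a parameter is excluded if its name ends with any configured suffix
    for suffix in exclude_list:
        if param.name.endswith(suffix):
            return True
    return False

assert exclude_fn(FakeParam('fc_0.b_0')) is True    # bias: no weight decay
assert exclude_fn(FakeParam('fc_0.w_0')) is False   # weight: decay applied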
@@ -0,0 +1,108 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import paddle
from paddle import fluid
import os
import paddle.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker


class TestFleetLambMetaOptimizer(unittest.TestCase):
    def setUp(self):
        os.environ["POD_IP"] = "127.0.0.1"
        os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001"
        os.environ["PADDLE_TRAINERS_NUM"] = "2"
        os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = \
            "127.0.0.1:36001,127.0.0.2:36001"

    def net(self, main_prog, startup_prog):
        with fluid.program_guard(main_prog, startup_prog):
            with fluid.unique_name.guard():
                input_x = paddle.fluid.layers.data(
                    name="x", shape=[32], dtype='float32')
                input_y = paddle.fluid.layers.data(
                    name="y", shape=[1], dtype='int64')

                fc_1 = paddle.fluid.layers.fc(input=input_x,
                                              size=64,
                                              act='tanh')
                fc_2 = paddle.fluid.layers.fc(input=fc_1, size=256, act='tanh')
                prediction = paddle.fluid.layers.fc(input=[fc_2],
                                                    size=2,
                                                    act='softmax')
                cost = paddle.fluid.layers.cross_entropy(
                    input=prediction, label=input_y)
                avg_cost = paddle.fluid.layers.mean(x=cost)

                strategy = paddle.fleet.DistributedStrategy()
                strategy.lamb = True
                strategy.lamb_configs = {
                    'lamb_weight_decay': 0.01,
                    'exclude_from_weight_decay': [],
                }

        return avg_cost, strategy

    def test_lamb_optimizer(self):
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        startup_prog = fluid.Program()
        train_prog = fluid.Program()
        avg_cost, strategy = self.net(train_prog, startup_prog)
        optimizer = paddle.optimizer.Adam(learning_rate=0.01)
        optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
        optimizer.minimize(avg_cost)

        ops = [op.type for op in avg_cost.block.ops]
        self.assertIn('lamb', ops)

    def test_lamb_not_apply_with_momentum(self):
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        startup_prog = fluid.Program()
        train_prog = fluid.Program()
        avg_cost, strategy = self.net(train_prog, startup_prog)
        optimizer = paddle.optimizer.Momentum(learning_rate=0.1, momentum=0.9)
        optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
        optimizer.minimize(avg_cost)

        ops = [op.type for op in avg_cost.block.ops]
        self.assertNotIn('lamb', ops)

    def test_lamb_exclude_fn(self):
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        startup_prog = fluid.Program()
        train_prog = fluid.Program()
        avg_cost, strategy = self.net(train_prog, startup_prog)
        optimizer = paddle.optimizer.Adam(learning_rate=0.01)
        strategy.lamb_configs = {
            'lamb_weight_decay': 0.01,
            'exclude_from_weight_decay': ['.b_0'],
        }
        optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
        optimizer.minimize(avg_cost)

        ops_with_bias = [
            op for op in avg_cost.block.ops
            if op.type == 'lamb' and op.attr('op_role_var')[0].endswith('.b_0')
        ]
        for op in ops_with_bias:
            self.assertEqual(op.attr('weight_decay'), 0)


if __name__ == "__main__":
    unittest.main()