@@ -41,15 +41,11 @@ def _tensor_run_opt(opt, spars_opt, learning_rate, l1, l2, lr_power, linear, gra
     return success
 
 
-def _check_param(initial_accum, learning_rate, lr_power, l1, l2, use_locking, loss_scale=1.0, weight_decay=0.0,
-                 prim_name=None):
+def _check_param(initial_accum, lr_power, l1, l2, use_locking, weight_decay=0.0, prim_name=None):
     """Check param."""
     validator.check_value_type("initial_accum", initial_accum, [float], prim_name)
     validator.check_number("initial_accum", initial_accum, 0.0, Rel.GE, prim_name)
 
-    validator.check_value_type("learning_rate", learning_rate, [float], prim_name)
-    validator.check_number("learning_rate", learning_rate, 0.0, Rel.GT, prim_name)
-
     validator.check_value_type("lr_power", lr_power, [float], prim_name)
     validator.check_number("lr_power", lr_power, 0.0, Rel.LE, prim_name)
 
@@ -61,9 +57,6 @@ def _check_param(initial_accum, learning_rate, lr_power, l1, l2, use_locking, lo
     validator.check_value_type("use_locking", use_locking, [bool], prim_name)
 
-    validator.check_value_type("loss_scale", loss_scale, [float], prim_name)
-    validator.check_number("loss_scale", loss_scale, 1.0, Rel.GE, prim_name)
-
     validator.check_value_type("weight_decay", weight_decay, [float], prim_name)
     validator.check_number("weight_decay", weight_decay, 0.0, Rel.GE, prim_name)
 
@@ -110,21 +103,18 @@ class FTRL(Optimizer):
     """
     def __init__(self, params, initial_accum=0.1, learning_rate=0.001, lr_power=-0.5, l1=0.0, l2=0.0,
                  use_locking=False, loss_scale=1.0, weight_decay=0.0):
-        super(FTRL, self).__init__(learning_rate, params)
+        super(FTRL, self).__init__(learning_rate, params, loss_scale=loss_scale)
         if self.is_group:
             raise RuntimeError(f"The {self.cls_name} optimizer cannot support group setting.")
-        _check_param(initial_accum, learning_rate, lr_power, l1, l2, use_locking, loss_scale, weight_decay,
-                     self.cls_name)
+        _check_param(initial_accum, lr_power, l1, l2, use_locking, weight_decay, self.cls_name)
         self.moments = self.parameters.clone(prefix="moments", init=initial_accum)
         self.linear = self.parameters.clone(prefix="linear", init='zeros')
         self.l1 = l1
         self.l2 = l2
         self.lr_power = lr_power
-        self.reciprocal_scale = 1.0 / loss_scale
         self.weight_decay = weight_decay
         self.decay_tf = tuple((lambda: True)() for x in self.parameters)
         self.hyper_map = C.HyperMap()
         self.map_ = C.Map()
         self.opt = P.ApplyFtrl(use_locking=use_locking)
         self.sparse_opt = P.SparseApplyFtrl(learning_rate, l1, l2, lr_power, use_locking=use_locking)
 
@@ -132,11 +122,11 @@ class FTRL(Optimizer):
         params = self.parameters
         moments = self.moments
         linear = self.linear
+        lr = self.learning_rate
         if self.weight_decay > 0.0:
             grads = self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_tf, params, grads)
-        if self.reciprocal_scale != 1.0:
-            grads = self.hyper_map(F.partial(grad_scale, self.reciprocal_scale), grads)
-        lr = self.learning_rate
+        grads = self.scale_grad(grads)
         success = self.map_(F.partial(ftrl_opt, self.opt, self.sparse_opt, lr, self.l1, self.l2, self.lr_power),
                             linear, grads, params, moments)
         return success
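
Below is a minimal usage sketch of the refactored optimizer after this change: loss_scale is now forwarded to the Optimizer base class, and gradients are un-scaled through the inherited scale_grad() call instead of the removed self.reciprocal_scale path. The toy Dense network and the concrete hyper-parameter values are illustrative assumptions, not part of the patch.

    # Minimal sketch (assumptions: a toy single-layer network; the training loop is omitted).
    import mindspore.nn as nn

    net = nn.Dense(16, 4)                          # placeholder Cell; any network works
    opt = nn.FTRL(net.trainable_params(),          # FTRL does not support group settings
                  initial_accum=0.1,
                  learning_rate=0.001,
                  lr_power=-0.5,
                  l1=0.0,
                  l2=0.0,
                  weight_decay=0.0,
                  loss_scale=128.0)                # now handled by the Optimizer base class

Centralizing the un-scaling in the base class's scale_grad keeps FTRL consistent with the other optimizers and removes the duplicated reciprocal_scale bookkeeping.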