@@ -63,18 +63,14 @@ def noam_decay(d_model, warmup_steps):
     Returns:
         The decayed learning rate.
     """
 
-    def _lr_schedule(dtype):
-        with default_main_program()._lr_schedule_guard():
-            global_step = _decay_step_counter(1)
+    with default_main_program()._lr_schedule_guard():
+        global_step = _decay_step_counter(1)
 
-            a = global_step**-0.5
-            b = (warmup_steps**-1.5) * global_step
-            lr_value = (d_model**-0.5) * nn.elementwise_min(a, b)
+        a = global_step**-0.5
+        b = (warmup_steps**-1.5) * global_step
+        lr_value = (d_model**-0.5) * nn.elementwise_min(a, b)
 
-            return lr_value
-
-    return _lr_schedule
+        return lr_value
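As a reading aid (not part of the patch): both sides of this hunk build the Noam schedule from "Attention Is All You Need", lr = d_model^-0.5 * min(step^-0.5, step * warmup_steps^-1.5); only the wrapping changes. A minimal plain-Python sketch, with a hypothetical helper name and illustrative argument values:

```python
def noam_lr(step, d_model, warmup_steps):
    # Rises linearly for warmup_steps steps, then decays as step**-0.5.
    # Steps count from 1, matching _decay_step_counter(1) in the hunk.
    a = step ** -0.5
    b = (warmup_steps ** -1.5) * step
    return (d_model ** -0.5) * min(a, b)

# The peak learning rate is reached exactly at step == warmup_steps.
print(noam_lr(1, 512, 4000), noam_lr(4000, 512, 4000), noam_lr(40000, 512, 4000))
```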
@@ -113,19 +109,15 @@ def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
           sgd_optimizer.minimize(avg_cost)
 
     """
 
-    def _lr_schedule(dtype):
-        with default_main_program()._lr_schedule_guard():
-            global_step = _decay_step_counter()
+    with default_main_program()._lr_schedule_guard():
+        global_step = _decay_step_counter()
 
-            div_res = global_step / decay_steps
-            if staircase:
-                div_res = ops.floor(div_res)
-            decayed_lr = learning_rate * (decay_rate**div_res)
+        div_res = global_step / decay_steps
+        if staircase:
+            div_res = ops.floor(div_res)
+        decayed_lr = learning_rate * (decay_rate**div_res)
 
-            return decayed_lr
-
-    return _lr_schedule
+        return decayed_lr
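Equivalently, in plain Python (an illustrative sketch, not code from this patch), exponential_decay computes:

```python
import math

def exponential_lr(step, learning_rate, decay_steps, decay_rate, staircase=False):
    # lr = learning_rate * decay_rate ** (step / decay_steps)
    exponent = step / decay_steps
    if staircase:
        # hold the rate constant within each decay_steps-wide interval
        exponent = math.floor(exponent)
    return learning_rate * decay_rate ** exponent

print(exponential_lr(5000, 0.1, 10000, 0.5))                  # ~0.0707 (smooth)
print(exponential_lr(5000, 0.1, 10000, 0.5, staircase=True))  # 0.1 (not yet stepped)
```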
@@ -146,19 +138,15 @@ def natural_exp_decay(learning_rate, decay_steps, decay_rate, staircase=False):
     Returns:
         The decayed learning rate
     """
 
-    def _lr_schedule(dtype):
-        with default_main_program()._lr_schedule_guard():
-            global_step = _decay_step_counter()
+    with default_main_program()._lr_schedule_guard():
+        global_step = _decay_step_counter()
 
-            div_res = global_step / decay_steps
-            if staircase:
-                div_res = ops.floor(div_res)
-            decayed_lr = learning_rate * ops.exp(-1 * decay_rate * div_res)
+        div_res = global_step / decay_steps
+        if staircase:
+            div_res = ops.floor(div_res)
+        decayed_lr = learning_rate * ops.exp(-1 * decay_rate * div_res)
 
-            return decayed_lr
-
-    return _lr_schedule
+        return decayed_lr
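natural_exp_decay is the same shape with base e: lr = learning_rate * exp(-decay_rate * step / decay_steps). A plain-Python sketch for comparison (helper name is illustrative, not from the patch):

```python
import math

def natural_exp_lr(step, learning_rate, decay_steps, decay_rate, staircase=False):
    # lr = learning_rate * exp(-decay_rate * step / decay_steps)
    exponent = step / decay_steps
    if staircase:
        exponent = math.floor(exponent)
    return learning_rate * math.exp(-decay_rate * exponent)

# At step == decay_steps the rate has fallen by a factor of e**-decay_rate.
print(natural_exp_lr(10000, 0.1, 10000, 0.5))  # ~0.0607
```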
@@ -196,20 +184,16 @@ def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False):
                     staircase=True))
           sgd_optimizer.minimize(avg_cost)
     """
 
-    def _lr_schedule(dtype):
-        with default_main_program()._lr_schedule_guard():
-            global_step = _decay_step_counter()
+    with default_main_program()._lr_schedule_guard():
+        global_step = _decay_step_counter()
 
-            div_res = global_step / decay_steps
-            if staircase:
-                div_res = ops.floor(div_res)
+        div_res = global_step / decay_steps
+        if staircase:
+            div_res = ops.floor(div_res)
 
-            decayed_lr = learning_rate / (1 + decay_rate * div_res)
+        decayed_lr = learning_rate / (1 + decay_rate * div_res)
 
-            return decayed_lr
-
-    return _lr_schedule
+        return decayed_lr
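For reference, inverse_time_decay divides rather than exponentiates: lr = learning_rate / (1 + decay_rate * step / decay_steps). An illustrative sketch only:

```python
import math

def inverse_time_lr(step, learning_rate, decay_steps, decay_rate, staircase=False):
    # lr = learning_rate / (1 + decay_rate * step / decay_steps)
    ratio = step / decay_steps
    if staircase:
        ratio = math.floor(ratio)
    return learning_rate / (1 + decay_rate * ratio)

print(inverse_time_lr(10000, 0.1, 10000, 0.5))  # 0.1 / 1.5 ~= 0.0667
```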
@@ -240,33 +224,28 @@ def polynomial_decay(learning_rate,
     Returns:
         Variable: The decayed learning rate
     """
 
-    def _lr_schedule(dtype, decay_steps=decay_steps):
-        with default_main_program()._lr_schedule_guard():
-            global_step = _decay_step_counter()
+    with default_main_program()._lr_schedule_guard():
+        global_step = _decay_step_counter()
 
-            if cycle:
-                div_res = ops.ceil(global_step / decay_steps)
-                zero_var = tensor.fill_constant(
-                    shape=[1], dtype=dtype, value=0.0)
-                one_var = tensor.fill_constant(
-                    shape=[1], dtype=dtype, value=1.0)
+        if cycle:
+            div_res = ops.ceil(global_step / decay_steps)
+            zero_var = tensor.fill_constant(
+                shape=[1], dtype='float32', value=0.0)
+            one_var = tensor.fill_constant(
+                shape=[1], dtype='float32', value=1.0)
 
-                with control_flow.Switch() as switch:
-                    with switch.case(global_step == zero_var):
-                        tensor.assign(input=one_var, output=div_res)
-                decay_steps = decay_steps * div_res
-            else:
-                decay_steps_var = tensor.fill_constant(
-                    shape=[1], dtype=dtype, value=float(decay_steps))
-                global_step = nn.elementwise_min(
-                    x=global_step, y=decay_steps_var)
+            with control_flow.Switch() as switch:
+                with switch.case(global_step == zero_var):
+                    tensor.assign(input=one_var, output=div_res)
+            decay_steps = decay_steps * div_res
+        else:
+            decay_steps_var = tensor.fill_constant(
+                shape=[1], dtype='float32', value=float(decay_steps))
+            global_step = nn.elementwise_min(x=global_step, y=decay_steps_var)
 
-            decayed_lr = (learning_rate - end_learning_rate) * \
-                ((1 - global_step / decay_steps) ** power) + end_learning_rate
-            return decayed_lr
-
-    return _lr_schedule
+        decayed_lr = (learning_rate - end_learning_rate) * \
+            ((1 - global_step / decay_steps) ** power) + end_learning_rate
+
+        return decayed_lr
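The polynomial schedule is the least obvious of the group, so a plain-Python rendering of both `cycle` branches may help (hypothetical helper; the defaults shown are illustrative, not taken from this patch):

```python
import math

def polynomial_lr(step, learning_rate, decay_steps,
                  end_learning_rate=0.0001, power=1.0, cycle=False):
    if cycle:
        # Restart the decay every decay_steps; the Switch in the hunk exists
        # only to map ceil(0 / decay_steps) == 0 to 1 for the very first step.
        periods = max(1.0, math.ceil(step / decay_steps))
        decay_steps = decay_steps * periods
    else:
        # Clamp so the rate settles at end_learning_rate after decay_steps.
        step = min(step, decay_steps)
    return (learning_rate - end_learning_rate) * \
        (1 - step / decay_steps) ** power + end_learning_rate
```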
@@ -294,42 +273,38 @@ def piecewise_decay(boundaries, values):
     """
 
-    def _lr_schedule(dtype):
-        with default_main_program()._lr_schedule_guard():
-            if len(values) - len(boundaries) != 1:
-                raise ValueError("len(values) - len(boundaries) should be 1")
-
-            global_step = _decay_step_counter()
-
-            lr = tensor.create_global_var(
-                shape=[1],
-                value=0.0,
-                dtype='float32',
-                persistable=True,
-                name="learning_rate")
-
-            with control_flow.Switch() as switch:
-                for i in range(len(boundaries)):
-                    boundary_val = tensor.fill_constant(
-                        shape=[1],
-                        dtype='float32',
-                        value=float(boundaries[i]),
-                        force_cpu=True)
-                    value_var = tensor.fill_constant(
-                        shape=[1], dtype='float32', value=float(values[i]))
-                    with switch.case(global_step < boundary_val):
-                        tensor.assign(value_var, lr)
-                last_value_var = tensor.fill_constant(
-                    shape=[1],
-                    dtype='float32',
-                    value=float(values[len(values) - 1]))
-                with switch.default():
-                    tensor.assign(last_value_var, lr)
-
-            return lr
-
-    return _lr_schedule
+    with default_main_program()._lr_schedule_guard():
+        if len(values) - len(boundaries) != 1:
+            raise ValueError("len(values) - len(boundaries) should be 1")
+
+        global_step = _decay_step_counter()
+
+        lr = tensor.create_global_var(
+            shape=[1],
+            value=0.0,
+            dtype='float32',
+            persistable=True,
+            name="learning_rate")
+
+        with control_flow.Switch() as switch:
+            for i in range(len(boundaries)):
+                boundary_val = tensor.fill_constant(
+                    shape=[1],
+                    dtype='float32',
+                    value=float(boundaries[i]),
+                    force_cpu=True)
+                value_var = tensor.fill_constant(
+                    shape=[1], dtype='float32', value=float(values[i]))
+                with switch.case(global_step < boundary_val):
+                    tensor.assign(value_var, lr)
+            last_value_var = tensor.fill_constant(
+                shape=[1],
+                dtype='float32',
+                value=float(values[len(values) - 1]))
+            with switch.default():
+                tensor.assign(last_value_var, lr)
+
+        return lr
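The Switch block above is a graph-side encoding of an ordinary table lookup; in plain Python (illustration only, not patch code) it reduces to:

```python
def piecewise_lr(step, boundaries, values):
    # values[i] applies while step < boundaries[i]; the last value applies
    # from the final boundary onwards, hence the length check.
    if len(values) - len(boundaries) != 1:
        raise ValueError("len(values) - len(boundaries) should be 1")
    for boundary, value in zip(boundaries, values):
        if step < boundary:
            return value
    return values[-1]

print(piecewise_lr(5000, [10000, 20000], [1.0, 0.5, 0.1]))   # 1.0
print(piecewise_lr(15000, [10000, 20000], [1.0, 0.5, 0.1]))  # 0.5
print(piecewise_lr(25000, [10000, 20000], [1.0, 0.5, 0.1]))  # 0.1
```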
def append_LARS(params_grads, learning_rate, weight_decay):