@@ -275,15 +275,26 @@ class Optimizer(object):
         self._create_global_learning_rate()
 
         optimize_ops = []
-        for param_and_grad in parameters_and_grads:
-            if param_and_grad[1] is None:
-                continue
-            with param_and_grad[0].block.program._optimized_guard(
-                    param_and_grad), name_scope("optimizer"):
-                if param_and_grad[0].trainable is True:
-                    optimize_op = self._append_optimize_op(global_block,
-                                                           param_and_grad)
-                    optimize_ops.append(optimize_op)
+        if framework._in_dygraph_mode():
+            for param_and_grad in parameters_and_grads:
+                if param_and_grad[1] is None:
+                    continue
+                with param_and_grad[0].block.program._optimized_guard(
+                        param_and_grad):
+                    if param_and_grad[0].trainable is True:
+                        optimize_op = self._append_optimize_op(global_block,
+                                                               param_and_grad)
+                        optimize_ops.append(optimize_op)
+        else:
+            for param_and_grad in parameters_and_grads:
+                if param_and_grad[1] is None:
+                    continue
+                with param_and_grad[0].block.program._optimized_guard(
+                        param_and_grad), name_scope("optimizer"):
+                    if param_and_grad[0].trainable is True:
+                        optimize_op = self._append_optimize_op(global_block,
+                                                               param_and_grad)
+                        optimize_ops.append(optimize_op)
 
         # Get custom finish ops for subclasses
         # FIXME: Need to fix this once we figure out how to handle dependencies
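For context, the hunk above makes the optimization pass branch on execution mode: when framework._in_dygraph_mode() reports imperative (dygraph) execution, the per-parameter optimize ops are appended without the name_scope("optimizer") annotation, while the static-graph path keeps it. The sketch below is a minimal, standalone illustration of that gate-by-mode pattern; in_dygraph_mode, name_scope, and append_optimize_ops here are hypothetical stand-ins rather than the PaddlePaddle API, and the two duplicated loops from the hunk are condensed into one loop with a conditional scope.

from contextlib import contextmanager, nullcontext

_DYGRAPH_MODE = False  # hypothetical flag; flip to True to emulate imperative (dygraph) execution


def in_dygraph_mode():
    # Stand-in for framework._in_dygraph_mode().
    return _DYGRAPH_MODE


@contextmanager
def name_scope(name):
    # Stand-in for fluid's name_scope(); only prints so the chosen branch is visible.
    print("enter scope:", name)
    yield
    print("exit scope:", name)


def append_optimize_ops(parameters_and_grads):
    # Mirror the hunk's behaviour: skip parameters that have no gradient, and wrap
    # op creation in name_scope("optimizer") only on the static-graph path.
    optimize_ops = []
    for param, grad in parameters_and_grads:
        if grad is None:
            continue
        scope = nullcontext() if in_dygraph_mode() else name_scope("optimizer")
        with scope:
            optimize_ops.append(("sgd_update", param, grad))
    return optimize_ops


if __name__ == "__main__":
    print(append_optimize_ops([("w", "dw"), ("frozen_w", None), ("b", "db")]))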