@@ -275,15 +275,26 @@ class Optimizer(object):
         self._create_global_learning_rate()
 
         optimize_ops = []
-        for param_and_grad in parameters_and_grads:
-            if param_and_grad[1] is None:
-                continue
-            with param_and_grad[0].block.program._optimized_guard(
-                    param_and_grad), name_scope("optimizer"):
-                if param_and_grad[0].trainable is True:
-                    optimize_op = self._append_optimize_op(global_block,
-                                                           param_and_grad)
-                    optimize_ops.append(optimize_op)
+        if framework.in_dygraph_mode():
+            for param_and_grad in parameters_and_grads:
+                if param_and_grad[1] is None:
+                    continue
+                with param_and_grad[0].block.program._optimized_guard(
+                        param_and_grad):
+                    if param_and_grad[0].trainable is True:
+                        optimize_op = self._append_optimize_op(global_block,
+                                                               param_and_grad)
+                        optimize_ops.append(optimize_op)
+        else:
+            for param_and_grad in parameters_and_grads:
+                if param_and_grad[1] is None:
+                    continue
+                with param_and_grad[0].block.program._optimized_guard(
+                        param_and_grad), name_scope("optimizer"):
+                    if param_and_grad[0].trainable is True:
+                        optimize_op = self._append_optimize_op(global_block,
+                                                               param_and_grad)
+                        optimize_ops.append(optimize_op)
 
         # Get custom finish ops for subclasses
         # FIXME: Need to fix this once we figure out how to handle dependencies
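In short, the hunk guards the per-parameter loop with `framework.in_dygraph_mode()`: in dygraph (imperative) mode the optimize ops are appended without the `name_scope("optimizer")` context manager, while the static-graph path keeps it unchanged. Below is a minimal standalone sketch of that branching pattern, not the PR's own code; `run_step` and `apply_updates` are hypothetical names for illustration, and it assumes the fluid-era API (`paddle.fluid.name_scope`, `fluid.dygraph.guard`) this change targets.

import paddle.fluid as fluid
from paddle.fluid import framework

def run_step(apply_updates):
    # Same branching as the hunk: dygraph mode skips the name scope,
    # static-graph mode keeps the ops grouped under "optimizer".
    if framework.in_dygraph_mode():
        apply_updates()
    else:
        with fluid.name_scope("optimizer"):
            apply_updates()

with fluid.dygraph.guard():    # enter dygraph (imperative) mode
    run_step(lambda: None)     # takes the unscoped branch
run_step(lambda: None)         # outside the guard: the scoped branch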