Polish nn code, test=develop (#22237)

* refine code, test=develop

* reuse contain_var, test=develop

@@ -455,14 +455,14 @@ def name_scope(prefix=None):
     """
     # TODO(panyx0718): Only [0-9a-z].
     # in dygraph we don't need namescope since it will cause mem leak
-    if not in_dygraph_mode():
+    if in_dygraph_mode():
+        yield
+    else:
         assert prefix, "namescope prefix cannot be empty."
         global _name_scope
         _name_scope = _name_scope.child(prefix)
         yield
         _name_scope = _name_scope.parent()
-    else:
-        yield


 def _full_name_scope():
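
For context, a minimal sketch (assuming the public fluid.name_scope API) of what the reordered branches mean in practice: in static-graph mode the ops built inside the block get the given name prefix, while in dygraph mode the context manager now simply yields:

import paddle.fluid as fluid

# minimal sketch, assuming the public fluid.name_scope API:
# in static-graph mode the ops built here are prefixed with "fc_group/";
# in dygraph mode the refactored branch just yields and pushes no prefix
with fluid.name_scope("fc_group"):
    a = fluid.layers.fill_constant(shape=[2, 3], dtype='float32', value=1.0)
    b = fluid.layers.fill_constant(shape=[2, 3], dtype='float32', value=2.0)
    c = a + b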
@@ -715,10 +715,9 @@ def _getitem_impl_(var, item):
         if (use_strided_slice == True):
             attrs['strides'] = []
         infer_flags = list(1 for i in range(len(slice_axis)))
+
         # starts
-        if not contain_var(slice_start):
-            attrs['starts'] = slice_start
-        else:
+        if contain_var(slice_start):
             inputs['StartsTensorList'] = get_new_list_tensor(slice_start)
             for i, dim in enumerate(slice_start):
                 if isinstance(dim, Variable):
@@ -726,10 +725,11 @@ def _getitem_impl_(var, item):
                     infer_flags[i] = -1
                 else:
                     attrs['starts'].append(dim)
-        # ends
-        if not contain_var(slice_end):
-            attrs['ends'] = slice_end
         else:
+            attrs['starts'] = slice_start
+
+        # ends
+        if contain_var(slice_end):
             inputs['EndsTensorList'] = get_new_list_tensor(slice_end)
             for i, dim in enumerate(slice_end):
                 if isinstance(dim, Variable):
@@ -737,11 +737,12 @@ def _getitem_impl_(var, item):
                     infer_flags[i] = -1
                 else:
                     attrs['ends'].append(dim)
+        else:
+            attrs['ends'] = slice_end
+
         # strides
         if use_strided_slice == True:
-            if not contain_var(slice_step):
-                attrs['strides'] = slice_step
-            else:
+            if contain_var(slice_step):
                 inputs['StridesTensorList'] = get_new_list_tensor(slice_step)
                 for i, dim in enumerate(slice_step):
                     if isinstance(dim, Variable):
@@ -749,6 +750,8 @@ def _getitem_impl_(var, item):
                         infer_flags[i] = -1
                     else:
                         attrs['strides'].append(dim)
+            else:
+                attrs['strides'] = slice_step

         # infer_flags
         attrs['infer_flags'] = infer_flags
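
The reordered branches in _getitem_impl_ all separate the same two cases: slice bounds that are plain ints stay in attrs, while bounds containing a Variable go through the *TensorList inputs. A minimal static-graph sketch (the tensor names are illustrative):

import paddle.fluid as fluid

# ints only -> contain_var(...) is False and the bounds stay in attrs;
# a Variable bound -> contain_var(...) is True and EndsTensorList is used
x = fluid.layers.data(
    name='x', shape=[10, 10], dtype='float32', append_batch_size=False)
end = fluid.layers.fill_constant(shape=[1], dtype='int32', value=5)
y = x[0:5, :]    # plain ints
z = x[0:end, :]  # contains a Variable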
@@ -2344,12 +2347,12 @@ class Block(object):
                 if isinstance(item[1], Parameter))

     def create_var(self, *args, **kwargs):
-        if not in_dygraph_mode():
+        if in_dygraph_mode():
+            var = _varbase_creator(*args, **kwargs)
+        else:
             var = Variable(block=self, *args, **kwargs)
             if 'initializer' in kwargs:
                 kwargs['initializer'](var, self)
-        else:
-            var = _varbase_creator(*args, **kwargs)
         return var

     def has_var(self, name):
@@ -2396,9 +2399,8 @@ class Block(object):
         # NOTE: v is destroyed by C++ after calling _rename_var.
         d = self.desc.find_var(cpt.to_bytes(new_name))
         if var_type == "Parameter":
-            if not in_dygraph_mode():
-                var = Parameter(
-                    self,
+            if in_dygraph_mode():
+                var = ParamBase(
                     d.shape(),
                     d.dtype(),
                     type=orig_var_type,
@@ -2410,7 +2412,8 @@ class Block(object):
                     gradient_clip_attr=gradient_clip_attr,
                     error_clip=error_clip)
             else:
-                var = ParamBase(
+                var = Parameter(
+                    self,
                     d.shape(),
                     d.dtype(),
                     type=orig_var_type,
@@ -2444,10 +2447,10 @@ class Block(object):
     def create_parameter(self, *args, **kwargs):
         global_block = self.program.global_block()
         param = None
-        if not in_dygraph_mode():
-            param = Parameter(global_block, *args, **kwargs)
-        else:
+        if in_dygraph_mode():
             param = ParamBase(*args, **kwargs)
+        else:
+            param = Parameter(global_block, *args, **kwargs)
         if 'initializer' in kwargs:

             def _is_inited_by(block, var):
@@ -2687,9 +2690,8 @@ class Block(object):
                                  "same topology")
             assert isinstance(v, Variable)
             new_p = None
-            if not in_dygraph_mode():
-                new_p = Parameter(
-                    block=self,
+            if in_dygraph_mode():
+                new_p = ParamBase(
                     shape=v.shape,
                     dtype=v.dtype,
                     type=v.type,
@@ -2702,7 +2704,8 @@ class Block(object):
                     error_clip=p.error_clip,
                     name=v.name)
             else:
-                new_p = ParamBase(
+                new_p = Parameter(
+                    block=self,
                     shape=v.shape,
                     dtype=v.dtype,
                     type=v.type,
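
All of the Block changes above apply the same inversion: test in_dygraph_mode() directly and put the dygraph object (the _varbase_creator result or a ParamBase) in the first branch, with the static-graph Variable/Parameter in the else branch. A hypothetical helper distilling that pattern (the name _make_parameter is illustrative and not part of the codebase):

from paddle.fluid import framework
from paddle.fluid.framework import Parameter, ParamBase

def _make_parameter(block, shape, dtype, **kwargs):
    # hypothetical sketch mirroring the refactored branches: dygraph builds a
    # ParamBase (no block argument), static graph builds a Parameter bound to block
    if framework.in_dygraph_mode():
        return ParamBase(shape, dtype, **kwargs)
    return Parameter(block, shape, dtype, **kwargs)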

File diff suppressed because it is too large.

@@ -22,6 +22,7 @@ from ..initializer import Constant, force_init_on_cpu
 from ..core import VarDesc
 from .. import core
 from .layer_function_generator import templatedoc
+from . import utils
 from ..data_feeder import check_type_and_dtype, check_type, check_dtype, convert_dtype
 import numpy
 import warnings
@@ -552,13 +553,6 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
           shape = fluid.layers.fill_constant([1,2], "int32", 2) # shape=[2,2]
           data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True) # data4=[[True,True],[True,True]]
     """
-
-    def _contain_var(one_list):
-        for ele in one_list:
-            if isinstance(ele, Variable):
-                return True
-        return False
-
     attrs = {
         'value': float(value),
         'force_cpu': force_cpu or force_init_on_cpu()
@@ -571,8 +565,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):

     if in_dygraph_mode():
         if isinstance(shape, (list, tuple)):
-            contain_var = _contain_var(shape)
-            if contain_var:
+            if utils._contain_var(shape):
                 raise TypeError(
                     "The type of 'shape' in fill_constant must be list[int] or tuple(int) in Dygraph mode, but "
                     "received %s, which contains Variable." % type(shape))
@@ -644,7 +637,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
                 "The size of 'shape' in fill_constant can't be zero, "
                 "but received %s." % len(shape))
         attrs["shape"] = _get_attr_shape(shape)
-        if _contain_var(shape):
+        if utils._contain_var(shape):
             inputs['ShapeTensorList'] = _get_shape_tensor(shape)

     if out is None:
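
In static-graph mode the utils._contain_var(shape) check above separates two ways of calling fill_constant; in dygraph mode the same check raises the TypeError shown earlier. A minimal static-graph sketch:

import paddle.fluid as fluid

# plain int shape -> recorded in attrs["shape"]
t1 = fluid.layers.fill_constant(shape=[2, 4], dtype='float32', value=1.0)

# shape list containing a Variable -> utils._contain_var(shape) is True,
# so the shape is fed through inputs['ShapeTensorList'] instead
dim = fluid.layers.fill_constant(shape=[1], dtype='int32', value=3)
t2 = fluid.layers.fill_constant(shape=[dim, 4], dtype='float32', value=1.0)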

@@ -16,6 +16,7 @@ from __future__ import print_function
 import collections
 import six
 import numpy as np
+from ..framework import Variable


 def convert_to_list(value, n, name, dtype=np.int):
@@ -244,3 +245,13 @@ def _is_symmetric_padding(padding, data_dim):
         if padding[i * 2] != padding[i * 2 + 1]:
             is_sys = False
     return is_sys
+
+
+def _contain_var(list_or_tuple):
+    """
+    Check whether list or tuple contains variable.
+    """
+    for item in list_or_tuple:
+        if isinstance(item, Variable):
+            return True
+    return False
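
This shared helper is what fill_constant (and, per the commit message, other layers) now reuses instead of keeping local copies. A small usage sketch, assuming the module is importable as paddle.fluid.layers.utils:

import paddle.fluid as fluid
from paddle.fluid.layers import utils

dim = fluid.layers.fill_constant(shape=[1], dtype='int32', value=3)
print(utils._contain_var([2, 4]))    # False: plain ints only
print(utils._contain_var((dim, 4)))  # True: contains a Variable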

@@ -134,11 +134,11 @@ class Optimizer(object):
         # global step if use lr decay
         if isinstance(self._learning_rate, LearningRateDecay):
             var_tmp = None
-            if not framework.in_dygraph_mode():
-                var_temp = Variable(None, name='global_step', dtype='int32')
-            else:
+            if framework.in_dygraph_mode():
                 var_temp = framework._varbase_creator(
                     None, name='global_step', dtype='int32')
+            else:
+                var_temp = Variable(None, name='global_step', dtype='int32')

             tensor.fill_constant(
                 [1], "int32", self._learning_rate.step_num, out=var_temp)
@@ -546,10 +546,10 @@ class Optimizer(object):
             See examples in ``apply_gradients``.
         """
         act_no_grad_set = None
-        if not framework.in_dygraph_mode():
-            act_no_grad_set = self._get_no_grad_set(loss, no_grad_set)
-        else:
+        if framework.in_dygraph_mode():
             pass
+        else:
+            act_no_grad_set = self._get_no_grad_set(loss, no_grad_set)

         self._dtype = loss.dtype
         if framework.in_dygraph_mode():
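
For reference, a minimal static-graph sketch that exercises the minimize() path touched above; here the non-dygraph branch runs and act_no_grad_set is built via _get_no_grad_set (in dygraph mode that branch is now an explicit pass):

import paddle.fluid as fluid

# static-graph toy regression; SGD.minimize(loss) takes the else branch
# above and computes the no-grad set before backward/apply
x = fluid.data(name='x', shape=[None, 13], dtype='float32')
y = fluid.data(name='y', shape=[None, 1], dtype='float32')
pred = fluid.layers.fc(input=x, size=1)
loss = fluid.layers.mean(fluid.layers.square_error_cost(input=pred, label=y))
fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)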
