test=develop, reconstruct layer helper to fit imperative usage (#15938)

* test=develop, reconstruct layer helper to fit imperative usage

* test=develop, fix import error on py35

* test=develop, fix rnn gradient error

* test=develop, delete code used for testing

* test=develop, remove helper from imperative usage

* test=develop, fix test_base_layer using new helper

* test=develop, reconstruct layerhelper for imperative mode

* test=develop, reconstruct layerhelper for imperative mode

* test=develop, fix bug

* test=develop, fix failing tests

* test=develop, fix failing tests

* test=develop, fix failing tests

* test=develop, fix bug

* test=develop, polish code
Jiabin Yang committed 654825cfe3 (parent 14b4337663)
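In short, this change makes imperative layers stop constructing their own LayerHelper: Layer now owns a LayerObjectHelper internally and exposes create_parameter/create_variable directly. A minimal sketch of the resulting usage, modeled on the updated test_base_layer below (the layer name and shapes are illustrative, not part of the diff):

import paddle.fluid as fluid

class ExampleLayer(fluid.imperative.Layer):
    # Hypothetical layer; mirrors the pattern in the updated test_base_layer.
    def __init__(self, name_scope):
        super(ExampleLayer, self).__init__(name_scope)
        self._param_attr = fluid.ParamAttr(
            initializer=fluid.initializer.Constant(value=0.1))
        # Parameters come from Layer.create_parameter, which delegates to the
        # internal LayerObjectHelper; no per-layer LayerHelper is needed.
        self.w1 = self.create_parameter(
            attr=self._param_attr, shape=[2, 2], dtype='float32', is_bias=False)
        self.w2 = self.create_parameter(
            attr=self._param_attr, shape=[2, 2], dtype='float32', is_bias=False)

    def forward(self):
        return self.w1 + self.w2

with fluid.imperative.guard():
    layer = ExampleLayer("example")
    out = layer()
    # Parameter names follow "<name_scope>/<LayerClass>_<idx>.w_<idx>",
    # e.g. "example/ExampleLayer_0.w_0" (see the test expectations below).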

@ -0,0 +1,220 @@
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import copy
import six
from ..framework import Parameter, _in_imperative_mode
from ..param_attr import ParamAttr
from .. import core
from six.moves import zip
from ..layer_helper_base import LayerHelperBase
class LayerObjectHelper(LayerHelperBase):
def __init__(self, name):
super(LayerObjectHelper, self).__init__(name, layer_type=name)
def append_op(self,
type=None,
inputs=None,
outputs=None,
attrs=None,
stop_gradient=None):
"""append an operator for this layer object.
Args:
type: operator type
inputs: input variable of the operator
dtype: data type of this parameter
is_bias: if this is a bias parameter
default_initializer: set the default initializer for this parameter
Returns created parameter Variable.
"""
return self.main_program.current_block().append_op(
type=type,
inputs=inputs,
outputs=outputs,
attrs=attrs,
stop_gradient=stop_gradient)
def _multiple_input(self, inputs_in):
inputs = inputs_in
ret = []
if isinstance(inputs, (list, tuple)):
for inp in inputs:
ret.append(self.to_variable(inp))
else:
ret.append(self.to_variable(inputs))
return ret
# TODO: make it public when we need it
def _input(self, inputs_in):
inputs = self._multiple_input(inputs_in)
if len(inputs) != 1:
raise "{0} layer only takes one input".format(self.layer_type)
return inputs[0]
def _multiple_param_attr(self, length, param_attr_in=None):
param_attr = param_attr_in
if isinstance(param_attr, ParamAttr):
param_attr = [param_attr]
if len(param_attr) != 1 and len(param_attr) != length:
raise ValueError("parameter number mismatch")
elif len(param_attr) == 1 and length != 1:
tmp = [None] * length
for i in six.moves.range(length):
tmp[i] = copy.deepcopy(param_attr[0])
param_attr = tmp
return param_attr
def iter_inputs_and_params(self, inputs_in, param_attr_in=None):
"""Access all inputs and params one by one
Args:
inputs_in: inputs to be iter
param_attr_in: param_attr to be iter
Returns input, param_attr
"""
inputs = inputs_in if (inputs_in is not None) else []
inputs = self._multiple_input(inputs)
param_attrs = self._multiple_param_attr(len(inputs), param_attr_in)
for ipt, param_attr in zip(inputs, param_attrs):
yield ipt, param_attr
def input_dtype(self, inputs_in):
"""Get input data type
Args:
inputs_in: inputs wanted know the data type
Returns dtype of the input
"""
inputs = self._multiple_input(inputs_in)
dtype = None
for each in inputs:
if dtype is None:
dtype = each.dtype
elif dtype != each.dtype:
raise ValueError("Data Type mismatch: %d to %d" %
(dtype, each.dtype))
return dtype
def get_parameter(self, name):
"""Get parameter specifically
Args:
name: parameter's name
Returns target parameter
"""
param = self.main_program.global_block().var(name)
if not isinstance(param, Parameter):
raise ValueError("no Parameter name %s found" % name)
return param
def append_bias_op(self,
input_var,
dim_start=1,
dim_end=None,
bias_attr=None):
"""Append bias operator and return its output. If the user does not set bias_attr, append_bias_op will return input_var
Args:
input_var: the input variable. The len(input_var.shape) is
larger or equal than 2.
dim_start:
dim_end: the shape of the bias will be
bias_attr: the bias_attr of it
Return the Variable of after append bias op
"""
size = list(input_var.shape[dim_start:dim_end])
if not bias_attr:
return input_var
b = self.create_parameter(
attr=bias_attr, shape=size, dtype=input_var.dtype, is_bias=True)
tmp = self.create_variable_for_type_inference(dtype=input_var.dtype)
self.append_op(
type='elementwise_add',
inputs={'X': [input_var],
'Y': [b]},
outputs={'Out': [tmp]},
attrs={'axis': dim_start})
return tmp
# TODO: this should not be called anymore after all activation func move to Layers
def append_activation(self,
input_var,
act=None,
use_cudnn=None,
use_mkl_dnn=None):
"""Append activation
Args:
input_var: the input variable. The len(input_var.shape) is
larger or equal than 2.
act: activation type
use_mkl_dnn: if use mkldnn
use_cudnn: if use cudnn
Return the Variable of after append activation
"""
act = act
if act is None:
return input_var
if isinstance(act, six.string_types):
act = {'type': act}
else:
raise TypeError(str(act) + " should be unicode or str")
if (use_cudnn is not None) and use_cudnn:
act['use_cudnn'] = use_cudnn
if (use_mkl_dnn is not None) and use_mkl_dnn:
act['use_mkldnn'] = use_mkl_dnn
act_type = act.pop('type')
tmp = input_var
        # NOTE(dzhwinter): some activations support inplace computation.
# NOTE(minqiyang): currently, we don't support inplace in imperative mode
if not _in_imperative_mode() and core.IsInplace(act_type):
tmp = input_var
else:
tmp = self.create_variable_for_type_inference(dtype=input_var.dtype)
self.append_op(
type=act_type,
inputs={"X": [input_var]},
outputs={"Out": [tmp]},
attrs=act)
return tmp
def is_instance(self, param, cls):
"""Check if the input parameter is instance of input class
Args:
param: parameter to be check
cls: class of the parameter
Return result of the check (True or False)
"""
param = param
if not isinstance(param, cls):
raise TypeError("The input {0} parameter of method {1} must be {2}",
param, self.layer_type, cls.__name__)
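Not part of the diff, but as a rough illustration of how a layer body is expected to drive this helper (here `helper`, `inputs`, `pre_bias`, and the attrs are placeholder names):

# Sketch only: `helper` is the LayerObjectHelper owned by a Layer.
for ipt, attr in helper.iter_inputs_and_params(inputs, param_attr):
    pass  # each input Variable paired with its (possibly copied) ParamAttr

pre_act = helper.append_bias_op(pre_bias, dim_start=1, bias_attr=bias_attr)
out = helper.append_activation(pre_act, act='relu')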

@ -19,8 +19,8 @@ import numpy as np
import collections
from .. import unique_name
from paddle.fluid import core
from .layer_object_helper import LayerObjectHelper
from paddle.fluid import framework
from paddle.fluid.imperative import base
__all__ = ['Layer', 'PyLayer']
@ -44,6 +44,8 @@ class Layer(core.Layer):
self._parameters = collections.OrderedDict()
self._sub_layers = collections.OrderedDict()
self._helper = LayerObjectHelper(self._full_name)
def full_name(self):
"""Full name for this layers.
@ -53,6 +55,51 @@ class Layer(core.Layer):
"""
return self._full_name
def create_parameter(self,
attr,
shape,
dtype,
is_bias=False,
default_initializer=None):
"""Create parameters for this layers.
Args:
attr: [ParamAttr] should be the parameter attribute for this parameter
            shape: shape of the parameter
dtype: data type of this parameter
is_bias: if this is a bias parameter
default_initializer: set the default initializer for this parameter
Returns created parameter Variable.
"""
return self._helper.create_parameter(attr, shape, dtype, is_bias,
default_initializer)
# TODO: Add more parameter list when we need them
def create_variable(self,
name=None,
persistable=None,
dtype=None,
type=core.VarDesc.VarType.LOD_TENSOR):
"""Create Variable for this layers.
Args:
name: name of the variable
            persistable: whether this variable is persistable
dtype: data type of data in the variable
type: type of the variable
Returns created Variable.
"""
if name is not None:
var_name = ".".join([self._full_name, name])
else:
var_name = unique_name.generate(".".join(
[self._full_name, "_generated_var"]))
return self._helper.main_program.current_block().create_var(
name=var_name, persistable=persistable, dtype=dtype, type=type)
def parameters(self, include_sublayers=True):
"""Returns a list of Parameters from current and sub-layers.

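The Conv2D changes below use both new Layer methods. As a rough guide to the naming behavior (names illustrative): create_variable(name=...) produces "<full_name>.<name>", while an unnamed call gets a generated "<full_name>._generated_var_*" name.

# Inside a Layer subclass (illustrative), as Conv2D does below:
self.create_variable(
    name="kCUDNNFwdAlgoCache",
    persistable=True,
    type=core.VarDesc.VarType.RAW)    # named "<full_name>.kCUDNNFwdAlgoCache"

# As SimpleRNNCell does in the updated tests:
tmp = self.create_variable(dtype=self._dtype)   # generated unique name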
@ -41,21 +41,12 @@ class Conv2D(layers.Layer):
bias_attr=None,
dtype=core.VarDesc.VarType.FP32):
assert param_attr is not False, "param_attr should not be False here."
super(Conv2D, self).__init__(name_scope, dtype=dtype)
# TODO(minqiyang): Move this to the top.
from ..layer_helper import LayerHelper
self._helper = LayerHelper(
self.full_name(),
param_attr=param_attr,
bias_attr=bias_attr,
dtype=dtype,
act=act)
super(Conv2D, self).__init__(name_scope)
self._groups = groups
self._stride = utils.convert_to_list(stride, 2, 'stride')
self._padding = utils.convert_to_list(padding, 2, 'padding')
self._dilation = utils.convert_to_list(dilation, 2, 'dilation')
self._act = act
if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
self._use_cudnn = use_cudnn
@ -80,28 +71,28 @@ class Conv2D(layers.Layer):
std = (2.0 / filter_elem_num)**0.5
return Normal(0.0, std, 0)
self._filter_param = self._helper.create_parameter(
attr=self._helper.param_attr,
self._filter_param = self.create_parameter(
attr=param_attr,
shape=filter_shape,
dtype=self._dtype,
default_initializer=_get_default_param_initializer())
if self._use_cudnn:
self._helper.create_variable(
self.create_variable(
name="kCUDNNFwdAlgoCache",
persistable=True,
type=core.VarDesc.VarType.RAW)
self._helper.create_variable(
self.create_variable(
name="kCUDNNBwdDataAlgoCache",
persistable=True,
type=core.VarDesc.VarType.RAW)
self._helper.create_variable(
self.create_variable(
name="kCUDNNBwdFilterAlgoCache",
persistable=True,
type=core.VarDesc.VarType.RAW)
self._bias_param = self._helper.create_parameter(
attr=self._helper.bias_attr,
self._bias_param = self.create_parameter(
attr=bias_attr,
shape=[num_filters],
dtype=self._dtype,
is_bias=True)
@ -137,7 +128,7 @@ class Conv2D(layers.Layer):
attrs={'axis': 1})
# Currently, we don't support inplace in imperative mode
return self._helper.append_activation(pre_act)
return self._helper.append_activation(pre_act, act=self._act)
class Pool2D(layers.Layer):
@ -167,9 +158,6 @@ class Pool2D(layers.Layer):
super(Pool2D, self).__init__(name_scope, dtype=dtype)
from ..layer_helper import LayerHelper
self._helper = LayerHelper(self.full_name(), dtype=dtype)
self._pool_type = pool_type
self._pool_size = utils.convert_to_list(pool_size, 2, 'pool_size')
self._pool_padding = utils.convert_to_list(pool_padding, 2,
@ -216,28 +204,25 @@ class FC(layers.Layer):
self._size = size
self._num_flatten_dims = num_flatten_dims
self._dtype = dtype
from ..layer_helper import LayerHelper
self._helper = LayerHelper(
self.full_name(),
param_attr=param_attr,
bias_attr=bias_attr,
act=act)
self._param_attr = param_attr
        self._bias_attr = bias_attr
self._act = act
def _build_once(self, input):
input_shape = input.shape
param_shape = [
reduce(lambda a, b: a * b, input_shape[self._num_flatten_dims:], 1)
] + [self._size]
self._w = self._helper.create_parameter(
attr=self._helper.param_attr,
self._w = self.create_parameter(
attr=self._param_attr,
shape=param_shape,
dtype=self._dtype,
is_bias=False)
if self._helper.bias_attr:
        if self._bias_attr:
size = list([self._size])
self._b = self._helper.create_parameter(
attr=self._helper.bias_attr,
self._b = self.create_parameter(
                attr=self._bias_attr,
shape=size,
dtype=self._dtype,
is_bias=True)
@ -275,7 +260,7 @@ class FC(layers.Layer):
else:
pre_activation = pre_bias
# Currently, we don't support inplace in imperative mode
return self._helper.append_activation(pre_activation)
return self._helper.append_activation(pre_activation, act=self._act)
class BatchNorm(layers.Layer):
@ -297,16 +282,12 @@ class BatchNorm(layers.Layer):
fuse_with_relu=False,
use_global_stats=False):
super(BatchNorm, self).__init__(name_scope)
self._param_attr = param_attr
        self._bias_attr = bias_attr
self._act = act
assert bias_attr is not False, "bias_attr should not be False in batch_norm."
from ..layer_helper import LayerHelper
self._helper = LayerHelper(
self.full_name(),
param_attr=param_attr,
bias_attr=bias_attr,
act=act)
if dtype == core.VarDesc.VarType.FP16:
self._dtype = core.VarDesc.VarType.FP32
else:
@ -315,23 +296,23 @@ class BatchNorm(layers.Layer):
param_shape = [num_channels]
# create parameter
self._scale = self._helper.create_parameter(
attr=self._helper.param_attr,
self._scale = self.create_parameter(
attr=self._param_attr,
shape=param_shape,
dtype=self._dtype,
default_initializer=Constant(1.0))
if use_global_stats and self._helper.param_attr.learning_rate == 0.:
if use_global_stats and self._param_attr.learning_rate == 0.:
self._scale._stop_gradient = True
self._bias = self._helper.create_parameter(
attr=self._helper.bias_attr,
self._bias = self.create_parameter(
            attr=self._bias_attr,
shape=param_shape,
dtype=self._dtype,
is_bias=True)
if use_global_stats and self._helper.bias_attr.learning_rate == 0.:
        if use_global_stats and self._bias_attr.learning_rate == 0.:
self._bias._stop_gradient = True
self._mean = self._helper.create_parameter(
self._mean = self.create_parameter(
attr=ParamAttr(
name=moving_mean_name,
initializer=Constant(0.0),
@ -341,7 +322,7 @@ class BatchNorm(layers.Layer):
dtype=self._dtype)
self._mean._stop_gradient = True
self._variance = self._helper.create_parameter(
self._variance = self.create_parameter(
attr=ParamAttr(
name=moving_variance_name,
initializer=Constant(1.0),
@ -401,7 +382,7 @@ class BatchNorm(layers.Layer):
})
# Currently, we don't support inplace in imperative mode
return self._helper.append_activation(batch_norm_out)
return self._helper.append_activation(batch_norm_out, self._act)
class Embedding(layers.Layer):
@ -466,9 +447,7 @@ class Embedding(layers.Layer):
if self._remote_prefetch:
assert self._is_sparse is True and self._is_distributed is False
from ..layer_helper import LayerHelper
self._helper = LayerHelper(self.full_name(), param_attr=param_attr)
self._w = self._helper.create_parameter(
self._w = self.create_parameter(
attr=self._param_attr,
shape=self._size,
dtype=self._dtype,

@ -19,7 +19,6 @@ import numpy as np
from .wrapped_decorator import signature_safe_contextmanager
from .core import VarDesc
from . import unique_name
from .imperative import base as imperative_base
__all__ = [
'Constant', 'Uniform', 'Normal', 'TruncatedNormal', 'Xavier', 'Bilinear',
@ -166,7 +165,7 @@ class ConstantInitializer(Initializer):
'force_cpu': self._force_cpu or force_init_on_cpu()
},
stop_gradient=True)
if not imperative_base.enabled():
if not framework._in_imperative_mode():
var.op = op
return op
@ -246,7 +245,7 @@ class UniformInitializer(Initializer):
attrs={"in_dtype": out_var.dtype,
"out_dtype": var.dtype})
if not imperative_base.enabled():
if not framework._in_imperative_mode():
var.op = op
return op
@ -325,7 +324,7 @@ class NormalInitializer(Initializer):
outputs={"Out": var},
attrs={"in_dtype": out_var.dtype,
"out_dtype": var.dtype})
if not imperative_base.enabled():
if not framework._in_imperative_mode():
var.op = op
return op
@ -404,7 +403,7 @@ class TruncatedNormalInitializer(Initializer):
outputs={"Out": var},
attrs={"in_dtype": out_var.dtype,
"out_dtype": var.dtype})
if not imperative_base.enabled():
if not framework._in_imperative_mode():
var.op = op
return op
@ -510,7 +509,7 @@ class XavierInitializer(Initializer):
"seed": self._seed
},
stop_gradient=True)
if not imperative_base.enabled():
if not framework._in_imperative_mode():
var.op = op
return op
@ -611,7 +610,7 @@ class MSRAInitializer(Initializer):
"seed": self._seed
},
stop_gradient=True)
if not imperative_base.enabled():
if not framework._in_imperative_mode():
var.op = op
return op
@ -710,7 +709,7 @@ class BilinearInitializer(Initializer):
'shape': list(shape),
value_name: values
})
if not imperative_base.enabled():
if not framework._in_imperative_mode():
var.op = op
return op
@ -769,7 +768,7 @@ class NumpyArrayInitializer(Initializer):
value_name: values
},
stop_gradient=True)
if not imperative_base.enabled():
if not framework._in_imperative_mode():
var.op = op
return op

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -379,7 +379,7 @@ class Optimizer(object):
self._dtype = loss.dtype
program = loss.block.program
optimize_ops = []
if imperative_base.enabled():
if framework._in_imperative_mode():
if parameter_list is not None:
parameters = parameter_list
else:

@ -16,27 +16,17 @@ import unittest
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.layer_helper import LayerHelper
class L1(fluid.imperative.Layer):
def __init__(self, prefix):
super(L1, self).__init__(prefix)
self._helper = LayerHelper(
self.full_name(),
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.1)))
self.w1 = self._helper.create_parameter(
attr=self._helper.param_attr,
shape=[2, 2],
dtype='float32',
is_bias=False)
self.w2 = self._helper.create_parameter(
attr=self._helper.param_attr,
shape=[2, 2],
dtype='float32',
is_bias=False)
self._param_attr = fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.1))
self.w1 = self.create_parameter(
attr=self._param_attr, shape=[2, 2], dtype='float32', is_bias=False)
self.w2 = self.create_parameter(
attr=self._param_attr, shape=[2, 2], dtype='float32', is_bias=False)
def forward(self):
return self.w1 + self.w2
@ -67,8 +57,8 @@ class TestBaseLayer(unittest.TestCase):
with fluid.imperative.guard():
l = L1('test_one_level')
ret = l()
self.assertEqual(l.w1.name, "test_one_level/L1_0_0.w_0")
self.assertEqual(l.w2.name, "test_one_level/L1_0_0.w_1")
self.assertEqual(l.w1.name, "test_one_level/L1_0.w_0")
self.assertEqual(l.w2.name, "test_one_level/L1_0.w_1")
self.assertTrue(np.allclose(ret._numpy(), 0.2 * np.ones([2, 2])))
def test_three_level(self):
@ -76,12 +66,12 @@ class TestBaseLayer(unittest.TestCase):
l = L3('test_three_level')
names = [p.name for p in l.parameters()]
ret = l()
self.assertEqual(names[0], "test_three_level/L3_0/L2_0/L1_0_0.w_0")
self.assertEqual(names[1], "test_three_level/L3_0/L2_0/L1_0_0.w_1")
self.assertEqual(names[2], "test_three_level/L3_0/L2_0/L1_1_0.w_0")
self.assertEqual(names[3], "test_three_level/L3_0/L2_0/L1_1_0.w_1")
self.assertEqual(names[4], "test_three_level/L3_0/L2_1/L1_0_0.w_0")
self.assertEqual(names[5], "test_three_level/L3_0/L2_1/L1_0_0.w_1")
self.assertEqual(names[0], "test_three_level/L3_0/L2_0/L1_0.w_0")
self.assertEqual(names[1], "test_three_level/L3_0/L2_0/L1_0.w_1")
self.assertEqual(names[2], "test_three_level/L3_0/L2_0/L1_1.w_0")
self.assertEqual(names[3], "test_three_level/L3_0/L2_0/L1_1.w_1")
self.assertEqual(names[4], "test_three_level/L3_0/L2_1/L1_0.w_0")
self.assertEqual(names[5], "test_three_level/L3_0/L2_1/L1_0.w_1")
self.assertTrue(np.allclose(ret._numpy(), 0.8 * np.ones([2, 2])))

@ -53,11 +53,15 @@ class MLP(fluid.imperative.Layer):
super(MLP, self).__init__(name_scope)
self._fc1 = FC(self.full_name(),
3,
fluid.ParamAttr(
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.1)),
bias_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.1)))
self._fc2 = FC(self.full_name(),
4,
fluid.ParamAttr(
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.1)),
bias_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.1)))
def forward(self, inputs):
@ -74,41 +78,37 @@ class SimpleRNNCell(fluid.imperative.Layer):
self.step_input_size = step_input_size
self.hidden_size = hidden_size
self.output_size = output_size
self._dype = core.VarDesc.VarType.FP32
from paddle.fluid.layer_helper import LayerHelper
self._helper = LayerHelper(
'SimpleRNNCell', act="tanh", param_attr=param_attr)
self._dtype = core.VarDesc.VarType.FP32
self.param_attr = param_attr
def _build_once(self, inputs, pre_hidden):
i2h_param_shape = [self.step_input_size, self.hidden_size]
h2h_param_shape = [self.hidden_size, self.hidden_size]
h2o_param_shape = [self.output_size, self.hidden_size]
self._i2h_w = self._helper.create_parameter(
attr=self._helper.param_attr,
self._i2h_w = self.create_parameter(
attr=self.param_attr,
shape=i2h_param_shape,
dtype=self._dtype,
is_bias=False)
self._h2h_w = self._helper.create_parameter(
attr=self._helper.param_attr,
self._h2h_w = self.create_parameter(
attr=self.param_attr,
shape=h2h_param_shape,
dtype=self._dtype,
is_bias=False)
self._h2o_w = self._helper.create_parameter(
attr=self._helper.param_attr,
self._h2o_w = self.create_parameter(
attr=self.param_attr,
shape=h2o_param_shape,
dtype=self._dtype,
is_bias=False)
def forward(self, input, pre_hidden):
tmp_i2h = self._helper.create_variable_for_type_inference(self._dtype)
tmp_h2h = self._helper.create_variable_for_type_inference(self._dtype)
hidden = self._helper.create_variable_for_type_inference(self._dype)
out = self._helper.create_variable_for_type_inference(self._dype)
softmax_out = self._helper.create_variable_for_type_inference(
self._dtype)
reduce_out = self._helper.create_variable_for_type_inference(
self._dtype)
tmp_i2h = self.create_variable(dtype=self._dtype)
tmp_h2h = self.create_variable(dtype=self._dtype)
hidden = self.create_variable(dtype=self._dtype)
out = self.create_variable(dtype=self._dtype)
softmax_out = self.create_variable(dtype=self._dtype)
reduce_out = self.create_variable(dtype=self._dtype)
self._helper.append_op(
type="mul",
inputs={"X": input,
@ -132,7 +132,7 @@ class SimpleRNNCell(fluid.imperative.Layer):
outputs={'Out': hidden},
attrs={'axis': -1,
'use_mkldnn': False})
hidden = self._helper.append_activation(hidden)
hidden = self._helper.append_activation(hidden, act='tanh')
self._helper.append_op(
type="mul",
@ -174,7 +174,7 @@ class SimpleRNN(fluid.imperative.Layer):
outs = list()
pre_hiddens = list()
init_hidden = fluid.layers.tensor.create_parameter(
init_hidden = self.create_parameter(
attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.1)),
shape=[1, 3],
@ -337,10 +337,10 @@ class TestImperative(unittest.TestCase):
self.assertTrue(np.allclose(dy_grad, static_grad))
params = mlp.parameters(True)
self.assertEqual("mlp/MLP_0/FC_0_0.w_0", params[0].name)
self.assertEqual("mlp/MLP_0/FC_0_0.b_0", params[1].name)
self.assertEqual("mlp/MLP_0/FC_1_0.w_0", params[2].name)
self.assertEqual("mlp/MLP_0/FC_1_0.b_0", params[3].name)
self.assertEqual("mlp/MLP_0/FC_0.w_0", params[0].name)
self.assertEqual("mlp/MLP_0/FC_0.b_0", params[1].name)
self.assertEqual("mlp/MLP_0/FC_1.w_0", params[2].name)
self.assertEqual("mlp/MLP_0/FC_1.b_0", params[3].name)
self.assertEqual(len(params), 4)
sublayers = mlp.sublayers(True)

@ -78,7 +78,7 @@ class SimpleImgConvPool(fluid.imperative.Layer):
class MNIST(fluid.imperative.Layer):
def __init__(self, name_scope, param_attr=None, bias_attr=None):
def __init__(self, name_scope):
super(MNIST, self).__init__(name_scope)
self._simple_img_conv_pool_1 = SimpleImgConvPool(

@ -41,19 +41,17 @@ class SimpleLSTMRNN(fluid.imperative.Layer):
self._dropout = dropout
self._input = None
self._num_steps = num_steps
from paddle.fluid.layer_helper import LayerHelper
self._helper = LayerHelper('SimpleLSTMRNN', act="tanh")
self.cell_array = []
self.hidden_array = []
def _build_once(self, input_embedding, init_hidden=None, init_cell=None):
self.weight_1_arr = []
self.weight_2_arr = []
self.bias_arr = []
self.hidden_array = []
self.cell_array = []
self.mask_array = []
for i in range(self._num_layers):
weight_1 = self._helper.create_parameter(
weight_1 = self.create_parameter(
attr=fluid.ParamAttr(
initializer=fluid.initializer.UniformInitializer(
low=-self._init_scale, high=self._init_scale)),
@ -62,7 +60,7 @@ class SimpleLSTMRNN(fluid.imperative.Layer):
default_initializer=fluid.initializer.UniformInitializer(
low=-self._init_scale, high=self._init_scale))
self.weight_1_arr.append(weight_1)
bias_1 = self._helper.create_parameter(
bias_1 = self.create_parameter(
attr=fluid.ParamAttr(
initializer=fluid.initializer.UniformInitializer(
low=-self._init_scale, high=self._init_scale)),
@ -71,6 +69,11 @@ class SimpleLSTMRNN(fluid.imperative.Layer):
default_initializer=fluid.initializer.Constant(0.0))
self.bias_arr.append(bias_1)
def forward(self, input_embedding, init_hidden=None, init_cell=None):
self.cell_array = []
self.hidden_array = []
for i in range(self._num_layers):
pre_hidden = fluid.layers.slice(
init_hidden, axes=[0], starts=[i], ends=[i + 1])
pre_cell = fluid.layers.slice(
@ -82,7 +85,6 @@ class SimpleLSTMRNN(fluid.imperative.Layer):
self.hidden_array.append(pre_hidden)
self.cell_array.append(pre_cell)
def forward(self, input_embedding, init_hidden=None, init_cell=None):
res = []
for index in range(self._num_steps):
self._input = fluid.layers.slice(
@ -145,8 +147,6 @@ class PtbModel(fluid.imperative.Layer):
self.num_layers = num_layers
self.num_steps = num_steps
self.dropout = dropout
from paddle.fluid.layer_helper import LayerHelper
self._helper = LayerHelper('PtbModel', act="tanh")
self.simple_lstm_rnn = SimpleLSTMRNN(
self.full_name(),
hidden_size,
@ -163,13 +163,13 @@ class PtbModel(fluid.imperative.Layer):
name='embedding_para',
initializer=fluid.initializer.UniformInitializer(
low=-init_scale, high=init_scale)))
self.softmax_weight = self._helper.create_parameter(
self.softmax_weight = self.create_parameter(
attr=fluid.ParamAttr(),
shape=[self.hidden_size, self.vocab_size],
dtype="float32",
default_initializer=fluid.initializer.UniformInitializer(
low=-self.init_scale, high=self.init_scale))
self.softmax_bias = self._helper.create_parameter(
self.softmax_bias = self.create_parameter(
attr=fluid.ParamAttr(),
shape=[self.vocab_size],
dtype="float32",
@ -180,7 +180,6 @@ class PtbModel(fluid.imperative.Layer):
pass
def forward(self, input, label, init_hidden, init_cell):
init_h = fluid.layers.reshape(
init_hidden, shape=[self.num_layers, -1, self.hidden_size])
