@@ -981,7 +981,12 @@ def cos_sim(X, Y):
     return out


-def dropout(x, dropout_prob, is_test=False, seed=None, name=None):
+def dropout(x,
+            dropout_prob,
+            is_test=False,
+            seed=None,
+            name=None,
+            dropout_implementation="downgrade_in_infer"):
     """
     Computes dropout.

@@ -1001,6 +1006,21 @@ def dropout(x, dropout_prob, is_test=False, seed=None, name=None):
                     units will be dropped. DO NOT use a fixed seed in training.
         name (str|None): A name for this layer (optional). If set None, the layer
                          will be named automatically.
+        dropout_implementation(string): ['downgrade_in_infer'(default)|'upscale_in_train']
+
+            1. downgrade_in_infer(default), downgrade the outcome at inference
+               train: out = input * mask
+               inference: out = input * (1.0 - dropout_prob)
+               (mask is a tensor of the same shape as input; its values are
+               0 or 1, and the ratio of 0s is dropout_prob)
+
+            2. upscale_in_train, upscale the outcome at training time
+               train: out = input * mask / (1.0 - dropout_prob)
+               inference: out = input
+               (mask is a tensor of the same shape as input; its values are
+               0 or 1, and the ratio of 0s is dropout_prob)
+               With this mode the dropout op can be removed from the
+               inference program, which makes the program more efficient.

     Returns:
         Variable: A tensor variable with the same shape as `x`.
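For intuition, here is a minimal NumPy sketch (not Paddle's kernel; the tensor
shape, seed, and 0.3 probability are illustrative assumptions) of what the two
modes compute:

    import numpy as np

    rng = np.random.RandomState(0)  # fixed seed for illustration only
    x = rng.rand(4, 5).astype("float32")
    dropout_prob = 0.3
    # mask is 0 with probability dropout_prob, else 1
    mask = (rng.rand(*x.shape) >= dropout_prob).astype("float32")

    # downgrade_in_infer: plain mask at train time, downscale at inference
    train_down = x * mask
    infer_down = x * (1.0 - dropout_prob)

    # upscale_in_train: rescale at train time, identity at inference
    train_up = x * mask / (1.0 - dropout_prob)
    infer_up = x

Both modes keep the expected activation equal between training and inference;
because upscale_in_train makes inference the identity, the op can be stripped
from the inference program entirely.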
@@ -1030,7 +1050,8 @@ def dropout(x, dropout_prob, is_test=False, seed=None, name=None):
             'dropout_prob': dropout_prob,
             'is_test': is_test,
             'fix_seed': seed is not None,
-            'seed': seed if seed is not None else 0
+            'seed': seed if seed is not None else 0,
+            'dropout_implementation': dropout_implementation,
         })
     return out
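A usage sketch of the extended signature (the layer name and size are
illustrative; only APIs that appear in this diff are used):

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[32], dtype='float32')
    # With upscale_in_train, inference is the identity, so the dropout op
    # can be removed from the inference program.
    dropped = fluid.layers.dropout(
        x, dropout_prob=0.5, dropout_implementation="upscale_in_train")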
@@ -4845,7 +4866,7 @@ def autoincreased_step_counter(counter_name=None, begin=1, step=1):
     return counter


-def reshape(x, shape, actual_shape=None, act=None, inplace=True, name=None):
+def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
     """
     Gives a new shape to the input Tensor without changing its data.

@@ -4893,15 +4914,22 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=True, name=None):
                                :attr:`shape` specifying shape. That is to
                                say :attr:`actual_shape` has a higher priority
                                than :attr:`shape`.
-        act (str): The non-linear activation to be applied to output variable.
-        inplace(bool): If this flag is set true, the output
-                       shares data with input without copying, otherwise
-                       a new output tensor is created
-                       whose data is copied from input x.
+        act (str): The non-linear activation to be applied to the reshaped tensor
+                   variable.
+        inplace(bool): Must be :attr:`False` if :attr:`x` is used in multiple
+                       operators. If set :attr:`True`, the input :attr:`x` is
+                       reused, which changes the shape of tensor variable
+                       :attr:`x` itself and might cause errors when :attr:`x`
+                       is used elsewhere. If :attr:`False`, the shape of
+                       :attr:`x` is preserved and a new output tensor variable
+                       is created whose data is copied from x but reshaped.
         name (str): The name of this layer. It is optional.

     Returns:
-        Variable: The output tensor.
+        Variable: The reshaped tensor variable if :attr:`act` is None. It is a \
+                  new tensor variable if :attr:`inplace` is :attr:`False`, \
+                  otherwise it is :attr:`x`. If :attr:`act` is not None, return \
+                  the activated tensor variable.

     Raises:
         TypeError: if actual_shape is neither Variable nor None.
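To make the new contract concrete, a small sketch under the semantics
documented above (variable names are illustrative): with the now-default
inplace=False a fresh variable is returned, while inplace=True hands back
x itself, as the implementation change at the end of this diff shows.

    import paddle.fluid as fluid

    data = fluid.layers.data(name='data', shape=[2, 4, 6], dtype='float32')
    # Safe default: returns a new tensor variable; `data` keeps its shape
    # and may still feed other operators.
    reshaped = fluid.layers.reshape(x=data, shape=[-1, 0, 3, 2], inplace=False)

    other = fluid.layers.data(name='other', shape=[2, 4, 6], dtype='float32')
    # Opt-in fast path: reuses `other`, changing its shape in place; only
    # safe when `other` feeds no operator besides this reshape.
    aliased = fluid.layers.reshape(x=other, shape=[-1, 0, 3, 2], inplace=True)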
@@ -4912,7 +4940,7 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=True, name=None):
         data = fluid.layers.data(
             name='data', shape=[2, 4, 6], dtype='float32')
         reshaped = fluid.layers.reshape(
-            x=data, shape=[-1, 0, 3, 2], act='tanh', inplace=True)
+            x=data, shape=[-1, 0, 3, 2], inplace=True)
     """

     if not (isinstance(shape, list) or isinstance(shape, tuple)):
@@ -4939,7 +4967,8 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=True, name=None):
                                  "except one unknown dimension.")

    helper = LayerHelper("reshape2", **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
+    out = x if inplace else helper.create_variable_for_type_inference(
+        dtype=x.dtype)
    x_shape = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="reshape2",