|
|
@ -13,7 +13,7 @@ __all__ = [
|
|
|
|
'crf_decoding', 'cos_sim', 'cross_entropy', 'square_error_cost', 'accuracy',
|
|
|
|
'crf_decoding', 'cos_sim', 'cross_entropy', 'square_error_cost', 'accuracy',
|
|
|
|
'chunk_eval', 'sequence_conv', 'conv2d', 'sequence_pool', 'pool2d',
|
|
|
|
'chunk_eval', 'sequence_conv', 'conv2d', 'sequence_pool', 'pool2d',
|
|
|
|
'batch_norm', 'beam_search_decode', 'conv2d_transpose', 'sequence_expand',
|
|
|
|
'batch_norm', 'beam_search_decode', 'conv2d_transpose', 'sequence_expand',
|
|
|
|
'lstm_unit', 'reduce_sum'
|
|
|
|
'lstm_unit', 'reduce_sum', 'reduce_mean'
|
|
|
|
]
|
|
|
|
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@ -979,3 +979,47 @@ def reduce_sum(input, dim=None, keep_dim=False):
|
|
|
|
'reduce_all': True if dim == None else False
|
|
|
|
'reduce_all': True if dim == None else False
|
|
|
|
})
|
|
|
|
})
|
|
|
|
return out
|
|
|
|
return out
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def reduce_mean(input, dim=None, keep_dim=False):
    """
    Computes the mean of tensor elements over the given dimension.

    Args:
        input (Variable): The input variable which is a Tensor or LoDTensor.
        dim (int|None): The dimension along which the mean is computed. If
            :attr:`None`, compute the mean over all elements of :attr:`input`
            and return a Tensor variable with a single element, otherwise
            must be in the range :math:`[-rank(input), rank(input))`. If
            :math:`dim < 0`, the dimension to reduce is :math:`rank + dim`.
        keep_dim (bool): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true.

    Returns:
        Variable: The reduced Tensor variable.

    Examples:
        .. code-block:: python

            # x is a Tensor variable with following elements:
            # [[0.2, 0.3, 0.5, 0.9]
            # [0.1, 0.2, 0.6, 0.7]]
            # Each example is followed by the corresponding output tensor.
            fluid.layers.reduce_mean(x)  # [0.4375]
            fluid.layers.reduce_mean(x, dim=0)  # [0.15, 0.25, 0.55, 0.8]
            fluid.layers.reduce_mean(x, dim=-1)  # [0.475, 0.4]
            fluid.layers.reduce_mean(x, dim=1, keep_dim=True)  # [[0.475], [0.4]]
    """
    helper = LayerHelper('reduce_mean', **locals())
    out = helper.create_tmp_variable(dtype=helper.input_dtype())
    # When dim is None the op reduces over all elements ('reduce_all'),
    # and the 'dim' attribute is just a placeholder value of 0.
    helper.append_op(
        type='reduce_mean',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
            'dim': dim if dim is not None else 0,
            'keep_dim': keep_dim,
            'reduce_all': dim is None
        })
    return out
|
|
|
|