add log2 operator (#28319)

As the title says: add the log2 activation operator.
musl/fix_failed_unittests_in_musl
joejiong 5 years ago committed by GitHub
parent 0fc181dbd0
commit 08d2413142

@@ -301,6 +301,15 @@ Natural logarithm of x.
)DOC";
UNUSED constexpr char Log2Doc[] = R"DOC(
Log2 Activation Operator.
$$out = \log_2 x$$

Logarithm of x to base 2.
)DOC";
UNUSED constexpr char Log1pDoc[] = R"DOC(
Log Activation Operator.
@@ -697,6 +706,7 @@ REGISTER_ACTIVATION_OP_MAKER(Cosh, CoshDoc);
REGISTER_ACTIVATION_OP_MAKER(Round, RoundDoc);
REGISTER_ACTIVATION_OP_MAKER(Reciprocal, ReciprocalDoc);
REGISTER_ACTIVATION_OP_MAKER(Log, LogDoc);
REGISTER_ACTIVATION_OP_MAKER(Log2, Log2Doc);
REGISTER_ACTIVATION_OP_MAKER(Log1p, Log1pDoc);
REGISTER_ACTIVATION_OP_MAKER(Square, SquareDoc);
REGISTER_ACTIVATION_OP_MAKER(Softsign, SoftsignDoc);

@@ -820,6 +820,27 @@ struct LogGradFunctor : public BaseActivationFunctor<T> {
static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};
// log2(x) = logarithm to the base 2 of the elements of x
template <typename T>
struct Log2Functor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    // change of base: log2(x) = ln(x) / ln(2)
    out.device(d) = x.log() / static_cast<T>(log(2));
  }
};

// the gradient of log2(x) is 1/(x*ln(2))
template <typename T>
struct Log2GradFunctor : public BaseActivationFunctor<T> {
  template <typename Device, typename X, typename Out, typename dOut,
            typename dX>
  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
    dx.device(d) = dout * static_cast<T>(1) / (x * static_cast<T>(log(2)));
  }
  // the backward pass only needs the forward input X, not Out
  static constexpr ActBwdOpFwdDeps FwdDeps() { return kDepX; }
};
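A quick numerical sanity check of the two functors above (a NumPy sketch of mine, not part of the patch; NumPy stands in for the Eigen expressions): the forward change-of-base result must match np.log2, and the analytic gradient 1/(x*ln(2)) must match a central finite difference.

import numpy as np

x = np.array([0.5, 1.0, 2.0, 8.0], dtype=np.float64)

# forward: same change-of-base identity as Log2Functor
out = np.log(x) / np.log(2.0)
assert np.allclose(out, np.log2(x))

# backward: Log2GradFunctor computes dx = dout * 1 / (x * ln(2));
# compare the analytic gradient against a central finite difference
eps = 1e-6
analytic = 1.0 / (x * np.log(2.0))
numeric = (np.log2(x + eps) - np.log2(x - eps)) / (2 * eps)
assert np.allclose(analytic, numeric)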
// log1p(x) = natural logarithm of x+1
template <typename T>
struct Log1pFunctor : public BaseActivationFunctor<T> {
@@ -1908,6 +1929,7 @@ struct LogGradGradFunctor : public BaseActivationFunctor<T> {
__macro(round, Round, RoundFunctor, ZeroGradFunctor); \
__macro(reciprocal, Reciprocal, ReciprocalFunctor, ReciprocalGradFunctor); \
__macro(log1p, Log1p, Log1pFunctor, Log1pGradFunctor); \
__macro(log2, Log2, Log2Functor, Log2GradFunctor); \
__macro(brelu, BRelu, BReluFunctor, BReluGradFunctor); \
__macro(soft_relu, SoftRelu, SoftReluFunctor, SoftReluGradFunctor); \
__macro(stanh, STanh, STanhFunctor, STanhGradFunctor); \

@@ -151,6 +151,7 @@ from .tensor.math import exp #DEFINE_ALIAS
from .tensor.math import floor #DEFINE_ALIAS
from .tensor.math import increment #DEFINE_ALIAS
from .tensor.math import log #DEFINE_ALIAS
from .tensor.math import log2 #DEFINE_ALIAS
from .tensor.math import multiplex #DEFINE_ALIAS
from .tensor.math import pow #DEFINE_ALIAS
from .tensor.math import reciprocal #DEFINE_ALIAS

File diff suppressed because it is too large.

@@ -151,6 +151,7 @@ from .math import add #DEFINE_ALIAS
from .math import atan #DEFINE_ALIAS
from .math import logsumexp #DEFINE_ALIAS
from .math import inverse #DEFINE_ALIAS
from .math import log2 #DEFINE_ALIAS
from .math import log1p #DEFINE_ALIAS
from .math import erf #DEFINE_ALIAS
# from .math import addcmul #DEFINE_ALIAS

@@ -79,6 +79,7 @@ __all__ = [
'floor',
'increment',
'log',
'log2',
'logsumexp',
'mul',
'multiplex',
@@ -1315,6 +1316,54 @@ def log1p(x, name=None):
helper.append_op(type="log1p", inputs={"X": x}, outputs={"Out": out})
return out
def log2(x, name=None):
"""
Calculates the log to the base 2 of the given input tensor, element-wise.
.. math::
Out = \\log_2x
Args:
x (Tensor): Input tensor must be one of the following types: float32, float64.
name (str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tensor: The log to the base 2 of the input Tensor computed element-wise.
Examples:
.. code-block:: python
import paddle
# example 1: x is a float
x_i = paddle.to_tensor([[1.0], [2.0]])
res = paddle.log2(x_i) # [[0.], [1.0]]
# example 2: x is float32
x_i = paddle.full(shape=[1], fill_value=2, dtype='float32')
paddle.to_tensor(x_i)
res = paddle.log2(x_i)
print(res) # [1.0]
# example 3: x is float64
x_i = paddle.full(shape=[1], fill_value=2, dtype='float64')
paddle.to_tensor(x_i)
res = paddle.log2(x_i)
print(res) # [1.0]
"""
if in_dygraph_mode():
return core.ops.log2(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log2")
inputs = {'X': [x]}
helper = LayerHelper('log2', **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type="log2", inputs={"X": x}, outputs={"Out": out})
return out
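With the patch applied, the operator is reachable as paddle.log2; the import alias lines above re-export it from paddle.tensor.math. A minimal end-to-end check, assuming a Paddle build that includes this commit:

import paddle

x = paddle.to_tensor([1.0, 2.0, 8.0], dtype='float32')
print(paddle.log2(x).numpy())  # [0. 1. 3.]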
def addcmul(input, tensor1, tensor2, value=1.0, name=None):
"""
