add broadcast feature for elementwise logical op

add broadcast feature for elementwise logical op
revert-26856-strategy_example2
Jack Zhou 5 years ago committed by GitHub
parent 63eef7632e
commit c282db3a93
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -13,7 +13,9 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/fluid/operators/controlflow/logical_op.h" #include "paddle/fluid/operators/controlflow/logical_op.h"
#include <algorithm>
#include <string> #include <string>
#include <vector>
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
namespace paddle { namespace paddle {
@ -97,19 +99,19 @@ class BinaryLogicalOp : public LogicalOp {
OP_INOUT_CHECK(context->HasInput("Y"), "Input", "Y", comment.type); OP_INOUT_CHECK(context->HasInput("Y"), "Input", "Y", comment.type);
auto dim_x = context->GetInputDim("X"); auto dim_x = context->GetInputDim("X");
auto dim_y = context->GetInputDim("Y"); auto dim_y = context->GetInputDim("Y");
if (dim_x == dim_y) {
int product_x = framework::product(dim_x); context->SetOutputDim("Out", dim_x);
int product_y = framework::product(dim_y); } else {
bool check = context->IsRuntime() || (product_x >= 0 && product_y >= 0); int max_dim = std::max(dim_x.size(), dim_y.size());
if (check) { int axis = std::abs(dim_x.size() - dim_y.size());
PADDLE_ENFORCE_EQ(product_x, product_y, std::vector<int> x_dims_array(max_dim);
platform::errors::InvalidArgument( std::vector<int> y_dims_array(max_dim);
"The number of elements in X and Y should be same, " std::vector<int> out_dims_array(max_dim);
"but received %d != %d", GetBroadcastDimsArrays(dim_x, dim_y, x_dims_array.data(),
product_x, product_y)); y_dims_array.data(), out_dims_array.data(),
max_dim, axis);
context->SetOutputDim("Out", framework::make_ddim(out_dims_array));
} }
context->SetOutputDim("Out", context->GetInputDim("X"));
context->ShareLoD("X", "Out"); context->ShareLoD("X", "Out");
} }
}; };

@ -16,6 +16,7 @@ limitations under the License. */
#include <math.h> #include <math.h>
#include <type_traits> #include <type_traits>
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
#include "paddle/fluid/platform/transform.h" #include "paddle/fluid/platform/transform.h"
namespace paddle { namespace paddle {
@ -57,10 +58,8 @@ class BinaryLogicalOpKernel
auto* y = context.Input<framework::Tensor>("Y"); auto* y = context.Input<framework::Tensor>("Y");
auto* out = context.Output<framework::Tensor>("Out"); auto* out = context.Output<framework::Tensor>("Out");
Functor binary_func; Functor binary_func;
platform::Transform<DeviceContext> trans; ElementwiseComputeEx<Functor, DeviceContext, T, bool>(context, x, y, -1,
trans(context.template device_context<DeviceContext>(), x->data<T>(), binary_func, out);
x->data<T>() + x->numel(), y->data<T>(),
out->mutable_data<bool>(context.GetPlace()), binary_func);
} }
}; };

@ -12086,6 +12086,13 @@ Examples:
def _logical_op(op_name, x, y, out=None, name=None, binary_op=True): def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
if in_dygraph_mode():
op = getattr(core.ops, op_name)
if binary_op:
return op(x, y)
else:
return op(x)
check_variable_and_dtype(x, "x", ["bool"], op_name) check_variable_and_dtype(x, "x", ["bool"], op_name)
if y is not None: if y is not None:
check_variable_and_dtype(y, "y", ["bool"], op_name) check_variable_and_dtype(y, "y", ["bool"], op_name)
@ -12110,28 +12117,27 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
return out return out
@templatedoc()
def logical_and(x, y, out=None, name=None): def logical_and(x, y, out=None, name=None):
""" """
:alias_main: paddle.logical_and
:alias: paddle.logical_and, paddle.tensor.logical_and, paddle.tensor.logic.logical_and
:old_api: paddle.fluid.layers.logical_and
``logical_and`` operator computes element-wise logical AND on ``x`` and ``y``, and returns ``out``. ``x``, ``y`` and ``out`` are N-dim boolean ``Variable``. ``logical_and`` operator computes element-wise logical AND on ``x`` and ``y``, and returns ``out``. ``x``, ``y`` and ``out`` are N-dim boolean ``Tensor``.
Each element of ``out`` is calculated by Each element of ``out`` is calculated by
.. math:: .. math::
out = x \&\& y out = x \&\& y
.. note::
``paddle.logical_and`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting`.
Args: Args:
x(${x_type}): ${x_comment}. x (Tensor): the input tensor, it's data type should be bool.
y(${y_type}): ${y_comment}. y (Tensor): the input tensor, it's data type should be bool.
out(Variable): The ``Variable`` that specifies the output of the operator, which can be any ``Variable`` that has been created in the program. The default value is None, and a new ``Variable`` will be created to save the output. out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
name(str|None): The default value is None. Normally there is no need for users to set this property. For more information, please refer to :ref:`api_guide_Name`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns: Returns:
${out_type}: ${out_comment} N-D Tensor. A location into which the result is stored. Its dimension equals that of ``x``.
Examples: Examples:
.. code-block:: python .. code-block:: python
@ -12140,43 +12146,38 @@ def logical_and(x, y, out=None, name=None):
import numpy as np import numpy as np
paddle.disable_static() paddle.disable_static()
x_data = np.array([True, True, False, False], dtype=np.bool) x_data = np.array([True], dtype=np.bool)
y_data = np.array([True, False, True, False], dtype=np.bool) y_data = np.array([True, False, True, False], dtype=np.bool)
x = paddle.to_tensor(x_data) x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data) y = paddle.to_tensor(y_data)
res = paddle.logical_and(x, y) res = paddle.logical_and(x, y)
print(res.numpy()) # [True False False False] print(res.numpy()) # [True False True False]
""" """
if x.shape != y.shape:
raise TypeError(
'Input tensors must be same shape, but received x \'s shape: %s, y \'s shape: %s '
% (x.shape, y.shape))
return _logical_op( return _logical_op(
op_name="logical_and", x=x, y=y, name=name, out=out, binary_op=True) op_name="logical_and", x=x, y=y, name=name, out=out, binary_op=True)
@templatedoc()
def logical_or(x, y, out=None, name=None): def logical_or(x, y, out=None, name=None):
""" """
:alias_main: paddle.logical_or
:alias: paddle.logical_or, paddle.tensor.logical_or, paddle.tensor.logic.logical_or
:old_api: paddle.fluid.layers.logical_or
``logical_or`` operator computes element-wise logical OR on ``x`` and ``y``, and returns ``out``. ``x``, ``y`` and ``out`` are N-dim boolean ``Variable``. ``logical_or`` operator computes element-wise logical OR on ``x`` and ``y``, and returns ``out``. ``x``, ``y`` and ``out`` are N-dim boolean ``Tensor``.
Each element of ``out`` is calculated by Each element of ``out`` is calculated by
.. math:: .. math::
out = x || y out = x || y
.. note::
``paddle.logical_or`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting`.
Args: Args:
x(${x_type}): ${x_comment}. x (Tensor): the input tensor, it's data type should be bool.
y(${y_type}): ${y_comment}. y (Tensor): the input tensor, it's data type should be bool.
out(Variable): The ``Variable`` that specifies the output of the operator, which can be any ``Variable`` that has been created in the program. The default value is None, and a new ``Variable`` will be created to save the output. out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
name(str|None): The default value is None. Normally there is no need for users to set this property. For more information, please refer to :ref:`api_guide_Name`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns: Returns:
${out_type}: ${out_comment} N-D Tensor. A location into which the result is stored. Its dimension equals that of ``x``.
Examples: Examples:
.. code-block:: python .. code-block:: python
@ -12185,43 +12186,38 @@ def logical_or(x, y, out=None, name=None):
import numpy as np import numpy as np
paddle.disable_static() paddle.disable_static()
x_data = np.array([True, True, False, False], dtype=np.bool) x_data = np.array([True, False], dtype=np.bool).reshape(2, 1)
y_data = np.array([True, False, True, False], dtype=np.bool) y_data = np.array([True, False, True, False], dtype=np.bool).reshape(2, 2)
x = paddle.to_variable(x_data) x = paddle.to_tensor(x_data)
y = paddle.to_variable(y_data) y = paddle.to_tensor(y_data)
res = paddle.logical_or(x, y) res = paddle.logical_or(x, y)
print(res.numpy()) # [True True True False] print(res.numpy()) # [[ True True] [ True False]]
""" """
if x.shape != y.shape:
raise TypeError(
'Input tensors must be same shape, but received x \'s shape: %s, y \'s shape: %s '
% (x.shape, y.shape))
return _logical_op( return _logical_op(
op_name="logical_or", x=x, y=y, name=name, out=out, binary_op=True) op_name="logical_or", x=x, y=y, name=name, out=out, binary_op=True)
@templatedoc()
def logical_xor(x, y, out=None, name=None): def logical_xor(x, y, out=None, name=None):
""" """
:alias_main: paddle.logical_xor
:alias: paddle.logical_xor, paddle.tensor.logical_xor, paddle.tensor.logic.logical_xor
:old_api: paddle.fluid.layers.logical_xor
``logical_xor`` operator computes element-wise logical XOR on ``x`` and ``y``, and returns ``out``. ``x``, ``y`` and ``out`` are N-dim boolean ``Variable``. ``logical_xor`` operator computes element-wise logical XOR on ``x`` and ``y``, and returns ``out``. ``x``, ``y`` and ``out`` are N-dim boolean ``Tensor``.
Each element of ``out`` is calculated by Each element of ``out`` is calculated by
.. math:: .. math::
out = (x || y) \&\& !(x \&\& y) out = (x || y) \&\& !(x \&\& y)
.. note::
``paddle.logical_xor`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting`.
Args: Args:
x(${x_type}): ${x_comment}. x (Tensor): the input tensor, it's data type should be bool.
y(${y_type}): ${y_comment}. y (Tensor): the input tensor, it's data type should be bool.
out(Variable): The ``Variable`` that specifies the output of the operator, which can be any ``Variable`` that has been created in the program. The default value is None, and a new ``Variable`` will be created to save the output. out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
name(str|None): The default value is None. Normally there is no need for users to set this property. For more information, please refer to :ref:`api_guide_Name`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns: Returns:
${out_type}: ${out_comment} N-D Tensor. A location into which the result is stored. Its dimension equals that of ``x``.
Examples: Examples:
.. code-block:: python .. code-block:: python
@ -12230,17 +12226,13 @@ def logical_xor(x, y, out=None, name=None):
import numpy as np import numpy as np
paddle.disable_static() paddle.disable_static()
x_data = np.array([True, True, False, False], dtype=np.bool) x_data = np.array([True, False], dtype=np.bool).reshape([2, 1])
y_data = np.array([True, False, True, False], dtype=np.bool) y_data = np.array([True, False, True, False], dtype=np.bool).reshape([2, 2])
x = paddle.to_variable(x_data) x = paddle.to_tensor(x_data)
y = paddle.to_variable(y_data) y = paddle.to_tensor(y_data)
res = paddle.logical_xor(x, y) res = paddle.logical_xor(x, y)
print(res.numpy()) # [False True True False] print(res.numpy()) # [[False, True], [ True, False]]
""" """
if x.shape != y.shape:
raise TypeError(
'Input tensors must be same shape, but received x \'s shape: %s, y \'s shape: %s '
% (x.shape, y.shape))
return _logical_op( return _logical_op(
op_name="logical_xor", x=x, y=y, name=name, out=out, binary_op=True) op_name="logical_xor", x=x, y=y, name=name, out=out, binary_op=True)

File diff suppressed because it is too large Load Diff
Loading…
Cancel
Save