add conj op for complex types (#29527)
* add conj op for complex types
* add conj for complex types
* add more test cases
* add conj_op test
* modify conj api and impl
* add complex type for fill_constant_op xpu
* add setConstant for complex type
* remove complex conj test file
* user-defined grad for test_conj_op
* add test case for static mode of conj api
* modify conj doc
* change input arg name to x
* remove useless code
* conj supports real types
* add conj test case for real numbers
parent b593d588aa
commit 71063b8137
@ -0,0 +1,87 @@
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/operators/conj_op.h"

#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

class ConjOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "conj");
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "conj");

    auto in_dims = ctx->GetInputDim("X");

    ctx->SetOutputDim("Out", in_dims);
    ctx->ShareLoD("X", /*->*/ "Out");
  }
};

class ConjOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "(Tensor), The input tensor of conj op.");
    AddOutput("Out", "(Tensor), The output tensor of conj op.");
    AddComment(R"DOC(
Conj Operator.

This operator computes the element-wise conjugate of the input $X$.

)DOC");
  }
};

template <typename T>
class ConjGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

  void Apply(GradOpPtr<T> retv) const override {
    retv->SetType("conj");
    retv->SetInput("X", this->OutputGrad("Out"));
    retv->SetAttrMap(this->Attrs());
    retv->SetOutput("Out", this->InputGrad("X"));
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

REGISTER_OPERATOR(conj, ops::ConjOp, ops::ConjOpMaker,
                  ops::ConjGradMaker<paddle::framework::OpDesc>,
                  ops::ConjGradMaker<paddle::imperative::OpBase>);

REGISTER_OP_CPU_KERNEL(
    conj, ops::ConjKernel<paddle::platform::CPUDeviceContext,
                          paddle::platform::complex64>,
    ops::ConjKernel<paddle::platform::CPUDeviceContext,
                    paddle::platform::complex128>,
    ops::ConjKernel<paddle::platform::CPUDeviceContext, float>,
    ops::ConjKernel<paddle::platform::CPUDeviceContext, double>,
    ops::ConjKernel<paddle::platform::CPUDeviceContext, int>,
    ops::ConjKernel<paddle::platform::CPUDeviceContext, int64_t>);
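ConjGradMaker above wires conj up as its own backward op: the gradient with respect to X is simply the conjugate of the incoming gradient with respect to Out. A minimal NumPy sketch of that contract (illustration only, not part of the commit):

import numpy as np

# Forward: out = conj(x), element-wise.
x = np.array([1 + 2j, 3 - 4j], dtype=np.complex64)
out = np.conj(x)

# Backward, as wired by ConjGradMaker: the gradient w.r.t. "Out" is fed
# through the same conj op again to produce the gradient w.r.t. "X".
grad_out = np.array([1 + 1j, 1 + 1j], dtype=np.complex64)
grad_x = np.conj(grad_out)  # same convention as user_defined_grads in the test below

print(out)     # [1.-2.j 3.+4.j]
print(grad_x)  # [1.-1.j 1.-1.j]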
@ -0,0 +1,28 @@
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/operators/conj_op.h"
#include "paddle/fluid/platform/complex128.h"
#include "paddle/fluid/platform/complex64.h"

namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
    conj, ops::ConjKernel<paddle::platform::CUDADeviceContext,
                          paddle::platform::complex64>,
    ops::ConjKernel<paddle::platform::CUDADeviceContext,
                    paddle::platform::complex128>,
    ops::ConjKernel<paddle::platform::CUDADeviceContext, float>,
    ops::ConjKernel<paddle::platform::CUDADeviceContext, double>,
    ops::ConjKernel<paddle::platform::CUDADeviceContext, int>,
    ops::ConjKernel<paddle::platform::CUDADeviceContext, int64_t>);
@ -0,0 +1,85 @@
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/platform/for_range.h"

namespace paddle {
namespace operators {
using Tensor = framework::Tensor;

template <typename T>
using EnableComplex =
    typename std::enable_if<std::is_same<T, platform::complex64>::value ||
                            std::is_same<T, platform::complex128>::value>::type;

template <typename T>
using DisableComplex = typename std::enable_if<
    !std::is_same<T, platform::complex64>::value &&
    !std::is_same<T, platform::complex128>::value>::type;

template <typename T, typename Enable = void>
struct ConjFunctor;

template <typename T>
struct ConjFunctor<T, EnableComplex<T>> {
  ConjFunctor(const T* input, int64_t numel, T* output)
      : input_(input), numel_(numel), output_(output) {}

  HOSTDEVICE void operator()(size_t idx) const {
    output_[idx] = T(input_[idx].real, -input_[idx].imag);
  }
  const T* input_;
  int64_t numel_;
  T* output_;
};

template <typename T>
struct ConjFunctor<T, DisableComplex<T>> {
  ConjFunctor(const T* input, int64_t numel, T* output)
      : input_(input), numel_(numel), output_(output) {}

  HOSTDEVICE void operator()(size_t idx) const { output_[idx] = input_[idx]; }
  const T* input_;
  int64_t numel_;
  T* output_;
};

template <typename DeviceContext, typename T>
class ConjKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const Tensor* x = context.Input<Tensor>("X");
    Tensor* out = context.Output<Tensor>("Out");

    auto numel = x->numel();
    auto* x_data = x->data<T>();
    auto* out_data = out->mutable_data<T>(context.GetPlace(),
                                          size_t(x->numel() * sizeof(T)));

    auto& dev_ctx = context.template device_context<DeviceContext>();
    platform::ForRange<DeviceContext> for_range(dev_ctx, numel);
    ConjFunctor<T> functor(x_data, numel, out_data);
    for_range(functor);
  }
};

DECLARE_INPLACE_OP_INFERER(ConjOpInplaceInferer, {"X", "Out"});

}  // namespace operators
}  // namespace paddle
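ConjFunctor is specialized on the element type: the EnableComplex specialization writes T(real, -imag) for complex64/complex128, while the DisableComplex specialization copies each value through unchanged, which is why the operator can also be registered for float, double, int and int64_t. A rough NumPy analogue of that dispatch, for illustration only:

import numpy as np

def conj_reference(x):
    """Mirror ConjFunctor's two specializations (illustrative sketch)."""
    if np.iscomplexobj(x):
        # EnableComplex<T>: negate the imaginary part element-wise.
        return np.conj(x)
    # DisableComplex<T>: identity copy for real/integer types.
    return x.copy()

print(conj_reference(np.array([1 + 2j, 3 - 4j])))  # [1.-2.j 3.+4.j]
print(conj_reference(np.array([1, 2, 3])))         # [1 2 3]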
@ -0,0 +1,126 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
import sys
sys.path.append("..")
from op_test import OpTest
from paddle.fluid import Program, program_guard
import paddle.fluid.dygraph as dg
import paddle.static as static
from numpy.random import random as rand

paddle.enable_static()


class TestConjOp(OpTest):
    def setUp(self):
        self.op_type = "conj"
        self.init_dtype_type()
        self.init_input_output()
        self.init_grad_input_output()

    def init_dtype_type(self):
        self.dtype = np.complex64

    def init_input_output(self):
        x = (np.random.random((12, 14)) + 1j * np.random.random(
            (12, 14))).astype(self.dtype)
        out = np.conj(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_grad_input_output(self):
        self.grad_out = (np.ones((12, 14)) + 1j * np.ones(
            (12, 14))).astype(self.dtype)
        self.grad_in = np.conj(self.grad_out)

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(
            ['X'],
            'Out',
            user_defined_grads=[self.grad_in],
            user_defined_grad_outputs=[self.grad_out])


class TestComplexConjOp(unittest.TestCase):
    def setUp(self):
        self._dtypes = ["float32", "float64"]
        self._places = [paddle.CPUPlace()]
        if paddle.is_compiled_with_cuda():
            self._places.append(paddle.CUDAPlace(0))

    def test_conj_api(self):
        for dtype in self._dtypes:
            input = rand([2, 20, 2, 3]).astype(dtype) + 1j * rand(
                [2, 20, 2, 3]).astype(dtype)
            for place in self._places:
                with dg.guard(place):
                    var_x = paddle.to_tensor(input)
                    result = paddle.conj(var_x).numpy()
                    target = np.conj(input)
                    self.assertTrue(np.array_equal(result, target))

    def test_conj_operator(self):
        for dtype in self._dtypes:
            input = rand([2, 20, 2, 3]).astype(dtype) + 1j * rand(
                [2, 20, 2, 3]).astype(dtype)
            for place in self._places:
                with dg.guard(place):
                    var_x = paddle.to_tensor(input)
                    result = var_x.conj().numpy()
                    target = np.conj(input)
                    self.assertTrue(np.array_equal(result, target))

    def test_conj_static_mode(self):
        def init_input_output(dtype):
            input = rand([2, 20, 2, 3]).astype(dtype) + 1j * rand(
                [2, 20, 2, 3]).astype(dtype)
            return {'x': input}, np.conj(input)

        for dtype in self._dtypes:
            input_dict, np_res = init_input_output(dtype)
            for place in self._places:
                with static.program_guard(static.Program()):
                    x_dtype = np.complex64 if dtype == "float32" else np.complex128
                    x = static.data(
                        name="x", shape=[2, 20, 2, 3], dtype=x_dtype)
                    out = paddle.conj(x)

                    exe = static.Executor(place)
                    out_value = exe.run(feed=input_dict, fetch_list=[out.name])
                    self.assertTrue(np.array_equal(np_res, out_value[0]))

    def test_conj_api_real_number(self):
        for dtype in self._dtypes:
            input = rand([2, 20, 2, 3]).astype(dtype)
            for place in self._places:
                with dg.guard(place):
                    var_x = paddle.to_tensor(input)
                    result = paddle.conj(var_x).numpy()
                    target = np.conj(input)
                    self.assertTrue(np.array_equal(result, target))


if __name__ == "__main__":
    unittest.main()
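For completeness, a minimal dygraph usage sketch of the API exercised by the tests above (assuming a Paddle build that includes this commit):

import numpy as np
import paddle

paddle.disable_static()  # the unit test switches to static mode; use dygraph here

x = paddle.to_tensor(np.array([1 + 2j, 3 - 4j], dtype=np.complex64))
y = paddle.conj(x)  # functional form
z = x.conj()        # Tensor method form, as in test_conj_operator

print(y.numpy())  # [1.-2.j 3.+4.j]
print(np.array_equal(y.numpy(), z.numpy()))  # True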