[CustomOp] Add more dispatch macros for users (#31058)
* add more dispatch macros
* add more dispatch macros
* add more tests
* revert unneeded change
* add timeout for test dispatch
* add float and complex test
* remove and macro
parent
d5323dab41
commit
6beeafe797
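For readers unfamiliar with the dispatch macros being exercised here: a PD_DISPATCH_* macro lets a custom-op author write one templated kernel (such as assign_cpu_kernel in the file below) and have it instantiated for a whole family of dtypes chosen at runtime. The snippet below is a minimal, illustrative sketch of that pattern, inferred from the call sites in this diff; the macro name ILLUSTRATIVE_DISPATCH_INTEGRAL_TYPES is hypothetical, the paddle::DataType case labels are assumed from this file's use of that enum, and Paddle's real macros in paddle/extension.h differ in detail.

#include <stdexcept>
#include <string>

#include "paddle/extension.h"

// Illustrative sketch only -- not Paddle's actual macro implementation.
// Shows the pattern the call sites below rely on:
//   PD_DISPATCH_INTEGRAL_TYPES(x.type(), "assign_cpu_kernel", ([&] { ... }));
// conceptually expands to a switch over the runtime dtype that defines
// data_t and then invokes the lambda pasted in as the last argument.
#define ILLUSTRATIVE_DISPATCH_INTEGRAL_TYPES(TYPE, NAME, ...)             \
  switch (TYPE) {                                                         \
    case paddle::DataType::INT32: {                                       \
      using data_t = int32_t; /* visible inside the pasted lambda body */ \
      __VA_ARGS__();                                                      \
      break;                                                              \
    }                                                                     \
    case paddle::DataType::INT64: {                                       \
      using data_t = int64_t;                                             \
      __VA_ARGS__();                                                      \
      break;                                                              \
    }                                                                     \
    /* ...cases for int8, uint8 and int16 elided for brevity... */        \
    default:                                                              \
      throw std::runtime_error(std::string(NAME) +                        \
                               ": unsupported data type");                \
  }

With this pattern, supporting a new family of dtypes only requires another macro variant, which is what this change adds for the integral, complex, and combined floating/integral/complex cases tested below.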
@@ -0,0 +1,138 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <iostream>
#include <vector>

#include "paddle/extension.h"

// Elementwise copy kernel shared by every dispatch test below; data_t is the
// concrete element type selected by the PD_DISPATCH_* macro at the call site.
template <typename data_t>
void assign_cpu_kernel(const data_t* x_data,
                       data_t* out_data,
                       int64_t x_numel) {
  for (int i = 0; i < x_numel; ++i) {
    out_data[i] = x_data[i];
  }
}

// The output keeps the input's shape and dtype.
std::vector<std::vector<int64_t>> InferShape(std::vector<int64_t> x_shape) {
  return {x_shape};
}

std::vector<paddle::DataType> InferDType(paddle::DataType x_dtype) {
  return {x_dtype};
}

std::vector<paddle::Tensor> DispatchTestInteger(const paddle::Tensor& x) {
  auto out = paddle::Tensor(paddle::PlaceType::kCPU);
  out.reshape(x.shape());

  PD_DISPATCH_INTEGRAL_TYPES(
      x.type(), "assign_cpu_kernel", ([&] {
        assign_cpu_kernel<data_t>(
            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
      }));

  return {out};
}

PD_BUILD_OP("dispatch_test_integer")
    .Inputs({"X"})
    .Outputs({"Out"})
    .SetKernelFn(PD_KERNEL(DispatchTestInteger))
    .SetInferShapeFn(PD_INFER_SHAPE(InferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(InferDType));

std::vector<paddle::Tensor> DispatchTestComplex(const paddle::Tensor& x) {
  auto out = paddle::Tensor(paddle::PlaceType::kCPU);
  out.reshape(x.shape());

  PD_DISPATCH_COMPLEX_TYPES(
      x.type(), "assign_cpu_kernel", ([&] {
        assign_cpu_kernel<data_t>(
            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
      }));

  return {out};
}

PD_BUILD_OP("dispatch_test_complex")
    .Inputs({"X"})
    .Outputs({"Out"})
    .SetKernelFn(PD_KERNEL(DispatchTestComplex))
    .SetInferShapeFn(PD_INFER_SHAPE(InferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(InferDType));

std::vector<paddle::Tensor> DispatchTestFloatAndInteger(
    const paddle::Tensor& x) {
  auto out = paddle::Tensor(paddle::PlaceType::kCPU);
  out.reshape(x.shape());

  PD_DISPATCH_FLOATING_AND_INTEGRAL_TYPES(
      x.type(), "assign_cpu_kernel", ([&] {
        assign_cpu_kernel<data_t>(
            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
      }));

  return {out};
}

PD_BUILD_OP("dispatch_test_float_and_integer")
    .Inputs({"X"})
    .Outputs({"Out"})
    .SetKernelFn(PD_KERNEL(DispatchTestFloatAndInteger))
    .SetInferShapeFn(PD_INFER_SHAPE(InferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(InferDType));

std::vector<paddle::Tensor> DispatchTestFloatAndComplex(
    const paddle::Tensor& x) {
  auto out = paddle::Tensor(paddle::PlaceType::kCPU);
  out.reshape(x.shape());

  PD_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
      x.type(), "assign_cpu_kernel", ([&] {
        assign_cpu_kernel<data_t>(
            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
      }));

  return {out};
}

PD_BUILD_OP("dispatch_test_float_and_complex")
    .Inputs({"X"})
    .Outputs({"Out"})
    .SetKernelFn(PD_KERNEL(DispatchTestFloatAndComplex))
    .SetInferShapeFn(PD_INFER_SHAPE(InferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(InferDType));

std::vector<paddle::Tensor> DispatchTestFloatAndIntegerAndComplex(
    const paddle::Tensor& x) {
  auto out = paddle::Tensor(paddle::PlaceType::kCPU);
  out.reshape(x.shape());

  PD_DISPATCH_FLOATING_AND_INTEGRAL_AND_COMPLEX_TYPES(
      x.type(), "assign_cpu_kernel", ([&] {
        assign_cpu_kernel<data_t>(
            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
      }));

  return {out};
}

PD_BUILD_OP("dispatch_test_float_and_integer_and_complex")
    .Inputs({"X"})
    .Outputs({"Out"})
    .SetKernelFn(PD_KERNEL(DispatchTestFloatAndIntegerAndComplex))
    .SetInferShapeFn(PD_INFER_SHAPE(InferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(InferDType));
@@ -0,0 +1,79 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest
import paddle
import numpy as np
from paddle.utils.cpp_extension import load
from utils import paddle_includes, extra_compile_args

dispatch_op = load(
    name='dispatch_op',
    sources=['dispatch_test_op.cc'],
    extra_include_paths=paddle_includes,  # add for Coverage CI
    extra_cflags=extra_compile_args)  # add for Coverage CI


class TestJitDispatch(unittest.TestCase):
    def setUp(self):
        paddle.set_device('cpu')

    def run_dispatch_test(self, func, dtype):
        np_x = np.ones([2, 2]).astype(dtype)
        x = paddle.to_tensor(np_x)
        out = func(x)
        np_x = x.numpy()
        np_out = out.numpy()
        self.assertTrue(dtype in str(np_out.dtype))
        self.assertTrue(
            np.array_equal(np_x, np_out),
            "custom op x: {},\n custom op out: {}".format(np_x, np_out))

    def test_dispatch_integer(self):
        dtypes = ["int32", "int64", "int8", "uint8", "int16"]
        for dtype in dtypes:
            self.run_dispatch_test(dispatch_op.dispatch_test_integer, dtype)

    def test_dispatch_complex(self):
        dtypes = ["complex64", "complex128"]
        for dtype in dtypes:
            self.run_dispatch_test(dispatch_op.dispatch_test_complex, dtype)

    def test_dispatch_float_and_integer(self):
        dtypes = [
            "float32", "float64", "int32", "int64", "int8", "uint8", "int16"
        ]
        for dtype in dtypes:
            self.run_dispatch_test(dispatch_op.dispatch_test_float_and_integer,
                                   dtype)

    def test_dispatch_float_and_complex(self):
        dtypes = ["float32", "float64", "complex64", "complex128"]
        for dtype in dtypes:
            self.run_dispatch_test(dispatch_op.dispatch_test_float_and_complex,
                                   dtype)

    def test_dispatch_float_and_integer_and_complex(self):
        dtypes = [
            "float32", "float64", "int32", "int64", "int8", "uint8", "int16",
            "complex64", "complex128"
        ]
        for dtype in dtypes:
            self.run_dispatch_test(
                dispatch_op.dispatch_test_float_and_integer_and_complex, dtype)


if __name__ == '__main__':
    unittest.main()