[CustomOp] Support attributes as func input in custom op (#31128)
* add simple attr support and test * add int, float attr support * support other attributes * add custom attrs test in cmake * polish details * fix failed test * add backward test * update test flags (test_model_benchmark_ci)
parent
ffbf71359a
commit
e8cdb49aa9
@ -0,0 +1,182 @@
|
||||
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include <cmath>      // std::fabs for float attribute comparisons
#include <cstdint>    // int64_t
#include <cstdlib>
#include <iostream>
#include <stdexcept>  // std::runtime_error
#include <string>
#include <vector>

#include "paddle/extension.h"
|
||||
|
||||
// Copies `x_numel` elements from `x_data` to `out_data` on the CPU.
//
// This is the (deliberately trivial) kernel body shared by the forward and
// backward ops below: the op behaves as identity assignment so the tests can
// focus purely on attribute passing.
//
// @param x_data   pointer to the source buffer (at least x_numel elements)
// @param out_data pointer to the destination buffer (at least x_numel elements)
// @param x_numel  number of elements to copy
template <typename data_t>
void assign_cpu_kernel(const data_t* x_data,
                       data_t* out_data,
                       int64_t x_numel) {
  // Use an int64_t index so tensors with more than INT_MAX elements do not
  // truncate/overflow the loop counter (x_numel is already int64_t).
  for (int64_t i = 0; i < x_numel; ++i) {
    out_data[i] = x_data[i];
  }
}
|
||||
|
||||
std::vector<paddle::Tensor> AttrTestForward(
|
||||
const paddle::Tensor& x,
|
||||
bool bool_attr,
|
||||
int int_attr,
|
||||
float float_attr,
|
||||
int64_t int64_attr,
|
||||
std::string str_attr,
|
||||
std::vector<int> int_vec_attr,
|
||||
std::vector<float> float_vec_attr,
|
||||
std::vector<int64_t> int64_vec_attr,
|
||||
std::vector<std::string> str_vec_attr) {
|
||||
auto out = paddle::Tensor(paddle::PlaceType::kCPU);
|
||||
out.reshape(x.shape());
|
||||
|
||||
PD_DISPATCH_FLOATING_TYPES(
|
||||
x.type(), "assign_cpu_kernel", ([&] {
|
||||
assign_cpu_kernel<data_t>(
|
||||
x.data<data_t>(), out.mutable_data<data_t>(), x.size());
|
||||
}));
|
||||
|
||||
// Check attrs value
|
||||
if (bool_attr != true) {
|
||||
throw std::runtime_error("bool_attr value error.");
|
||||
}
|
||||
if (int_attr != 10) {
|
||||
throw std::runtime_error("int_attr value error.");
|
||||
}
|
||||
if (std::abs(float_attr - 3.14) > 1e-6) {
|
||||
throw std::runtime_error("float_attr value error.");
|
||||
}
|
||||
if (int64_attr != 10000000000) {
|
||||
throw std::runtime_error("int64_attr value error.");
|
||||
}
|
||||
if (str_attr != "StrAttr") {
|
||||
throw std::runtime_error("str_attr value error.");
|
||||
}
|
||||
|
||||
if (int_vec_attr.size() != 3) {
|
||||
throw std::runtime_error("int_vec_attr size error.");
|
||||
} else {
|
||||
for (auto& value : int_vec_attr) {
|
||||
if (value != 10) {
|
||||
throw std::runtime_error("int_vec_attr value error.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (float_vec_attr.size() != 3) {
|
||||
throw std::runtime_error("float_vec_attr size error.");
|
||||
} else {
|
||||
for (auto& value : float_vec_attr) {
|
||||
if (std::abs(value - 3.14) > 1e-6) {
|
||||
throw std::runtime_error("float_vec_attr value error.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (int64_vec_attr.size() != 3) {
|
||||
throw std::runtime_error("int64_vec_attr size error.");
|
||||
} else {
|
||||
for (auto& value : int64_vec_attr) {
|
||||
if (value != 10000000000) {
|
||||
throw std::runtime_error("int64_vec_attr value error.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (str_vec_attr.size() != 3) {
|
||||
throw std::runtime_error("str_vec_attr size error.");
|
||||
} else {
|
||||
for (auto& value : str_vec_attr) {
|
||||
if (value != "StrAttr") {
|
||||
throw std::runtime_error("str_vec_attr value error.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return {out};
|
||||
}
|
||||
|
||||
// The attrs of backward op must be the subset of attrs of forward op
|
||||
std::vector<paddle::Tensor> AttrTestBackward(
|
||||
const paddle::Tensor& grad_out,
|
||||
int int_attr,
|
||||
std::vector<float> float_vec_attr,
|
||||
std::vector<std::string> str_vec_attr) {
|
||||
auto grad_x = paddle::Tensor(paddle::PlaceType::kCPU);
|
||||
grad_x.reshape(grad_out.shape());
|
||||
|
||||
PD_DISPATCH_FLOATING_TYPES(grad_out.type(), "assign_cpu_kernel", ([&] {
|
||||
assign_cpu_kernel<data_t>(
|
||||
grad_out.data<data_t>(),
|
||||
grad_x.mutable_data<data_t>(),
|
||||
grad_out.size());
|
||||
}));
|
||||
|
||||
if (int_attr != 10) {
|
||||
throw std::runtime_error("int_attr value error.");
|
||||
}
|
||||
|
||||
if (float_vec_attr.size() != 3) {
|
||||
throw std::runtime_error("float_vec_attr size error.");
|
||||
} else {
|
||||
for (auto& value : float_vec_attr) {
|
||||
if (std::abs(value - 3.14) > 1e-6) {
|
||||
throw std::runtime_error("float_vec_attr value error.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (str_vec_attr.size() != 3) {
|
||||
throw std::runtime_error("str_vec_attr size error.");
|
||||
} else {
|
||||
for (auto& value : str_vec_attr) {
|
||||
if (value != "StrAttr") {
|
||||
throw std::runtime_error("str_vec_attr value error.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return {grad_x};
|
||||
}
|
||||
|
||||
// Shape inference for "attr_test": the output shape equals the input shape.
std::vector<std::vector<int64_t>> InferShape(std::vector<int64_t> x_shape) {
  std::vector<std::vector<int64_t>> out_shapes;
  out_shapes.emplace_back(std::move(x_shape));
  return out_shapes;
}
|
||||
|
||||
// Dtype inference for "attr_test": the output dtype equals the input dtype.
std::vector<paddle::DataType> InferDType(paddle::DataType x_dtype) {
  std::vector<paddle::DataType> out_dtypes{x_dtype};
  return out_dtypes;
}
|
||||
|
||||
// Register the "attr_test" op and its gradient op "attr_test_grad".
// Each attribute is declared as "name: type"; the declaration order must
// match the extra (non-tensor) parameters of the bound kernel functions.
// The backward op declares only a subset of the forward attributes.
PD_BUILD_OP("attr_test")
    .Inputs({"X"})
    .Outputs({"Out"})
    .Attrs({"bool_attr: bool",
            "int_attr: int",
            "float_attr: float",
            "int64_attr: int64_t",
            "str_attr: std::string",
            "int_vec_attr: std::vector<int>",
            "float_vec_attr: std::vector<float>",
            "int64_vec_attr: std::vector<int64_t>",
            "str_vec_attr: std::vector<std::string>"})
    .SetKernelFn(PD_KERNEL(AttrTestForward))
    .SetInferShapeFn(PD_INFER_SHAPE(InferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(InferDType))
    .SetBackwardOp("attr_test_grad")
    .Inputs({paddle::Grad("Out")})
    .Outputs({paddle::Grad("X")})
    .Attrs({"int_attr: int",
            "float_vec_attr: std::vector<float>",
            "str_vec_attr: std::vector<std::string>"})
    .SetKernelFn(PD_KERNEL(AttrTestBackward));
|
@ -0,0 +1,67 @@
|
||||
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Module-level setup: JIT-compiles attr_test_op.cc into the extension module
# `custom_attrs` used by the test class below. Importing this file therefore
# triggers a C++ build.
import os
import unittest
import numpy as np

import paddle
from paddle.utils.cpp_extension import load, get_build_directory
from utils import paddle_includes, extra_compile_args
from paddle.utils.cpp_extension.extension_utils import run_cmd

# Because Windows don't use docker, the shared lib already exists in the
# cache dir, it will not be compiled again unless the shared lib is removed.
file = '{}\\custom_attrs_jit\\custom_attrs_jit.pyd'.format(get_build_directory(
))
if os.name == 'nt' and os.path.isfile(file):
    # Delete the stale .pyd via the Windows shell so `load` rebuilds it.
    cmd = 'del {}'.format(file)
    run_cmd(cmd, True)

# Compile and load custom op Just-In-Time.
custom_attrs = load(
    name='custom_attrs_jit',
    sources=['attr_test_op.cc'],
    extra_include_paths=paddle_includes,  # add for Coverage CI
    extra_cxx_cflags=extra_compile_args,  # add for Coverage CI
    verbose=True)
|
||||
|
||||
|
||||
class TestJitCustomAttrs(unittest.TestCase):
    def test_attr_value(self):
        """Feed every supported attribute type to attr_test and check that
        the op still behaves as identity.

        The attribute-value assertions live in the C++ kernel, which throws
        (failing this test) if any value does not arrive intact.
        """
        paddle.set_device('cpu')
        # Expected attribute values, listed in the op-signature order:
        # bool, int, float, int64, str, then the four vector attrs.
        attr_values = [
            True,
            10,
            3.14,
            10000000000,
            "StrAttr",
            [10, 10, 10],
            [3.14, 3.14, 3.14],
            [10000000000, 10000000000, 10000000000],
            ["StrAttr", "StrAttr", "StrAttr"],
        ]

        x = paddle.ones([2, 2], dtype='float32')
        x.stop_gradient = False
        out = custom_attrs.attr_test(x, *attr_values)
        out.stop_gradient = False
        out.backward()

        # Identity op: the output must equal the input exactly.
        self.assertTrue(np.array_equal(x.numpy(), out.numpy()))
|
||||
|
||||
|
||||
# Allow running this test file directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
|
Loading…
Reference in new issue