Normalized function parameter writing (#31588)

Chen Weihang 4 years ago committed by GitHub
parent cac9635a67
commit 30a627aaf3
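In brief, the diff below normalizes how custom-operator parameters are written: attribute arguments (and the concat op's infer functions further down) may be declared either by value or by const reference. The attribute test file is refactored to share CheckAllForwardAttrs/CheckAllBackwardAttrs helpers, gains ConstAttrTestForward/ConstAttrTestBackward kernels with const-reference attributes plus a const_attr_test op registration, and the Python test adds a matching test_const_attr_value case. As a minimal sketch of the two signature styles the diff exercises (the function names below are placeholders, not part of the commit):

// Sketch only: illustrates the two attribute-passing styles shown in the diff.
#include <vector>
#include "paddle/extension.h"

// Attributes taken by value (the pre-existing style used by attr_test).
std::vector<paddle::Tensor> ByValueForward(const paddle::Tensor& x, int int_attr) {
  if (int_attr < 0) {
    throw std::runtime_error("int_attr must be non-negative.");
  }
  return {x};
}

// Attributes taken by const reference (the style const_attr_test adds coverage for).
std::vector<paddle::Tensor> ByConstRefForward(const paddle::Tensor& x,
                                              const int& int_attr) {
  if (int_attr < 0) {
    throw std::runtime_error("int_attr must be non-negative.");
  }
  return {x};
}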

File diff suppressed because it is too large.

@@ -27,27 +27,15 @@ void assign_cpu_kernel(const data_t* x_data,
   }
 }
 
-std::vector<paddle::Tensor> AttrTestForward(
-    const paddle::Tensor& x,
-    bool bool_attr,
-    int int_attr,
-    float float_attr,
-    int64_t int64_attr,
-    std::string str_attr,
-    std::vector<int> int_vec_attr,
-    std::vector<float> float_vec_attr,
-    std::vector<int64_t> int64_vec_attr,
-    std::vector<std::string> str_vec_attr) {
-  auto out = paddle::Tensor(paddle::PlaceType::kCPU);
-  out.reshape(x.shape());
-  PD_DISPATCH_FLOATING_TYPES(
-      x.type(), "assign_cpu_kernel", ([&] {
-        assign_cpu_kernel<data_t>(
-            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
-      }));
-  // Check attrs value
+void CheckAllForwardAttrs(const bool& bool_attr,
+                          const int& int_attr,
+                          const float& float_attr,
+                          const int64_t& int64_attr,
+                          const std::string& str_attr,
+                          const std::vector<int>& int_vec_attr,
+                          const std::vector<float>& float_vec_attr,
+                          const std::vector<int64_t>& int64_vec_attr,
+                          const std::vector<std::string>& str_vec_attr) {
   if (bool_attr != true) {
     throw std::runtime_error("bool_attr value error.");
   }
@@ -103,26 +91,11 @@ std::vector<paddle::Tensor> AttrTestForward(
       }
     }
   }
-  return {out};
 }
 
-// The attrs of backward op must be the subset of attrs of forward op
-std::vector<paddle::Tensor> AttrTestBackward(
-    const paddle::Tensor& grad_out,
-    int int_attr,
-    std::vector<float> float_vec_attr,
-    std::vector<std::string> str_vec_attr) {
-  auto grad_x = paddle::Tensor(paddle::PlaceType::kCPU);
-  grad_x.reshape(grad_out.shape());
-  PD_DISPATCH_FLOATING_TYPES(grad_out.type(), "assign_cpu_kernel", ([&] {
-                               assign_cpu_kernel<data_t>(
-                                   grad_out.data<data_t>(),
-                                   grad_x.mutable_data<data_t>(),
-                                   grad_out.size());
-                             }));
+void CheckAllBackwardAttrs(const int& int_attr,
+                           const std::vector<float>& float_vec_attr,
+                           const std::vector<std::string>& str_vec_attr) {
   if (int_attr != 10) {
     throw std::runtime_error("int_attr value error.");
   }
@@ -146,6 +119,114 @@ std::vector<paddle::Tensor> AttrTestBackward(
       }
     }
   }
+}
+
+std::vector<paddle::Tensor> AttrTestForward(
+    const paddle::Tensor& x,
+    bool bool_attr,
+    int int_attr,
+    float float_attr,
+    int64_t int64_attr,
+    std::string str_attr,
+    std::vector<int> int_vec_attr,
+    std::vector<float> float_vec_attr,
+    std::vector<int64_t> int64_vec_attr,
+    std::vector<std::string> str_vec_attr) {
+  auto out = paddle::Tensor(paddle::PlaceType::kCPU);
+  out.reshape(x.shape());
+  PD_DISPATCH_FLOATING_TYPES(
+      x.type(), "assign_cpu_kernel", ([&] {
+        assign_cpu_kernel<data_t>(
+            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
+      }));
+  // Check attrs value
+  CheckAllForwardAttrs(bool_attr,
+                       int_attr,
+                       float_attr,
+                       int64_attr,
+                       str_attr,
+                       int_vec_attr,
+                       float_vec_attr,
+                       int64_vec_attr,
+                       str_vec_attr);
+  return {out};
+}
+
+// The attrs of backward op must be the subset of attrs of forward op
+std::vector<paddle::Tensor> AttrTestBackward(
+    const paddle::Tensor& grad_out,
+    int int_attr,
+    std::vector<float> float_vec_attr,
+    std::vector<std::string> str_vec_attr) {
+  auto grad_x = paddle::Tensor(paddle::PlaceType::kCPU);
+  grad_x.reshape(grad_out.shape());
+  PD_DISPATCH_FLOATING_TYPES(grad_out.type(), "assign_cpu_kernel", ([&] {
+                               assign_cpu_kernel<data_t>(
+                                   grad_out.data<data_t>(),
+                                   grad_x.mutable_data<data_t>(),
+                                   grad_out.size());
+                             }));
+  CheckAllBackwardAttrs(int_attr, float_vec_attr, str_vec_attr);
+  return {grad_x};
+}
+
+std::vector<paddle::Tensor> ConstAttrTestForward(
+    const paddle::Tensor& x,
+    const bool& bool_attr,
+    const int& int_attr,
+    const float& float_attr,
+    const int64_t& int64_attr,
+    const std::string& str_attr,
+    const std::vector<int>& int_vec_attr,
+    const std::vector<float>& float_vec_attr,
+    const std::vector<int64_t>& int64_vec_attr,
+    const std::vector<std::string>& str_vec_attr) {
+  auto out = paddle::Tensor(paddle::PlaceType::kCPU);
+  out.reshape(x.shape());
+  PD_DISPATCH_FLOATING_TYPES(
+      x.type(), "assign_cpu_kernel", ([&] {
+        assign_cpu_kernel<data_t>(
+            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
+      }));
+  // Check attrs value
+  CheckAllForwardAttrs(bool_attr,
+                       int_attr,
+                       float_attr,
+                       int64_attr,
+                       str_attr,
+                       int_vec_attr,
+                       float_vec_attr,
+                       int64_vec_attr,
+                       str_vec_attr);
+  return {out};
+}
+
+// The attrs of backward op must be the subset of attrs of forward op
+std::vector<paddle::Tensor> ConstAttrTestBackward(
+    const paddle::Tensor& grad_out,
+    const int& int_attr,
+    const std::vector<float>& float_vec_attr,
+    const std::vector<std::string>& str_vec_attr) {
+  auto grad_x = paddle::Tensor(paddle::PlaceType::kCPU);
+  grad_x.reshape(grad_out.shape());
+  PD_DISPATCH_FLOATING_TYPES(grad_out.type(), "assign_cpu_kernel", ([&] {
+                               assign_cpu_kernel<data_t>(
+                                   grad_out.data<data_t>(),
+                                   grad_x.mutable_data<data_t>(),
+                                   grad_out.size());
+                             }));
+  CheckAllBackwardAttrs(int_attr, float_vec_attr, str_vec_attr);
   return {grad_x};
 }
@@ -171,3 +252,25 @@ PD_BUILD_GRAD_OP(attr_test)
             "float_vec_attr: std::vector<float>",
             "str_vec_attr: std::vector<std::string>"})
     .SetKernelFn(PD_KERNEL(AttrTestBackward));
+
+PD_BUILD_OP(const_attr_test)
+    .Inputs({"X"})
+    .Outputs({"Out"})
+    .Attrs({"bool_attr: bool",
+            "int_attr: int",
+            "float_attr: float",
+            "int64_attr: int64_t",
+            "str_attr: std::string",
+            "int_vec_attr: std::vector<int>",
+            "float_vec_attr: std::vector<float>",
+            "int64_vec_attr: std::vector<int64_t>",
+            "str_vec_attr: std::vector<std::string>"})
+    .SetKernelFn(PD_KERNEL(AttrTestForward));
+
+PD_BUILD_GRAD_OP(const_attr_test)
+    .Inputs({paddle::Grad("Out")})
+    .Outputs({paddle::Grad("X")})
+    .Attrs({"int_attr: int",
+            "float_vec_attr: std::vector<float>",
+            "str_vec_attr: std::vector<std::string>"})
+    .SetKernelFn(PD_KERNEL(AttrTestBackward));

@@ -122,13 +122,14 @@ std::vector<paddle::Tensor> ConcatBackwardDynamicAxis(
 }
 
 std::vector<std::vector<int64_t>> ConcatInferShapeDynamicAxis(
-    std::vector<std::vector<int64_t>> input_shapes,
-    std::vector<int64_t> axis_shape) {
+    const std::vector<std::vector<int64_t>>& input_shapes,
+    const std::vector<int64_t>& axis_shape) {
   return {std::vector<int64_t>(input_shapes[0].size(), -1)};
 }
 
 std::vector<paddle::DataType> ConcatInferDtypeDynamicAxis(
-    std::vector<paddle::DataType> input_dtypes, paddle::DataType axis_dtype) {
+    const std::vector<paddle::DataType>& input_dtypes,
+    const paddle::DataType& axis_dtype) {
   return {input_dtypes[0]};
 }
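For orientation, infer-shape and infer-dtype functions like the two above are normally attached to an operator registration through SetInferShapeFn/SetInferDtypeFn; this hunk only changes their parameter signatures. The sketch below is illustrative, not part of the diff: the op name "custom_concat_with_attr" and the kernel "ConcatForwardDynamicAxis" are assumed, and it requires "paddle/extension.h" plus the functions above.

// Illustrative registration sketch under the assumptions stated above.
PD_BUILD_OP(custom_concat_with_attr)
    .Inputs({paddle::Vec("X"), "Axis"})
    .Outputs({"Out"})
    .SetKernelFn(PD_KERNEL(ConcatForwardDynamicAxis))
    .SetInferShapeFn(PD_INFER_SHAPE(ConcatInferShapeDynamicAxis))
    .SetInferDtypeFn(PD_INFER_DTYPE(ConcatInferDtypeDynamicAxis));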

@@ -40,24 +40,38 @@ custom_attrs = load(
 class TestJitCustomAttrs(unittest.TestCase):
-    def test_attr_value(self):
+    def setUp(self):
         paddle.set_device('cpu')
         # prepare test value
-        bool_attr = True
-        int_attr = 10
-        float_attr = 3.14
-        int64_attr = 10000000000
-        str_attr = "StrAttr"
-        int_vec_attr = [10, 10, 10]
-        float_vec_attr = [3.14, 3.14, 3.14]
-        int64_vec_attr = [10000000000, 10000000000, 10000000000]
-        str_vec_attr = ["StrAttr", "StrAttr", "StrAttr"]
+        self.bool_attr = True
+        self.int_attr = 10
+        self.float_attr = 3.14
+        self.int64_attr = 10000000000
+        self.str_attr = "StrAttr"
+        self.int_vec_attr = [10, 10, 10]
+        self.float_vec_attr = [3.14, 3.14, 3.14]
+        self.int64_vec_attr = [10000000000, 10000000000, 10000000000]
+        self.str_vec_attr = ["StrAttr", "StrAttr", "StrAttr"]
 
+    def test_attr_value(self):
         x = paddle.ones([2, 2], dtype='float32')
         x.stop_gradient = False
         out = custom_attrs.attr_test(
-            x, bool_attr, int_attr, float_attr, int64_attr, str_attr,
-            int_vec_attr, float_vec_attr, int64_vec_attr, str_vec_attr)
+            x, self.bool_attr, self.int_attr, self.float_attr, self.int64_attr,
+            self.str_attr, self.int_vec_attr, self.float_vec_attr,
+            self.int64_vec_attr, self.str_vec_attr)
+        out.stop_gradient = False
+        out.backward()
+
+        self.assertTrue(np.array_equal(x.numpy(), out.numpy()))
+
+    def test_const_attr_value(self):
+        x = paddle.ones([2, 2], dtype='float32')
+        x.stop_gradient = False
+        out = custom_attrs.const_attr_test(
+            x, self.bool_attr, self.int_attr, self.float_attr, self.int64_attr,
+            self.str_attr, self.int_vec_attr, self.float_vec_attr,
+            self.int64_vec_attr, self.str_vec_attr)
         out.stop_gradient = False
         out.backward()
