Add Assert Op (#24280)
1. To make ProgramTranslator support the `assert` grammar, this PR adds an `assert` Python API and its C++ implementation. 2. Fix a bug: graph_pattern_detector.h includes <gtest/gtest_prod.h> but did not declare the dependency in CMakeLists, which can cause a single-target build failure. 3. Refactor `Formatter` in print_op to make it reusable, and reuse the formatter for printing in the assert op.
parent
8c296dea75
commit
8a1a2af82e
@ -0,0 +1,108 @@
|
||||
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#include "paddle/fluid/framework/op_registry.h"
|
||||
#include "paddle/fluid/framework/var_type.h"
|
||||
#include "paddle/fluid/operators/controlflow/while_op_helper.h"
|
||||
#include "paddle/fluid/operators/tensor_formatter.h"
|
||||
|
||||
// Operator input/attribute names shared by AssertOp, its proto maker, and
// its shape-inference functor.
const char kCond[] = "Cond";            // boolean scalar condition tensor
const char kData[] = "Data";            // tensors printed when the assert fails
const char kSummarize[] = "summarize";  // max data entries printed per tensor
|
||||
|
||||
namespace paddle {
|
||||
namespace operators {
|
||||
|
||||
using framework::LoDTensor;
|
||||
|
||||
class AssertOp : public framework::OperatorBase {
|
||||
public:
|
||||
AssertOp(const std::string &type, const framework::VariableNameMap &inputs,
|
||||
const framework::VariableNameMap &outputs,
|
||||
const framework::AttributeMap &attrs)
|
||||
: OperatorBase(type, inputs, outputs, attrs) {}
|
||||
|
||||
private:
|
||||
void RunImpl(const framework::Scope &scope,
|
||||
const platform::Place &dev_place) const override {
|
||||
const framework::Variable *cond_var_ptr = scope.FindVar(Input(kCond));
|
||||
PADDLE_ENFORCE_NOT_NULL(cond_var_ptr,
|
||||
platform::errors::NotFound(
|
||||
"Input(Condition) of AssertOp is not found."));
|
||||
const LoDTensor &cond = cond_var_ptr->Get<LoDTensor>();
|
||||
PADDLE_ENFORCE_EQ(
|
||||
cond.dims(), paddle::framework::make_ddim({1}),
|
||||
platform::errors::InvalidArgument(
|
||||
"The numel of Input(Condition) of AssertOp must be 1. But now "
|
||||
"the Condition's shape is %s.",
|
||||
cond.dims().to_str()));
|
||||
|
||||
bool cond_data = GetCondData(cond);
|
||||
if (cond_data) {
|
||||
return;
|
||||
}
|
||||
|
||||
TensorFormatter formatter;
|
||||
formatter.SetSummarize(Attr<int64_t>(kSummarize));
|
||||
|
||||
const std::vector<std::string> &x_names = Inputs(kData);
|
||||
for (const std::string &name : x_names) {
|
||||
const framework::Variable *x_var_ptr = scope.FindVar(name);
|
||||
const framework::LoDTensor &x_tensor = x_var_ptr->Get<LoDTensor>();
|
||||
formatter.Print(x_tensor, name);
|
||||
}
|
||||
|
||||
PADDLE_THROW(platform::errors::InvalidArgument(
|
||||
"The condition variable '%s' of AssertOp must be "
|
||||
"true, but received false",
|
||||
Input(kCond)));
|
||||
}
|
||||
};
|
||||
|
||||
// Declares AssertOp's inputs and attributes for the op registry / docs.
class AssertOpProtoMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput(
        kCond,
        "The boolean scalar condition tensor which is asserted to be true.");
    AddInput(kData,
             "The tensors to print when the assert condition is not true.")
        .AsDuplicable();
    // NOTE: fixed typo in the description ("less then" -> "less than").
    AddAttr<int64_t>(
        kSummarize,
        "The number of entries of each tensor to print when the "
        "assert condition is not true. -1 means print all entries. If "
        "the number of entries of a tensor is less than "
        "summarize_num, this OP will print all entries of the tensor.")
        .SetDefault(-1);
    AddComment(
        R"DOC(Assert the input Condition Tensor is true and print Tensors if the Condition Tensor is false.)DOC");
  }
};
|
||||
|
||||
// Compile-time check for AssertOp: the op has no outputs, so we only verify
// that the condition input exists. Its exact shape ([1]) is validated at
// run time in AssertOp::RunImpl, where the tensor dims are known.
class AssertOpInferShape : public framework::InferShapeBase {
 public:
  void operator()(framework::InferShapeContext *context) const override {
    OP_INOUT_CHECK(context->HasInputs(kCond), "Input", "Condition", "AssertOp");
  }
};
|
||||
|
||||
} // namespace operators
|
||||
} // namespace paddle
|
||||
|
||||
namespace ops = paddle::operators;
// Assert produces no gradients, so empty grad-op makers are registered for
// both static-graph (OpDesc) and imperative (OpBase) modes.
REGISTER_OPERATOR(
    assert, ops::AssertOp, ops::AssertOpProtoMaker, ops::AssertOpInferShape,
    paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
    paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>);
|
@ -0,0 +1,154 @@
|
||||
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#include "paddle/fluid/operators/tensor_formatter.h"

#include <algorithm>
#include <iostream>
#include <mutex>
#include <sstream>
#include <string>
|
||||
|
||||
namespace paddle {
|
||||
namespace operators {
|
||||
|
||||
// Enables/disables the " - dtype:" line in Format() output.
void TensorFormatter::SetPrintTensorType(bool print_tensor_type) {
  print_tensor_type_ = print_tensor_type;
}

// Enables/disables the " - shape:" line in Format() output.
void TensorFormatter::SetPrintTensorShape(bool print_tensor_shape) {
  print_tensor_shape_ = print_tensor_shape;
}

// Enables/disables the " - lod:" line in Format() output.
void TensorFormatter::SetPrintTensorLod(bool print_tensor_lod) {
  print_tensor_lod_ = print_tensor_lod;
}

// Enables/disables the " - layout:" line in Format() output.
void TensorFormatter::SetPrintTensorLayout(bool print_tensor_layout) {
  print_tensor_layout_ = print_tensor_layout;
}

// Caps how many data entries FormatData() prints; -1 means all entries.
void TensorFormatter::SetSummarize(int64_t summarize) {
  summarize_ = summarize;
}
|
||||
|
||||
// Formats `print_tensor` and writes the result to stdout. A process-wide
// mutex serializes the write so output from concurrent callers does not
// interleave; the (read-only) formatting work happens outside the lock.
void TensorFormatter::Print(const framework::LoDTensor& print_tensor,
                            const std::string& tensor_name,
                            const std::string& message) {
  static std::mutex print_mutex;
  const std::string formatted = Format(print_tensor, tensor_name, message);
  std::lock_guard<std::mutex> guard(print_mutex);
  std::cout << formatted;
}
|
||||
|
||||
// Serializes `print_tensor` into a human-readable multi-line string.
// Sections appear in a fixed order — name, message, lod, place, shape,
// layout, dtype, data — and each optional section is gated by the
// corresponding print_tensor_*_ flag; `tensor_name`/`message` are emitted
// only when non-empty.
std::string TensorFormatter::Format(const framework::LoDTensor& print_tensor,
                                    const std::string& tensor_name,
                                    const std::string& message) {
  std::stringstream log_stream;
  if (!tensor_name.empty()) {
    log_stream << "Variable: " << tensor_name << std::endl;
  }

  if (!message.empty()) {
    log_stream << " - message: " << message << std::endl;
  }

  if (print_tensor_lod_) {
    // LoD is a list of levels; each level prints as "{o0, o1, ...}" nested
    // inside an outer pair of braces.
    log_stream << " - lod: {";
    const framework::LoD& lod = print_tensor.lod();
    // NOTE(review): `level` is copied each iteration; `const auto&` would
    // avoid the copy — confirm before changing.
    for (auto level : lod) {
      log_stream << "{";
      bool is_first = true;  // suppress the comma before the first offset
      for (auto i : level) {
        if (is_first) {
          log_stream << i;
          is_first = false;
        } else {
          log_stream << ", " << i;
        }
      }
      log_stream << "}";
    }
    log_stream << "}" << std::endl;
  }

  // Place is always printed (no flag guards it).
  log_stream << " - place: " << print_tensor.place() << std::endl;

  if (print_tensor_shape_) {
    log_stream << " - shape: " << print_tensor.dims().to_str() << std::endl;
  }

  if (print_tensor_layout_) {
    log_stream << " - layout: "
               << framework::DataLayoutToString(print_tensor.layout())
               << std::endl;
  }

  std::type_index dtype = framework::ToTypeIndex(print_tensor.type());
  if (print_tensor_type_) {
    // Demangle so the dtype line shows e.g. "float" rather than "f".
    log_stream << " - dtype: " << platform::demangle(dtype.name())
               << std::endl;
  }

  // Dispatch on the runtime element type; unsupported dtypes are reported
  // (with the raw mangled name) instead of printed.
  if (framework::IsType<const float>(dtype)) {
    FormatData<float>(print_tensor, log_stream);
  } else if (framework::IsType<const double>(dtype)) {
    FormatData<double>(print_tensor, log_stream);
  } else if (framework::IsType<const int>(dtype)) {
    FormatData<int>(print_tensor, log_stream);
  } else if (framework::IsType<const int64_t>(dtype)) {
    FormatData<int64_t>(print_tensor, log_stream);
  } else if (framework::IsType<const bool>(dtype)) {
    FormatData<bool>(print_tensor, log_stream);
  } else {
    log_stream << " - data: unprintable type: " << dtype.name() << std::endl;
  }
  return log_stream.str();
}
|
||||
|
||||
template <typename T>
|
||||
void TensorFormatter::FormatData(const framework::LoDTensor& print_tensor,
|
||||
std::stringstream& log_stream) {
|
||||
int64_t print_size = summarize_ == -1
|
||||
? print_tensor.numel()
|
||||
: std::min(summarize_, print_tensor.numel());
|
||||
const T* data = nullptr;
|
||||
if (is_cpu_place(print_tensor.place())) {
|
||||
data = print_tensor.data<T>();
|
||||
} else {
|
||||
framework::LoDTensor cpu_tensor;
|
||||
platform::CPUPlace cpu_place;
|
||||
TensorCopy(print_tensor, cpu_place, &cpu_tensor);
|
||||
data = cpu_tensor.data<T>();
|
||||
}
|
||||
|
||||
log_stream << " - data: [";
|
||||
if (print_size > 0) {
|
||||
log_stream << data[0];
|
||||
for (int64_t i = 1; i < print_size; ++i) {
|
||||
log_stream << " " << data[i];
|
||||
}
|
||||
}
|
||||
log_stream << "]" << std::endl;
|
||||
}
|
||||
|
||||
// Explicit instantiations for every element type dispatched in Format(),
// so the template definition can stay in this translation unit.
template void TensorFormatter::FormatData<bool>(
    const framework::LoDTensor& print_tensor, std::stringstream& log_stream);
template void TensorFormatter::FormatData<float>(
    const framework::LoDTensor& print_tensor, std::stringstream& log_stream);
template void TensorFormatter::FormatData<double>(
    const framework::LoDTensor& print_tensor, std::stringstream& log_stream);
template void TensorFormatter::FormatData<int>(
    const framework::LoDTensor& print_tensor, std::stringstream& log_stream);
template void TensorFormatter::FormatData<int64_t>(
    const framework::LoDTensor& print_tensor, std::stringstream& log_stream);
|
||||
|
||||
} // namespace operators
|
||||
} // namespace paddle
|
@ -0,0 +1,55 @@
|
||||
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#pragma once

#include <sstream>
#include <string>

#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/framework/var_type.h"
|
||||
|
||||
namespace paddle {
|
||||
namespace operators {
|
||||
|
||||
// Renders a LoDTensor (metadata plus data) into a human-readable string.
// Shared by print_op and assert_op so both emit identical tensor dumps.
class TensorFormatter {
 public:
  TensorFormatter() {}

  // Returns the formatted dump of `print_tensor`; `tensor_name` and
  // `message` are included as header lines when non-empty.
  std::string Format(const framework::LoDTensor& print_tensor,
                     const std::string& tensor_name = "",
                     const std::string& message = "");

  // Formats and writes to stdout; safe to call from multiple threads
  // (the write is serialized internally).
  void Print(const framework::LoDTensor& print_tensor,
             const std::string& tensor_name = "",
             const std::string& message = "");

  // Toggles for the individual sections Format() emits.
  void SetPrintTensorType(bool print_tensor_type);
  void SetPrintTensorShape(bool print_tensor_shape);
  void SetPrintTensorLod(bool print_tensor_lod);
  void SetPrintTensorLayout(bool print_tensor_layout);
  // Max number of data entries printed per tensor; -1 means all.
  void SetSummarize(int64_t summarize);

 private:
  // Appends the " - data: [...]" section for element type T, copying
  // device tensors to host memory first.
  template <typename T>
  void FormatData(const framework::LoDTensor& print_tensor,
                  std::stringstream& log_stream);

  // Defaults: print everything, with no entry limit.
  int64_t summarize_ = -1;
  bool print_tensor_type_ = true;
  bool print_tensor_shape_ = true;
  bool print_tensor_lod_ = true;
  bool print_tensor_layout_ = true;
};
|
||||
|
||||
} // namespace operators
|
||||
} // namespace paddle
|
@ -0,0 +1,90 @@
|
||||
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import paddle.fluid as fluid
|
||||
import paddle.fluid.layers as layers
|
||||
import unittest
|
||||
|
||||
|
||||
class TestAssertOp(unittest.TestCase):
    """End-to-end tests for the `layers.Assert` API / assert op."""

    def run_network(self, net_func):
        # Build net_func into a fresh program pair and execute it once.
        main_program, startup_program = fluid.Program(), fluid.Program()
        with fluid.program_guard(main_program, startup_program):
            net_func()
        fluid.Executor().run(main_program)

    def test_assert_true(self):
        # A true scalar condition must run without raising.
        def build():
            cond = layers.fill_constant(shape=[1], dtype='bool', value=True)
            layers.Assert(cond, [])

        self.run_network(build)

    def test_assert_false(self):
        # A false condition must raise at execution time.
        def build():
            cond = layers.fill_constant(shape=[1], dtype='bool', value=False)
            layers.Assert(cond)

        with self.assertRaises(fluid.core.EnforceNotMet):
            self.run_network(build)

    def test_assert_cond_numel_error(self):
        # Condition tensors whose shape is not [1] are rejected at run time.
        def build():
            cond = layers.fill_constant(shape=[1, 2], dtype='bool', value=True)
            layers.Assert(cond, [])

        with self.assertRaises(fluid.core.EnforceNotMet):
            self.run_network(build)

    def test_assert_print_data(self):
        # When the assert fires, the supplied data tensors are printed.
        def build():
            zero = layers.fill_constant(shape=[1], dtype='int64', value=0)
            one = layers.fill_constant(shape=[1], dtype='int64', value=1)
            cond = layers.less_than(one, zero)  # False
            layers.Assert(cond, [zero, one])

        print("test_assert_print_data")
        with self.assertRaises(fluid.core.EnforceNotMet):
            self.run_network(build)

    def test_assert_summary(self):
        # summarize=5 limits how many of the 10 entries are printed.
        def build():
            x = layers.fill_constant(shape=[10], dtype='float32', value=2.0)
            cond = layers.reduce_max(x) < 1.0
            layers.Assert(cond, (x, ), 5)

        print("test_assert_summary")
        with self.assertRaises(fluid.core.EnforceNotMet):
            self.run_network(build)

    def test_assert_summary_greater_than_size(self):
        # summarize=10 exceeds numel=6, so every entry is printed.
        def build():
            x = layers.fill_constant(shape=[2, 3], dtype='float32', value=2.0)
            cond = layers.reduce_max(x) < 1.0
            layers.Assert(cond, [x], 10, name="test")

        print("test_assert_summary_greater_than_size")
        with self.assertRaises(fluid.core.EnforceNotMet):
            self.run_network(build)
|
||||
|
||||
|
||||
# Allow running this file directly: discover and run all TestAssertOp cases.
if __name__ == '__main__':
    unittest.main()
|
Loading…
Reference in new issue