More UT for LayerNormFuse pass (#30891)

* Additionally, change validation to not throw errors from inside the pass.
Adam Osewski · committed by GitHub · commit 092a2b1413 · parent a80fe67f84
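In short: the pass's subgraph validation used to abort with PADDLE_ENFORCE*-style exceptions; after this change it logs the reason at VLOG(4) and skips the fusion instead. Two file-local macros carry the pattern: CHECK_TRUE returns from the void ApplyImpl(), while EXPECT_TRUE returns false from the boolean validator. A minimal, self-contained sketch of how the two compose (stand-in names and std::cerr instead of VLOG; this is not the Paddle code itself):

#include <iostream>

// Stand-ins for the diff's CHECK_TRUE / EXPECT_TRUE: log and bail out of the
// enclosing function instead of throwing.
#define EXPECT_TRUE_(expr, msg)     \
  do {                              \
    if (!(expr)) {                  \
      std::cerr << (msg) << "\n";   \
      return false;                 \
    }                               \
  } while (0)

#define CHECK_TRUE_(expr, msg)      \
  do {                              \
    if (!(expr)) {                  \
      std::cerr << (msg) << "\n";   \
      return;                       \
    }                               \
  } while (0)

bool ValidateReduce(int dim, bool keep_dim) {
  EXPECT_TRUE_(dim == -1, "reduction must be over the last dimension");
  EXPECT_TRUE_(keep_dim, "'keep_dim' must be true");
  return true;
}

void ApplyFusion(int dim, bool keep_dim) {
  // A failed check turns the pass into a no-op instead of crashing the caller.
  CHECK_TRUE_(ValidateReduce(dim, keep_dim), "validation failed, fusion skipped");
  std::cout << "fusing layer_norm\n";
}

int main() {
  ApplyFusion(0, true);   // logs and skips
  ApplyFusion(-1, true);  // fuses
}

The do { ... } while (0) wrapper makes each macro expand to a single statement, so it nests safely inside an unbraced if/else at the call site.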

@@ -12,7 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-#include <string>
 #include <vector>
 #include "paddle/fluid/framework/framework.pb.h"
@@ -22,6 +21,7 @@
 #include "paddle/fluid/framework/var_desc.h"
 #include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/string/pretty_log.h"
+#include "paddle/fluid/string/printf.h"
 
 namespace paddle {
 namespace framework {
@@ -30,34 +30,57 @@ namespace ir {
 // cpplint complaints (wrong!) for not included <string> header in below line.
 using string::PrettyLogDetail;  // NOLINT
 
+#define CHECK_TRUE(expr, err_msg)  \
+  do {                             \
+    int e_ = (expr);               \
+    if (!e_) {                     \
+      VLOG(4) << err_msg;          \
+      return;                      \
+    }                              \
+  } while (0)
+
+#define EXPECT_TRUE(expr, err_msg) \
+  do {                             \
+    int e_ = (expr);               \
+    if (!e_) {                     \
+      VLOG(4) << err_msg;          \
+      return false;                \
+    }                              \
+  } while (0)
+
 namespace {
 
-void validateReduceOpAttrs(const Node* node, const std::string& name) {
+bool validateReduceOpAttrs(const Node* node, const std::string& name) {
   const auto* op = node->Op();
   if (op->HasAttr("dim")) {
     auto dims = BOOST_GET_CONST(std::vector<int>, op->GetAttr("dim"));
-    PADDLE_ENFORCE_EQ(dims.size(), 1, platform::errors::PreconditionNotMet(
-                                          "The LayerNorm fusion ", name,
-                                          " reduction must happen only over "
-                                          "single dimension."));
-    PADDLE_ENFORCE_EQ(dims.front(), -1, platform::errors::PreconditionNotMet(
-                                            "The LayerNorm fusion ", name,
-                                            " reduction must happen over last "
-                                            "dimension."));
+    EXPECT_TRUE(
+        dims.size() == 1,
+        ::paddle::string::Sprintf(
+            "The LayerNorm fusion %s reduction must happen only over single "
+            "dimension.",
+            name));
+    EXPECT_TRUE(dims.front() == -1,
+                ::paddle::string::Sprintf("The LayerNorm fusion %s reduction "
+                                          "must happen over last dimension.",
+                                          name));
   }
   if (op->HasAttr("reduce_all")) {
-    PADDLE_ENFORCE(!BOOST_GET_CONST(bool, op->GetAttr("reduce_all")),
-                   platform::errors::PreconditionNotMet(
-                       "The LayerNorm fusion ", name,
-                       " reduction must have "
-                       "\'reduce_all\' attribute set to false."));
+    EXPECT_TRUE(
+        !BOOST_GET_CONST(bool, op->GetAttr("reduce_all")),
+        ::paddle::string::Sprintf(
+            "The LayerNorm fusion %s"
+            "reduction must have \'reduce_all\' attribute set to false.",
+            name));
   }
   if (op->HasAttr("keep_dim")) {
-    PADDLE_ENFORCE(BOOST_GET_CONST(bool, op->GetAttr("keep_dim")),
-                   platform::errors::PreconditionNotMet(
-                       "The LayerNorm fusion ", name,
-                       " reduction must have "
-                       "\'keep_dim\' attribute set to true."));
+    EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("keep_dim")),
+                ::paddle::string::Sprintf(
+                    "The LayerNorm fusion %s"
+                    " reduction must have \'keep_dim\' attribute set to true.",
+                    name));
   }
+  return true;
 }
 
 void setIntermediateOut(OpDesc* desc, const std::string& out_name,
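A note on the message formatting: ::paddle::string::Sprintf comes from the newly included paddle/fluid/string/printf.h and is tinyformat-based, so a %s placeholder renders its argument via operator<<; that is presumably why %s can be paired with integral arguments such as dims.size() above and eps_tensor->numel() below. A rough standard-library stand-in, purely illustrative and not the Paddle implementation:

#include <iostream>
#include <sstream>
#include <string>

// Each "%s" is replaced by the next argument rendered with operator<<,
// loosely mimicking a tinyformat-style Sprintf.
inline void FormatInto(std::ostringstream& os, const std::string& fmt) {
  os << fmt;
}

template <typename T, typename... Rest>
void FormatInto(std::ostringstream& os, const std::string& fmt, const T& v,
                const Rest&... rest) {
  const auto pos = fmt.find("%s");
  if (pos == std::string::npos) {
    os << fmt;  // more arguments than placeholders: ignore the rest
    return;
  }
  os << fmt.substr(0, pos) << v;
  FormatInto(os, fmt.substr(pos + 2), rest...);
}

template <typename... Args>
std::string Sprintf(const std::string& fmt, const Args&... args) {
  std::ostringstream os;
  FormatInto(os, fmt, args...);
  return os.str();
}

int main() {
  // Works for non-string arguments too, like the tensor sizes in the pass.
  std::cout << Sprintf("but has %s elements.", 3) << "\n";
}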
@@ -129,48 +152,46 @@ void LayerNormFusePass::ApplyImpl(Graph* graph) const {
   auto* eps_tensor = scope->FindVar(eps->Name())->GetMutable<LoDTensor>();
 
   // ------------------ subgraph node's validation ---------------------------
-  PADDLE_ENFORCE_EQ(
-      eps_tensor->numel(), 1,
-      platform::errors::InvalidArgument(
-          "The LayerNorm divisor "
-          "epsilon value must be one-element tensor, but has %s "
-          "elements.",
-          eps_tensor->numel()));
-  PADDLE_ENFORCE_EQ(eps_tensor->type(), proto::VarType::FP32,
-                    platform::errors::InvalidArgument(
-                        "The LayerNorm divisor "
-                        "epsilon value must be of FP32 data type, but is %s.",
-                        eps_tensor->type()));
+  CHECK_TRUE(
+      eps_tensor->numel() == 1,
+      ::paddle::string::Sprintf(
+          "The LayerNorm divisor epsilon value must be one-element tensor, "
+          "but has %s elements.",
+          eps_tensor->numel()));
+  CHECK_TRUE(
+      eps_tensor->type() == proto::VarType::FP32,
+      ::paddle::string::Sprintf("The LayerNorm divisor epsilon value "
+                                "must be of FP32 data type, but is %s.",
+                                eps_tensor->type()));
 
   const auto& gamma_shape = gamma->Var()->GetShape();
   const auto& beta_shape = beta->Var()->GetShape();
   const auto& x_shape = x->Var()->GetShape();
   int64_t x_last_dim = x_shape.back();
 
-  PADDLE_ENFORCE_EQ(gamma_shape.size(), 1,
-                    platform::errors::InvalidArgument(
-                        "The LayerNorm gamma "
-                        "(scale) tensor shape must be one-dimensional, "
-                        "but is %s.",
-                        gamma_shape.size()));
-  PADDLE_ENFORCE_EQ(beta_shape.size(), 1,
-                    platform::errors::InvalidArgument(
-                        "The LayerNorm beta "
-                        "(shift) tensor shape must be one-dimensional, "
-                        "but is %s.",
-                        beta_shape.size()));
-  PADDLE_ENFORCE_EQ(beta_shape, gamma_shape,
-                    platform::errors::InvalidArgument(
-                        "The LayerNorm beta "
-                        "and gamma tensors shapes' must be equal."));
-  PADDLE_ENFORCE_EQ(gamma_shape.front(), x_last_dim,
-                    platform::errors::InvalidArgument(
-                        "The LayerNorm beta "
-                        "and gamma tensors shapes' must be equal to the last "
-                        "input's dimension size."));
-
-  validateReduceOpAttrs(x_mean, "input mean");
-  validateReduceOpAttrs(std_dev, "std_dev mean");
+  CHECK_TRUE(
+      gamma_shape.size() == 1,
+      ::paddle::string::Sprintf("The LayerNorm gamma (scale) tensor "
+                                "shape must be one-dimensional, but is %s.",
+                                gamma_shape.size()));
+  CHECK_TRUE(
+      beta_shape.size() == 1,
+      ::paddle::string::Sprintf("The LayerNorm beta (shift) tensor "
+                                "shape must be one-dimensional, but is %s.",
+                                beta_shape.size()));
+  CHECK_TRUE(beta_shape == gamma_shape,
+             ::paddle::string::Sprintf("The LayerNorm beta and gamma tensors "
+                                       "shapes' must be equal."));
+  CHECK_TRUE(
+      gamma_shape.front() == x_last_dim,
+      ::paddle::string::Sprintf(
+          "The LayerNorm beta and gamma tensors "
+          "shapes' must be equal to the last input's dimension size."));
+
+  CHECK_TRUE(validateReduceOpAttrs(x_mean, "input mean"),
+             "Validation of input mean node failed.");
+  CHECK_TRUE(validateReduceOpAttrs(std_dev, "std_dev mean"),
+             "Validation of standard deviation node failed.");
 
   // ------------------ op creation and placement ---------------------------
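Because the rejected-subgraph messages go to VLOG(4), a fusion that silently does nothing can be diagnosed by raising glog verbosity. A hedged sketch (FLAGS_v is glog's verbosity flag; exporting GLOG_v=4 in the environment should have the same effect):

#include <glog/logging.h>

// Make the pass's VLOG(4) validation messages visible, e.g. inside a unit
// test that wants to see why a fusion was skipped.
void EnablePassDebugLogs() { FLAGS_v = 4; }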
@@ -213,6 +234,9 @@ void LayerNormFusePass::ApplyImpl(Graph* graph) const {
 }  // namespace framework
 }  // namespace paddle
 
+#undef CHECK_TRUE
+#undef EXPECT_TRUE
+
 REGISTER_PASS(layer_norm_fuse_pass, paddle::framework::ir::LayerNormFusePass);
 REGISTER_PASS_CAPABILITY(layer_norm_fuse_pass)
     .AddCombination(
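A note on the design choice: because CHECK_TRUE and EXPECT_TRUE are defined in a translation unit rather than a header, the trailing #undef pair keeps the names from leaking; EXPECT_TRUE in particular would collide with gtest's macro of the same name if these sources were ever merged with test code in a unity-style build.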

(File diff suppressed because it is too large.)

@@ -175,10 +175,11 @@ bool RunPassAndAssert(Graph* graph, const std::string& pass_name,
 }
 
 template <typename T>
-void InitLoDTensorHolder(Scope* scope, const paddle::platform::Place& place,
+void InitLoDTensorHolder(const Scope& scope,
+                         const paddle::platform::Place& place,
                          const std::string& var_name,
                          const std::vector<int64_t>& dims, const T* data) {
-  auto var = scope->Var(var_name);
+  auto var = scope.FindLocalVar(var_name);
   auto tensor = var->GetMutable<LoDTensor>();
   auto* tensor_mem_ptr = tensor->mutable_data<T>(make_ddim(dims), place);
   if (data != nullptr) {
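The new signature also tightens the helper's contract: with const Scope& and FindLocalVar(), the function looks up an existing variable (FindLocalVar() returns nullptr for unknown names) instead of creating one on demand, so the caller must create it first. A hedged usage sketch with an illustrative variable name, assuming the test-utility declarations are in scope:

#include <vector>

#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/platform/place.h"

// Illustrative caller: create the variable while non-const access is still
// available, then hand the scope to the helper by const reference.
void PrepareEpsInput(paddle::framework::Scope* scope) {
  scope->Var("eps");
  const float eps_value = 1e-5f;
  paddle::framework::ir::test::InitLoDTensorHolder<float>(
      *scope, paddle::platform::CPUPlace(), "eps", {1}, &eps_value);
}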
@@ -189,14 +190,16 @@ void InitLoDTensorHolder(Scope* scope, const paddle::platform::Place& place,
 }
 
 // Instantiate for below data types.
-template void InitLoDTensorHolder<float>(Scope*, const paddle::platform::Place&,
+template void InitLoDTensorHolder<float>(const Scope&,
+                                         const paddle::platform::Place&,
                                          const std::string&,
                                          const std::vector<int64_t>&,
                                          const float*);
-template void InitLoDTensorHolder<int>(Scope*, const paddle::platform::Place&,
+template void InitLoDTensorHolder<int>(const Scope&,
+                                       const paddle::platform::Place&,
                                        const std::string&,
                                        const std::vector<int64_t>&, const int*);
-template void InitLoDTensorHolder<double>(Scope*,
+template void InitLoDTensorHolder<double>(const Scope&,
                                           const paddle::platform::Place&,
                                           const std::string&,
                                           const std::vector<int64_t>&,
@@ -205,7 +208,13 @@ template void InitLoDTensorHolder<double>(Scope*,
 OpDesc* GetOp(const ProgramDesc& prog, const std::string& op_type,
               const std::string& output_name,
               const std::string& output_arg_name) {
-  auto all_ops = prog.Block(0).AllOps();
+  return GetOp(prog.Block(0), op_type, output_name, output_arg_name);
+}
+
+OpDesc* GetOp(const BlockDesc& block_desc, const std::string& op_type,
+              const std::string& output_name,
+              const std::string& output_arg_name) {
+  auto all_ops = block_desc.AllOps();
   for (auto* op_desc : all_ops) {
     if (op_desc->Type() == op_type && op_desc->HasOutput(output_name)) {
       const auto& arg_names = op_desc->Outputs().at(output_name);
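With the forwarding in place, the ProgramDesc overload is just a convenience for block 0, and tests can now search any block via the BlockDesc overload. A hedged call-site sketch (the op type and argument names are hypothetical):

#include "paddle/fluid/framework/program_desc.h"

// Look up the layer_norm op writing a given output argument and, if found,
// adjust one of its attributes.
void TweakEpsilon(const paddle::framework::ProgramDesc& prog) {
  paddle::framework::OpDesc* op = paddle::framework::ir::test::GetOp(
      prog.Block(0), "layer_norm", "Y", "layer_norm_out");
  if (op != nullptr) {
    op->SetAttr("epsilon", 1e-5f);
  }
}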

@@ -128,7 +128,8 @@ bool RunPassAndAssert(Graph* graph, const std::string& pass_name,
 /// @tparam T    Tensor data type.
 ///
 template <typename T>
-void InitLoDTensorHolder(Scope* scope, const paddle::platform::Place& place,
+void InitLoDTensorHolder(const Scope& scope,
+                         const paddle::platform::Place& place,
                          const std::string& var_name,
                          const std::vector<int64_t>& dims,
                          const T* data = nullptr);
@@ -148,6 +149,10 @@ OpDesc* GetOp(const ProgramDesc& prog, const std::string& op_type,
               const std::string& output_name,
               const std::string& output_arg_name);
 
+OpDesc* GetOp(const BlockDesc& block_desc, const std::string& op_type,
+              const std::string& output_name,
+              const std::string& output_arg_name);
+
 }  // namespace test
 }  // namespace ir
 }  // namespace framework
