More UT for LayerNormFuse pass (#30891)

* Additionally, change the pass to log and skip instead of throwing errors from inside the pass.
Adam Osewski committed 4 years ago via GitHub · commit 092a2b1413 (parent a80fe67f84)

@@ -12,7 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-#include <string>
 #include <vector>

 #include "paddle/fluid/framework/framework.pb.h"
@@ -22,6 +21,7 @@
 #include "paddle/fluid/framework/var_desc.h"
 #include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/string/pretty_log.h"
+#include "paddle/fluid/string/printf.h"
namespace paddle {
namespace framework {
@@ -30,34 +30,57 @@ namespace ir {
 // cpplint complaints (wrong!) for not included <string> header in below line.
 using string::PrettyLogDetail;  // NOLINT

+#define CHECK_TRUE(expr, err_msg) \
+  do {                            \
+    int e_ = (expr);              \
+    if (!e_) {                    \
+      VLOG(4) << err_msg;         \
+      return;                     \
+    }                             \
+  } while (0)
+
+#define EXPECT_TRUE(expr, err_msg) \
+  do {                             \
+    int e_ = (expr);               \
+    if (!e_) {                     \
+      VLOG(4) << err_msg;          \
+      return false;                \
+    }                              \
+  } while (0)
+
 namespace {

-void validateReduceOpAttrs(const Node* node, const std::string& name) {
+bool validateReduceOpAttrs(const Node* node, const std::string& name) {
   const auto* op = node->Op();
   if (op->HasAttr("dim")) {
     auto dims = BOOST_GET_CONST(std::vector<int>, op->GetAttr("dim"));
-    PADDLE_ENFORCE_EQ(dims.size(), 1, platform::errors::PreconditionNotMet(
-                                          "The LayerNorm fusion ", name,
-                                          " reduction must happen only over "
-                                          "single dimension."));
-    PADDLE_ENFORCE_EQ(dims.front(), -1, platform::errors::PreconditionNotMet(
-                                            "The LayerNorm fusion ", name,
-                                            " reduction must happen over last "
-                                            "dimension."));
+    EXPECT_TRUE(
+        dims.size() == 1,
+        ::paddle::string::Sprintf(
+            "The LayerNorm fusion %s reduction must happen only over single "
+            "dimension.",
+            name));
+    EXPECT_TRUE(dims.front() == -1,
+                ::paddle::string::Sprintf("The LayerNorm fusion %s reduction "
+                                          "must happen over last dimension.",
+                                          name));
   }
   if (op->HasAttr("reduce_all")) {
-    PADDLE_ENFORCE(!BOOST_GET_CONST(bool, op->GetAttr("reduce_all")),
-                   platform::errors::PreconditionNotMet(
-                       "The LayerNorm fusion ", name,
-                       " reduction must have "
-                       "\'reduce_all\' attribute set to false."));
+    EXPECT_TRUE(
+        !BOOST_GET_CONST(bool, op->GetAttr("reduce_all")),
+        ::paddle::string::Sprintf(
+            "The LayerNorm fusion %s"
+            " reduction must have \'reduce_all\' attribute set to false.",
+            name));
   }
   if (op->HasAttr("keep_dim")) {
-    PADDLE_ENFORCE(BOOST_GET_CONST(bool, op->GetAttr("keep_dim")),
-                   platform::errors::PreconditionNotMet(
-                       "The LayerNorm fusion ", name,
-                       " reduction must have "
-                       "\'keep_dim\' attribute set to true."));
+    EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("keep_dim")),
+                ::paddle::string::Sprintf(
+                    "The LayerNorm fusion %s"
+                    " reduction must have \'keep_dim\' attribute set to true.",
+                    name));
   }
+  return true;
 }

 void setIntermediateOut(OpDesc* desc, const std::string& out_name,
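
For readers skimming the diff: CHECK_TRUE and EXPECT_TRUE above turn a failed precondition into a VLOG(4) message plus an early return (void and bool flavors respectively), so a malformed subgraph skips the fusion instead of aborting the whole program. A minimal, self-contained sketch of the same pattern, with illustrative names that are not part of the pass:

#include <iostream>
#include <string>

// Same shape as EXPECT_TRUE above: log the message, then make the
// enclosing function return false instead of throwing.
#define EXPECT_TRUE_OR_BAIL(expr, err_msg) \
  do {                                     \
    if (!(expr)) {                         \
      std::cerr << (err_msg) << "\n";      \
      return false;                        \
    }                                      \
  } while (0)

bool ValidateReductionDims(int ndims, const std::string& name) {
  EXPECT_TRUE_OR_BAIL(
      ndims == 1, "The " + name + " reduction must be one-dimensional.");
  return true;
}

int main() {
  std::cout << ValidateReductionDims(1, "mean") << "\n";      // 1: passes
  std::cout << ValidateReductionDims(2, "variance") << "\n";  // 0: bails out
}

The do { ... } while (0) wrapper makes each macro expand to a single statement, so it composes safely with unbraced if/else.
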
@@ -129,48 +152,46 @@ void LayerNormFusePass::ApplyImpl(Graph* graph) const {
   auto* eps_tensor = scope->FindVar(eps->Name())->GetMutable<LoDTensor>();

   // ------------------ subgraph node's validation ---------------------------
-  PADDLE_ENFORCE_EQ(
-      eps_tensor->numel(), 1,
-      platform::errors::InvalidArgument(
-          "The LayerNorm divisor "
-          "epsilon value must be one-element tensor, but has %s "
-          "elements.",
+  CHECK_TRUE(
+      eps_tensor->numel() == 1,
+      ::paddle::string::Sprintf(
+          "The LayerNorm divisor epsilon value must be one-element tensor, "
+          "but has %s elements.",
          eps_tensor->numel()));
-  PADDLE_ENFORCE_EQ(eps_tensor->type(), proto::VarType::FP32,
-                    platform::errors::InvalidArgument(
-                        "The LayerNorm divisor "
-                        "epsilon value must be of FP32 data type, but is %s.",
-                        eps_tensor->type()));
+  CHECK_TRUE(
+      eps_tensor->type() == proto::VarType::FP32,
+      ::paddle::string::Sprintf("The LayerNorm divisor epsilon value "
+                                "must be of FP32 data type, but is %s.",
+                                eps_tensor->type()));

   const auto& gamma_shape = gamma->Var()->GetShape();
   const auto& beta_shape = beta->Var()->GetShape();
   const auto& x_shape = x->Var()->GetShape();
   int64_t x_last_dim = x_shape.back();

-  PADDLE_ENFORCE_EQ(gamma_shape.size(), 1,
-                    platform::errors::InvalidArgument(
-                        "The LayerNorm gamma "
-                        "(scale) tensor shape must be one-dimensional, "
-                        "but is %s.",
-                        gamma_shape.size()));
-  PADDLE_ENFORCE_EQ(beta_shape.size(), 1,
-                    platform::errors::InvalidArgument(
-                        "The LayerNorm beta "
-                        "(shift) tensor shape must be one-dimensional, "
-                        "but is %s.",
-                        beta_shape.size()));
-  PADDLE_ENFORCE_EQ(beta_shape, gamma_shape,
-                    platform::errors::InvalidArgument(
-                        "The LayerNorm beta "
-                        "and gamma tensors shapes' must be equal."));
-  PADDLE_ENFORCE_EQ(gamma_shape.front(), x_last_dim,
-                    platform::errors::InvalidArgument(
-                        "The LayerNorm beta "
-                        "and gamma tensors shapes' must be equal to the last "
-                        "input's dimension size."));
-  validateReduceOpAttrs(x_mean, "input mean");
-  validateReduceOpAttrs(std_dev, "std_dev mean");
+  CHECK_TRUE(
+      gamma_shape.size() == 1,
+      ::paddle::string::Sprintf("The LayerNorm gamma (scale) tensor "
+                                "shape must be one-dimensional, but is %s.",
+                                gamma_shape.size()));
+  CHECK_TRUE(
+      beta_shape.size() == 1,
+      ::paddle::string::Sprintf("The LayerNorm beta (shift) tensor "
+                                "shape must be one-dimensional, but is %s.",
+                                beta_shape.size()));
+  CHECK_TRUE(beta_shape == gamma_shape,
+             ::paddle::string::Sprintf("The LayerNorm beta and gamma tensors "
+                                       "shapes' must be equal."));
+  CHECK_TRUE(
+      gamma_shape.front() == x_last_dim,
+      ::paddle::string::Sprintf(
+          "The LayerNorm beta and gamma tensors "
+          "shapes' must be equal to the last input's dimension size."));
+  CHECK_TRUE(validateReduceOpAttrs(x_mean, "input mean"),
+             "Validation of input mean node failed.");
+  CHECK_TRUE(validateReduceOpAttrs(std_dev, "std_dev mean"),
+             "Validation of standard deviation node failed.");

   // ------------------ op creation and placement ---------------------------
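
The CHECK_TRUE conversions above change ApplyImpl's contract: a subgraph that fails validation is now logged at VLOG(4) and left untouched, rather than raising a PADDLE_ENFORCE error from inside the pass. A hedged sketch of that skip-not-throw behavior, using toy types and hypothetical names rather than the real pass:

#include <iostream>
#include <vector>

// Void flavor of the pattern: bail out of the pass, leaving the input as-is.
#define CHECK_TRUE_OR_SKIP(expr, err_msg) \
  do {                                    \
    if (!(expr)) {                        \
      std::cerr << (err_msg) << "\n";     \
      return;                             \
    }                                     \
  } while (0)

void FuseIfEligible(std::vector<int>* dims) {
  CHECK_TRUE_OR_SKIP(!dims->empty(), "Empty shape, skipping fusion.");
  CHECK_TRUE_OR_SKIP(dims->back() == -1,
                     "Reduction is not over the last dimension, skipping.");
  dims->push_back(42);  // stands in for "rewrite the graph"
}

int main() {
  std::vector<int> ok{3, -1}, bad{3, 0};
  FuseIfEligible(&ok);   // preconditions hold: "fusion" applied
  FuseIfEligible(&bad);  // logs and returns: bad is unchanged
  std::cout << ok.size() << " " << bad.size() << "\n";  // prints: 3 2
}
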
@@ -213,6 +234,9 @@ void LayerNormFusePass::ApplyImpl(Graph* graph) const {
 }  // namespace framework
 }  // namespace paddle

+#undef CHECK_TRUE
+#undef EXPECT_TRUE
+
 REGISTER_PASS(layer_norm_fuse_pass, paddle::framework::ir::LayerNormFusePass);
 REGISTER_PASS_CAPABILITY(layer_norm_fuse_pass)
     .AddCombination(
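
Note also the #undef pair just above the registration macros: CHECK_TRUE and EXPECT_TRUE are plain preprocessor macros, so undefining them at the end of the file keeps them from colliding with identically named macros elsewhere (for example, gtest's EXPECT_TRUE, or under unity builds). A tiny illustration of the idiom:

// File-local helper macro, valid only between its #define and #undef.
#define LOCAL_SQUARE(x) ((x) * (x))
int SquaredThree() { return LOCAL_SQUARE(3); }  // expands to ((3) * (3))
#undef LOCAL_SQUARE
// From here on LOCAL_SQUARE is undefined again, so other code that
// defines a macro of the same name is unaffected.
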

(File diff suppressed because it is too large.)

@@ -175,10 +175,11 @@ bool RunPassAndAssert(Graph* graph, const std::string& pass_name,
 }

 template <typename T>
-void InitLoDTensorHolder(Scope* scope, const paddle::platform::Place& place,
+void InitLoDTensorHolder(const Scope& scope,
+                         const paddle::platform::Place& place,
                          const std::string& var_name,
                          const std::vector<int64_t>& dims, const T* data) {
-  auto var = scope->Var(var_name);
+  auto var = scope.FindLocalVar(var_name);
   auto tensor = var->GetMutable<LoDTensor>();
   auto* tensor_mem_ptr = tensor->mutable_data<T>(make_ddim(dims), place);
   if (data != nullptr) {
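
The switch from Scope* with scope->Var(...) to const Scope& with scope.FindLocalVar(...) moves variable creation out of the helper: Scope::Var creates the variable on demand, while FindLocalVar only looks up one that already exists, so the test must now pre-create its variables before calling the helper. A toy model of that create-vs-find distinction (not Paddle code):

#include <cassert>
#include <map>
#include <memory>
#include <string>

struct Variable { int value = 0; };

class Scope {
 public:
  // Like Scope::Var: creates the variable on first use.
  Variable* Var(const std::string& name) {
    auto& slot = vars_[name];
    if (!slot) slot = std::make_unique<Variable>();
    return slot.get();
  }
  // Like Scope::FindLocalVar: lookup only; nullptr when absent.
  Variable* FindLocalVar(const std::string& name) const {
    auto it = vars_.find(name);
    return it == vars_.end() ? nullptr : it->second.get();
  }

 private:
  std::map<std::string, std::unique_ptr<Variable>> vars_;
};

int main() {
  Scope scope;
  assert(scope.FindLocalVar("w") == nullptr);  // find never creates
  scope.Var("w");                              // caller creates up front
  assert(scope.FindLocalVar("w") != nullptr);  // helper can now find it
}
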
@@ -189,14 +190,16 @@ void InitLoDTensorHolder(Scope* scope, const paddle::platform::Place& place,
 }

 // Instantiate for below data types.
-template void InitLoDTensorHolder<float>(Scope*, const paddle::platform::Place&,
+template void InitLoDTensorHolder<float>(const Scope&,
+                                         const paddle::platform::Place&,
                                          const std::string&,
                                          const std::vector<int64_t>&,
                                          const float*);
-template void InitLoDTensorHolder<int>(Scope*, const paddle::platform::Place&,
+template void InitLoDTensorHolder<int>(const Scope&,
+                                       const paddle::platform::Place&,
                                        const std::string&,
                                        const std::vector<int64_t>&, const int*);
-template void InitLoDTensorHolder<double>(Scope*,
+template void InitLoDTensorHolder<double>(const Scope&,
                                           const paddle::platform::Place&,
                                           const std::string&,
                                           const std::vector<int64_t>&,
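
These `template void InitLoDTensorHolder<...>` lines are explicit instantiations: the template's definition lives in this .cc file, so each signature must be stamped out here for every data type the tests use, and all three have to track the new const Scope& parameter. A generic sketch of the idiom, with hypothetical names:

#include <vector>

template <typename T>
T SumOf(const std::vector<T>& v) {
  T total{};
  for (const T& x : v) total += x;
  return total;
}

// Explicit instantiations: emit object code for exactly these types, so
// other translation units can link against them with only a declaration.
template float SumOf<float>(const std::vector<float>&);
template int SumOf<int>(const std::vector<int>&);
template double SumOf<double>(const std::vector<double>&);
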
@@ -205,7 +208,13 @@ template void InitLoDTensorHolder<double>(Scope*,
 OpDesc* GetOp(const ProgramDesc& prog, const std::string& op_type,
               const std::string& output_name,
               const std::string& output_arg_name) {
-  auto all_ops = prog.Block(0).AllOps();
+  return GetOp(prog.Block(0), op_type, output_name, output_arg_name);
+}
+
+OpDesc* GetOp(const BlockDesc& block_desc, const std::string& op_type,
+              const std::string& output_name,
+              const std::string& output_arg_name) {
+  auto all_ops = block_desc.AllOps();
   for (auto* op_desc : all_ops) {
     if (op_desc->Type() == op_type && op_desc->HasOutput(output_name)) {
       const auto& arg_names = op_desc->Outputs().at(output_name);
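
The GetOp change is an overload-delegation refactor: the original ProgramDesc entry point now forwards to a new BlockDesc overload that holds the actual search loop, so tests can query any block rather than only Block(0). The shape of that refactor in miniature (toy types, hypothetical names):

#include <string>
#include <vector>

struct Block { std::vector<std::string> op_types; };
struct Program { Block block0; };

// Core lookup operates on a single block.
const std::string* FindOpType(const Block& block, const std::string& type) {
  for (const auto& op : block.op_types) {
    if (op == type) return &op;
  }
  return nullptr;
}

// Thin convenience overload: delegate to the block-level version, the same
// way GetOp(prog, ...) now calls GetOp(prog.Block(0), ...).
const std::string* FindOpType(const Program& prog, const std::string& type) {
  return FindOpType(prog.block0, type);
}

int main() {
  Program p{{{"relu", "layer_norm"}}};
  return FindOpType(p, "layer_norm") ? 0 : 1;  // found: exit code 0
}
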

@@ -128,7 +128,8 @@ bool RunPassAndAssert(Graph* graph, const std::string& pass_name,
 /// @tparam T    Tensor data type.
 ///
 template <typename T>
-void InitLoDTensorHolder(Scope* scope, const paddle::platform::Place& place,
+void InitLoDTensorHolder(const Scope& scope,
+                         const paddle::platform::Place& place,
                          const std::string& var_name,
                          const std::vector<int64_t>& dims,
                          const T* data = nullptr);
@@ -148,6 +149,10 @@ OpDesc* GetOp(const ProgramDesc& prog, const std::string& op_type,
               const std::string& output_name,
               const std::string& output_arg_name);

+OpDesc* GetOp(const BlockDesc& block_desc, const std::string& op_type,
+              const std::string& output_name,
+              const std::string& output_arg_name);
+
 }  // namespace test
 }  // namespace ir
 }  // namespace framework
