!11537 layer norm fusion support new IR

From: @wangzhe128
Reviewed-by: 
Signed-off-by:
pull/11537/MERGE
Authored by mindspore-ci-bot, committed by Gitee
commit e2fc667481

@@ -81,7 +81,7 @@ int LayerNorm::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffe
return RET_ERROR;
}
- auto val_offset = schema::CreateLayerNorm(*fbb, attr->epsilon(), attr->begin_norm_axis(), attr->begin_params_axis());
+ auto val_offset = schema::CreateLayerNorm(*fbb, attr->begin_norm_axis(), attr->begin_params_axis(), attr->epsilon());
auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_LayerNorm, val_offset.o);
fbb->Finish(prim_offset);
return RET_OK;
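
Note on the reordering above: the generated flatbuffers builder takes the LayerNorm fields positionally, so a call that passes epsilon in an axis slot still compiles (the float silently converts to the integer parameter) but serializes wrong attribute values. A minimal standalone sketch of that hazard, using a hypothetical stand-in for the generated schema::CreateLayerNorm and assuming the new argument order (axes first, epsilon last) matches the generated signature:

#include <cstdio>

// Hypothetical stand-in for the generated builder: axes first, epsilon last.
static void CreateLayerNormSketch(int begin_norm_axis, int begin_params_axis, float epsilon) {
  std::printf("begin_norm_axis=%d begin_params_axis=%d epsilon=%g\n", begin_norm_axis, begin_params_axis, epsilon);
}

int main() {
  CreateLayerNormSketch(1e-5f, -1, -1);  // wrong order still compiles: 1e-5f truncates into begin_norm_axis=0
  CreateLayerNormSketch(-1, -1, 1e-5f);  // correct order matches the parameter list
  return 0;
}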

@@ -14,6 +14,7 @@ add_library(hiai_ir_build SHARED IMPORTED)
set_target_properties(hiai_ir_build PROPERTIES IMPORTED_LOCATION
${DDK_LIB_PATH}/libhiai_ir_build.so)
add_library(npu_kernel_mid OBJECT ${NPU_RUNTIME_SRC})
+ add_dependencies(npu_kernel_mid fbs_src)
target_link_libraries(
npu_kernel_mid
hiai

@@ -34,9 +34,6 @@ int LayerNormCPUKernel::Init() {
}
int LayerNormCPUKernel::ReSize() {
- param_->begin_norm_axis_ = -1;
- param_->begin_params_axis_ = -1;
auto shape = in_tensors_.front()->shape();
param_->begin_norm_axis_ =
param_->begin_norm_axis_ > 0 ? param_->begin_norm_axis_ : param_->begin_norm_axis_ + shape.size();
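
The ReSize logic above resolves a possibly negative begin_norm_axis against the input rank and then splits the shape into an outer part (independent rows) and an inner part (the elements normalized together). A standalone sketch of that split, with hypothetical helper names (ResolveAxis is not the kernel's function; it mirrors the ternary above):

#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical helper: map a negative axis to a non-negative index against the rank.
static int ResolveAxis(int axis, size_t rank) { return axis >= 0 ? axis : axis + static_cast<int>(rank); }

int main() {
  std::vector<int> shape = {2, 4, 8};                    // e.g. [batch, seq, hidden]
  int begin_norm_axis = ResolveAxis(-1, shape.size());   // -1 -> 2

  // outer_size: number of independent rows; inner_size: elements normalized per row.
  int outer_size = 1, inner_size = 1;
  for (size_t i = 0; i < shape.size(); ++i) {
    (static_cast<int>(i) < begin_norm_axis ? outer_size : inner_size) *= shape[i];
  }
  std::printf("begin_norm_axis=%d outer=%d inner=%d\n", begin_norm_axis, outer_size, inner_size);  // 2, 8, 8
  return 0;
}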

@@ -82,9 +82,6 @@ int LayerNormInt8CPUKernel::Init() {
}
int LayerNormInt8CPUKernel::ReSize() {
- param_->begin_norm_axis_ = -1;
- param_->begin_params_axis_ = -1;
auto shape = in_tensors_.front()->shape();
param_->begin_norm_axis_ =
param_->begin_norm_axis_ > 0 ? param_->begin_norm_axis_ : param_->begin_norm_axis_ + shape.size();
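
Both the float and int8 kernels drop the same hard-coded reset: they now keep whatever begin_norm_axis/begin_params_axis the op parameter carries instead of forcing both to -1 at ReSize time. For reference, the per-row computation those kernels implement is the usual layer norm; a plain float sketch (illustrative only, not the actual kernel code, which works on packed buffers and fixed-point data for int8):

#include <cmath>
#include <vector>

// Reference per-row layer norm: y = (x - mean) / sqrt(var + epsilon) * gamma + beta.
static std::vector<float> LayerNormRow(const std::vector<float> &x, const std::vector<float> &gamma,
                                       const std::vector<float> &beta, float epsilon) {
  const size_t n = x.size();
  float mean = 0.f, var = 0.f;
  for (float v : x) mean += v;
  mean /= n;
  for (float v : x) var += (v - mean) * (v - mean);
  var /= n;
  const float inv_std = 1.f / std::sqrt(var + epsilon);
  std::vector<float> y(n);
  for (size_t i = 0; i < n; ++i) y[i] = (x[i] - mean) * inv_std * gamma[i] + beta[i];
  return y;
}

int main() {
  const std::vector<float> x = {1.f, 2.f, 3.f, 4.f};
  const std::vector<float> gamma(4, 1.f), beta(4, 0.f);
  const std::vector<float> y = LayerNormRow(x, gamma, beta, 1e-5f);
  return y.empty() ? 1 : 0;
}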

@@ -121,9 +121,6 @@ int AnfTransform::AddGraphPass(const std::shared_ptr<opt::GraphOptimizer> &optim
weight_format_transform_pass->SetFmkType(config->fmk);
weight_format_transform_pass->SetQuantType(config->quantType);
graph_pm->AddPass(weight_format_transform_pass);
- auto infershape_pass = std::make_shared<opt::InferShapePass>();
- infershape_pass->SetFmkType(config->fmk);
- graph_pm->AddPass(infershape_pass);
auto slice_prepose_pass = std::make_shared<opt::SlicePreposePass>();
slice_prepose_pass->SetFmkType(config->fmk);
graph_pm->AddPass(slice_prepose_pass);
@@ -155,6 +152,9 @@ int AnfTransform::AddConstFoldPass(const std::shared_ptr<opt::GraphOptimizer> &o
auto update_conv2d_param_pass = std::make_shared<opt::UpdateConv2DParamPass>();
update_conv2d_param_pass->SetFmkType(config->fmk);
const_fold_pm->AddPass(update_conv2d_param_pass);
+ auto infershape_pass = std::make_shared<opt::InferShapePass>();
+ infershape_pass->SetFmkType(config->fmk);
+ const_fold_pm->AddPass(infershape_pass);
optimizer->AddPassManager(const_fold_pm);
return RET_OK;
}
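
For context, the two hunks above move the InferShapePass registration out of AddGraphPass and into AddConstFoldPass, so shape inference runs in the earlier const-folding stage rather than alongside the later graph passes. A minimal standalone sketch of that ordering idea, using simplified stand-ins (not the real opt::Pass / opt::PassManager API):

#include <cstdio>
#include <string>
#include <vector>

// Simplified stand-ins for illustration only.
struct PassManager {
  std::string name;
  std::vector<std::string> passes;
  void AddPass(const std::string &p) { passes.push_back(p); }
};

struct Optimizer {
  std::vector<PassManager> managers;
  void AddPassManager(const PassManager &pm) { managers.push_back(pm); }
  void Run() const {
    for (const auto &pm : managers)
      for (const auto &p : pm.passes) std::printf("[%s] %s\n", pm.name.c_str(), p.c_str());
  }
};

int main() {
  PassManager const_fold_pm{"const_fold"};
  const_fold_pm.AddPass("UpdateConv2DParamPass");
  const_fold_pm.AddPass("InferShapePass");        // registered here now, so shapes exist earlier

  PassManager graph_pm{"graph"};
  graph_pm.AddPass("WeightFormatTransformPass");
  graph_pm.AddPass("SlicePreposePass");           // later passes can rely on inferred shapes

  Optimizer optimizer;
  optimizer.AddPassManager(const_fold_pm);        // const folding (with infershape) runs first
  optimizer.AddPassManager(graph_pm);
  optimizer.Run();
  return 0;
}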

File diff suppressed because it is too large.

@@ -31,6 +31,8 @@ class LayerNormFusion : public PatternProcessPass {
explicit LayerNormFusion(const std::string &name = "layer_norm_fusion", bool multigraph = true)
: PatternProcessPass(name, multigraph) {
input_ = std::make_shared<Var>();
+ mean1_ = std::make_shared<Var>();
+ mean2_ = std::make_shared<Var>();
gamma_ = std::make_shared<Var>();
beta_ = std::make_shared<Var>();
epsilon_ = std::make_shared<Var>();
@@ -41,12 +43,17 @@ class LayerNormFusion : public PatternProcessPass {
const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override;
private:
- CNodePtr CreateLayerNormNode(const FuncGraphPtr &func_graph, const EquivPtr &equiv, const std::vector<int> &shape,
- const float epsilon) const;
- VarPtr input_;
- VarPtr gamma_;
- VarPtr beta_;
- VarPtr epsilon_;
+ bool GetAxis(const CNodePtr &input_cnode, const std::vector<int> &mean_axes, const std::vector<int> &params_shape,
+ int *begin_norm_axis, int *begin_params_axis) const;
+ bool CheckPattern(const EquivPtr &equiv, float *epsilon, int *begin_norm_axis, int *begin_params_axis) const;
+ CNodePtr CreateLayerNormNode(const FuncGraphPtr &func_graph, const EquivPtr &equiv, float epsilon,
+ int begin_norm_axis, int begin_params_axis) const;
+ VarPtr input_ = nullptr;
+ VarPtr mean1_ = nullptr;
+ VarPtr mean2_ = nullptr;
+ VarPtr gamma_ = nullptr;
+ VarPtr beta_ = nullptr;
+ VarPtr epsilon_ = nullptr;
};
} // namespace opt
} // namespace mindspore
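
The reworked interface replaces the pattern's explicit shape argument with derived axes: CheckPattern now reports epsilon, begin_norm_axis, and begin_params_axis, and GetAxis derives the axes from what the matched Mean nodes reduce over and from the gamma/beta shape. A standalone sketch of that kind of derivation (illustrative logic only, not the actual pass implementation; the helper name is hypothetical):

#include <algorithm>
#include <cstdio>
#include <vector>

// Sketch: given the axes a matched Mean node reduces over and the input rank,
// check they form a contiguous trailing range and report its first axis.
static bool NormAxisFromMeanAxes(std::vector<int> mean_axes, int rank, int *begin_norm_axis) {
  for (int &a : mean_axes) {
    if (a < 0) a += rank;  // normalize negative axes
  }
  std::sort(mean_axes.begin(), mean_axes.end());
  for (size_t i = 1; i < mean_axes.size(); ++i) {
    if (mean_axes[i] != mean_axes[i - 1] + 1) return false;  // must be contiguous
  }
  if (mean_axes.empty() || mean_axes.back() != rank - 1) return false;  // must end at the last axis
  *begin_norm_axis = mean_axes.front();
  return true;
}

int main() {
  int begin_norm_axis = 0;
  // e.g. input rank 3, the matched Mean reduces over axis -1.
  if (NormAxisFromMeanAxes({-1}, 3, &begin_norm_axis)) {
    int begin_params_axis = 3 - 1;  // rank-1 gamma/beta over the last dimension
    std::printf("begin_norm_axis=%d begin_params_axis=%d\n", begin_norm_axis, begin_params_axis);
  }
  return 0;
}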
