revert-15207-remove_op_handle_lock_and_fix_var
commit
92da467c99
@@ -0,0 +1,28 @@
# - Find JeMalloc library
# Find the native JeMalloc includes and library
#
# JEMALLOC_INCLUDE_DIR - where to find jemalloc.h, etc.
# JEMALLOC_LIBRARIES - List of libraries when using jemalloc.
# JEMALLOC_FOUND - True if jemalloc found.

find_path(JEMALLOC_INCLUDE_DIR
  NAMES jemalloc/jemalloc.h
  HINTS ${JEMALLOC_ROOT_DIR}/include)

find_library(JEMALLOC_LIBRARIES
  NAMES jemalloc
  HINTS ${JEMALLOC_ROOT_DIR}/lib)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(jemalloc DEFAULT_MSG JEMALLOC_LIBRARIES JEMALLOC_INCLUDE_DIR)

mark_as_advanced(
  JEMALLOC_LIBRARIES
  JEMALLOC_INCLUDE_DIR)

if (JEMALLOC_FOUND)
  add_library(jemalloc::jemalloc UNKNOWN IMPORTED)
  set_target_properties(jemalloc::jemalloc PROPERTIES
    IMPORTED_LOCATION ${JEMALLOC_LIBRARIES}
    INTERFACE_INCLUDE_DIRECTORIES "${JEMALLOC_INCLUDE_DIR}")
endif()
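For context, a minimal consumer of the header and library this module locates might look like the sketch below. It is illustrative and not part of the commit: it assumes jemalloc is installed, built with stats enabled, and exporting its default unprefixed API (on some platforms the symbols carry a "je_" prefix), and it uses jemalloc's mallctl introspection entry point.

// Illustrative sketch (not part of this commit): query jemalloc's
// global allocation counter through its mallctl interface.
#include <jemalloc/jemalloc.h>
#include <cstdio>

int main() {
  void* p = malloc(1024);  // served by jemalloc once the library is linked in
  size_t allocated = 0;
  size_t len = sizeof(allocated);
  if (mallctl("stats.allocated", &allocated, &len, nullptr, 0) == 0) {
    std::printf("stats.allocated = %zu bytes\n", allocated);
  }
  free(p);
  return 0;
}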
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,130 @@
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef PADDLE_FLUID_FRAMEWORK_IR_LOCK_FREE_OPTIMIZE_PASS_H_
#define PADDLE_FLUID_FRAMEWORK_IR_LOCK_FREE_OPTIMIZE_PASS_H_

#include <memory>
#include <string>
#include <vector>

#include <boost/algorithm/string/predicate.hpp>

#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/ir/pass.h"

namespace paddle {
namespace framework {
namespace ir {

class Node;

/*
 * Remove the sum op of all gradients of the backward op.
 * And remove the dependencies of the optimizer related to the
 * same backward op.
 *
 * Before this pass:
 *
 * forward_op1  forward_op2
 *     |            |
 *  grad_op1     grad_op2
 *        \      /
 *         \    /
 *         sum_op
 *           |
 *         sgd_op
 *
 * After this pass:
 *
 * forward_op1  forward_op2
 *     |            |
 *  grad_op1     grad_op2
 *     |            |
 *  sgd_op1      sgd_op2
 *
 * sgd_op1 and sgd_op2 will update the same weight which holds the same
 * memory, so we can benefit from the acceleration
 */
class LockFreeOptimizePass : public Pass {
 public:
  virtual ~LockFreeOptimizePass() {}

 protected:
  std::unique_ptr<ir::Graph> ApplyImpl(std::unique_ptr<ir::Graph> graph) const;

 private:
  // Create a new sgd node via current optimizer node
  ir::Node* CreateNewSGDNode(ir::Graph* graph, ir::Node* forward_node,
                             ir::Node* backward_node, ir::Node* grad_sum_node,
                             ir::Node* optimize_node) const;

  // Replace the input weight's optimizers
  void ReplaceUpstreamNode(ir::Node* upstream_node,
                           ir::Node* old_optimizer_node,
                           ir::Node* new_optimizer_node) const;

  // Replace the output weight's optimizers
  void ReplaceAllDownstreamNode(ir::Node* old_optimizer_node,
                                ir::Node* new_optimizer_node) const;

  // Find all weight variables in graph
  bool FindAllWeightVars(ir::Graph* graph) const;

  // Find the forward_op node via the backward_op node
  ir::Node* FindForwardOpViaBackwardOp(ir::Graph* graph,
                                       ir::Node* backward_node) const;

  std::vector<ir::Node*> FindConnectedNode(ir::Node* upstream_node,
                                           ir::Node* downstream_node) const;

  inline bool IsOpNamed(ir::Node* node, const std::string& name) const {
    PADDLE_ENFORCE(node);

    return node->NodeType() == Node::Type::kOperation && node->Name() == name;
  }

  inline bool IsVarNamed(ir::Node* node, const std::string& name) const {
    PADDLE_ENFORCE(node);

    return node->NodeType() == Node::Type::kVariable && node->Name() == name;
  }

  inline bool IsVarNameEndsWith(ir::Node* node, const std::string& name) const {
    PADDLE_ENFORCE(node);

    return node->NodeType() == Node::Type::kVariable &&
           boost::algorithm::ends_with(node->Name(), name);
  }

  inline bool IsVarNameContains(ir::Node* node, const std::string& name) const {
    PADDLE_ENFORCE(node);

    return node->NodeType() == Node::Type::kVariable &&
           node->Name().find(name) != std::string::npos;
  }

  inline bool IsControlDepFrom(ir::Node* ctrl_dep_node, ir::Node* node) const {
    PADDLE_ENFORCE(ctrl_dep_node);
    PADDLE_ENFORCE(node);

    return IsControlDepVar(*ctrl_dep_node) &&
           ctrl_dep_node->inputs.size() >= 1u &&
           ctrl_dep_node->inputs[0] == node;
  }
};

}  // namespace ir
}  // namespace framework
}  // namespace paddle

#endif  // PADDLE_FLUID_FRAMEWORK_IR_LOCK_FREE_OPTIMIZE_PASS_H_
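To make the rewiring concrete, the toy sketch below shows the kind of adjacency-list surgery ReplaceUpstreamNode has to perform. ToyNode and ReplaceUpstream are hypothetical stand-ins, not Paddle's ir::Node or the pass's actual implementation, but they mirror the inputs/outputs vectors the header above relies on (e.g. in IsControlDepFrom).

// Toy sketch (hypothetical ToyNode, not Paddle's ir::Node): redirect an
// upstream node's edge from the old optimizer node to the new one.
#include <algorithm>
#include <string>
#include <vector>

struct ToyNode {
  std::string name;
  std::vector<ToyNode*> inputs;   // nodes feeding into this one
  std::vector<ToyNode*> outputs;  // nodes this one feeds into
};

void ReplaceUpstream(ToyNode* upstream, ToyNode* old_opt, ToyNode* new_opt) {
  // Point the upstream node's output edge at the new optimizer ...
  std::replace(upstream->outputs.begin(), upstream->outputs.end(), old_opt,
               new_opt);
  // ... and register the upstream node as an input of the new optimizer.
  new_opt->inputs.push_back(upstream);
}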
@@ -0,0 +1,194 @@
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */

#include "paddle/fluid/framework/ir/seqpool_concat_fuse_pass.h"
#include <string>
#include <unordered_set>
#include <vector>
#include "paddle/fluid/framework/lod_tensor.h"

#define MAX_CONCAT_INPUTS 200

namespace paddle {
namespace framework {
namespace ir {

PDNode* BuildSeqPoolConcatPattern(PDPattern* pattern,
                                  const std::string& name_scope,
                                  int num_inputs) {
  auto is_concat_op_with_inputs = [](Node* x, int num) -> bool {
    return x && x->IsOp() && x->Op()->Type() == "concat" &&
           x->Op()->Input("X").size() == static_cast<size_t>(num);
  };

  auto is_nth_input_var_of_concat = [=](Node* x, int idx) -> bool {
    return x && x->IsVar() && VarLinksToOp(x, "concat") &&
           x->outputs.size() == 1 && IsNthInput(x, x->outputs[0], "X", idx) &&
           is_concat_op_with_inputs(x->outputs[0], num_inputs);
  };

  auto is_seqpool_op_with_pooltype_of_nth_input_of_concat = [=](
      Node* x, const std::string& type, int idx) -> bool {
    bool ok = x && x->IsOp() && x->Op()->Type() == "sequence_pool" &&
              x->Op()->HasAttr("pooltype") &&
              boost::get<std::string>(x->Op()->GetAttr("pooltype")) == type &&
              x->outputs.size() == 2;  // seqpool should only have 2 outputs
    if (ok) {
      // only one output of seqpool_op is the nth_input_var of concat;
      // the other one should be an unused empty var
      if (is_nth_input_var_of_concat(x->outputs[0], idx)) {
        ok = ok && x->outputs[1]->IsVar() && x->outputs[1]->outputs.size() == 0;
      } else {
        ok = ok && is_nth_input_var_of_concat(x->outputs[1], idx) &&
             x->outputs[0]->IsVar() && x->outputs[0]->outputs.size() == 0;
      }
    }
    return ok;
  };

  auto* concat_op = pattern->NewNode(
      [=](Node* x) { return is_concat_op_with_inputs(x, num_inputs); },
      name_scope + "/concat_op");
  concat_op->assert_op_attr<int>("axis", 1);

  auto* concat_out_var = pattern->NewNode(
      [=](Node* x) {
        return x && x->IsVar() && VarLinksFromOp(x, "concat") &&
               x->inputs.size() == 1 &&
               is_concat_op_with_inputs(x->inputs[0], num_inputs);
      },
      name_scope + "/concat_out_var");
  concat_out_var->assert_is_only_output_of_op("concat");

  std::vector<PDNode*> seqpool_ops_input_var(num_inputs);
  std::vector<PDNode*> seqpool_ops_output_var(num_inputs);
  std::vector<PDNode*> seqpool_ops(num_inputs);

  for (int i = 0; i < num_inputs; ++i) {
    seqpool_ops_output_var[i] = pattern->NewNode(
        [=](Node* x) {
          return x && x->IsVar() && is_nth_input_var_of_concat(x, i) &&
                 x->inputs.size() == 1 &&
                 is_seqpool_op_with_pooltype_of_nth_input_of_concat(
                     x->inputs[0], "SUM", i);
        },
        name_scope + "/sequence_pool_out_" + std::to_string(i));

    seqpool_ops[i] = pattern->NewNode(
        [=](Node* x) {
          return x && x->IsOp() &&
                 is_seqpool_op_with_pooltype_of_nth_input_of_concat(x, "SUM",
                                                                    i);
        },
        name_scope + "/sequence_pool_op_" + std::to_string(i));

    seqpool_ops_input_var[i] = pattern->NewNode(
        [=](Node* x) {
          return x && x->IsVar() && x->outputs.size() >= 1 &&
                 is_seqpool_op_with_pooltype_of_nth_input_of_concat(
                     x->outputs[0], "SUM", i);
        },
        name_scope + "/sequence_pool_in_" + std::to_string(i));

    // Links
    seqpool_ops[i]
        ->LinksFrom({seqpool_ops_input_var[i]})
        .LinksTo({seqpool_ops_output_var[i]});
  }
  concat_op->LinksFrom(seqpool_ops_output_var).LinksTo({concat_out_var});
  return concat_out_var;
}

int BuildFusion(Graph* graph, const std::string& name_scope, Scope* scope,
                int num_inputs) {
  GraphPatternDetector gpd;
  auto* pattern = gpd.mutable_pattern();
  BuildSeqPoolConcatPattern(pattern, name_scope, num_inputs);

  auto retrieve_node = [](const std::string& name,
                          const GraphPatternDetector::subgraph_t& subgraph,
                          const PDPattern& pat) -> Node* {
    PADDLE_ENFORCE(subgraph.count(pat.RetrieveNode(name)),
                   "pattern has no Node called %s", name.c_str());
    Node* p = subgraph.at(pat.RetrieveNode(name));
    PADDLE_ENFORCE_NOT_NULL(p, "subgraph has no node %s", name.c_str());
    return p;
  };

  int fusion_count{0};
  auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                     Graph* g) {
    VLOG(4) << "handle SeqPool Concat fuse";
    std::vector<std::string> input_names(num_inputs);
    std::vector<Node*> input_vars(num_inputs);
    auto& fused_pattern = gpd.pattern();
    for (int i = 0; i < num_inputs; ++i) {
      input_vars[i] =
          retrieve_node(name_scope + "/sequence_pool_in_" + std::to_string(i),
                        subgraph, fused_pattern);
      input_names[i] = input_vars[i]->Name();
    }
    auto* concat_op =
        retrieve_node(name_scope + "/concat_op", subgraph, fused_pattern);
    auto* concat_out_var =
        retrieve_node(name_scope + "/concat_out_var", subgraph, fused_pattern);
    auto* seqpool_op0 = retrieve_node(name_scope + "/sequence_pool_op_0",
                                      subgraph, fused_pattern);

    // Create New OpDesc
    OpDesc op_desc;
    op_desc.SetType("fusion_seqpool_concat");
    op_desc.SetInput("X", input_names);
    op_desc.SetAttr("pooltype", seqpool_op0->Op()->GetAttr("pooltype"));
    op_desc.SetAttr("axis", concat_op->Op()->GetAttr("axis"));
    op_desc.SetOutput("Out", {concat_out_var->Name()});
    auto* op = graph->CreateOpNode(&op_desc);
    for (size_t i = 0; i < input_vars.size(); ++i) {
      IR_NODE_LINK_TO(input_vars[i], op);
    }
    IR_NODE_LINK_TO(op, concat_out_var);

    std::unordered_set<const Node*> marked_nodes;
    for (auto& item : subgraph) {
      marked_nodes.insert(item.second);
    }
    for (size_t i = 0; i < input_vars.size(); ++i) {
      marked_nodes.erase(input_vars[i]);
    }
    marked_nodes.erase(concat_out_var);
    GraphSafeRemoveNodes(graph, marked_nodes);
    ++fusion_count;
  };

  gpd(graph, handler);
  return fusion_count;
}

std::unique_ptr<ir::Graph> SeqPoolConcatFusePass::ApplyImpl(
    std::unique_ptr<ir::Graph> graph) const {
  FusePassBase::Init(name_scope_, graph.get());
  int fusion_count = 0;
  for (int i = MAX_CONCAT_INPUTS; i > 0; --i) {
    fusion_count += BuildFusion(
        graph.get(), name_scope_ + "/" + std::to_string(i), param_scope(), i);
  }
  AddStatis(fusion_count);

  return graph;
}

}  // namespace ir
}  // namespace framework
}  // namespace paddle

REGISTER_PASS(seqpool_concat_fuse_pass,
              paddle::framework::ir::SeqPoolConcatFusePass);
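Once REGISTER_PASS has run, the fuse pass can be looked up by name and applied to a graph; ApplyImpl then tries every input count from MAX_CONCAT_INPUTS down to 1, so larger concat patterns are fused before smaller ones. A minimal sketch of driving it, assuming the ir::PassRegistry and unique_ptr-based Pass::Apply interface from paddle/fluid/framework/ir/pass.h in this Paddle 1.x branch:

// Minimal sketch (assumes Paddle 1.x ir::PassRegistry / Pass::Apply):
// fetch the registered pass by name and run it over a graph.
#include <memory>
#include <utility>
#include "paddle/fluid/framework/ir/pass.h"

std::unique_ptr<paddle::framework::ir::Graph> RunSeqPoolConcatFuse(
    std::unique_ptr<paddle::framework::ir::Graph> graph) {
  auto pass = paddle::framework::ir::PassRegistry::Instance().Get(
      "seqpool_concat_fuse_pass");
  return pass->Apply(std::move(graph));
}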
@@ -0,0 +1,38 @@
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */

#pragma once

#include <memory>
#include <string>
#include "paddle/fluid/framework/ir/fuse_pass_base.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/ir/graph_pattern_detector.h"

namespace paddle {
namespace framework {
namespace ir {

class SeqPoolConcatFusePass : public FusePassBase {
 public:
  virtual ~SeqPoolConcatFusePass() {}

 protected:
  std::unique_ptr<ir::Graph> ApplyImpl(std::unique_ptr<ir::Graph> graph) const;

  const std::string name_scope_{"seqpool_concat_fuse"};
};

}  // namespace ir
}  // namespace framework
}  // namespace paddle
Some files were not shown because too many files have changed in this diff