Open gc by default (#18836)
* open gc by default, test=develop
* fix test_train_recognize_digits and disable gc when ngraph is enabled, test=develop
* fix conditional_block op eager deletion bug, test=develop
* add some comments to reviewers, test=develop
parent 3816d221ff
commit 7ac748adb4
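Context for reviewers (not part of this change): "gc" here is eager deletion, which frees each variable as soon as its last pending use has finished, instead of keeping everything alive until the scope is destroyed. A minimal standalone C++ sketch of that idea, using toy names that are not Paddle APIs:

    #include <cassert>
    #include <string>
    #include <unordered_map>
    #include <vector>

    int main() {
      // Remaining-use counts for each variable of a toy two-op program.
      std::unordered_map<std::string, int> remaining_uses = {{"x", 2}, {"y", 1}};
      std::vector<std::string> freed;

      // Called whenever an op finishes reading a variable; frees it on last use.
      auto finish_use = [&](const std::string &var) {
        if (--remaining_uses.at(var) == 0) freed.push_back(var);
      };

      finish_use("x");  // x still has one pending use
      finish_use("y");  // y is freed eagerly here
      finish_use("x");  // x is freed eagerly here
      assert(freed.size() == 2);
      return 0;
    }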
@@ -0,0 +1,61 @@
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/details/computation_op_handle.h"
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/pass.h"
#include "paddle/fluid/operators/controlflow/conditional_block_op_helper.h"
#include "paddle/fluid/operators/controlflow/op_variant.h"

namespace paddle {
namespace framework {
namespace ir {
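// Comment for reviewers: this pass pairs every conditional_block op handle in
// the graph with its conditional_block_grad op handle (grouped per scope) and
// tells the helper below which variables the backward op still needs, so that
// eager deletion does not free them too early.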
class ConditionalOpEagerDeletionPass : public Pass {
 protected:
  void ApplyImpl(Graph *graph) const override {
    auto all_ops = ir::FilterByNodeWrapper<details::OpHandleBase>(*graph);

    // Find all conditional_block ops and conditional_block_grad ops
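    // Key: scope index; value: {forward conditional_block ops,
    // backward conditional_block_grad ops} found in that scope.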
    std::unordered_map<size_t, std::pair<std::vector<OperatorBase *>,
                                         std::vector<OperatorBase *>>>
        target_ops;
    for (auto *op : all_ops) {
      auto compute_op = dynamic_cast<details::ComputationOpHandle *>(op);
      if (compute_op == nullptr) continue;

      if (compute_op->Name() == "conditional_block") {
        target_ops[compute_op->GetScopeIdx()].first.emplace_back(
            compute_op->GetOp());
      } else if (compute_op->Name() == "conditional_block_grad") {
        target_ops[compute_op->GetScopeIdx()].second.emplace_back(
            compute_op->GetOp());
      }
    }

    for (auto &ops_pair : target_ops) {
      auto &ifelse_ops = ops_pair.second.first;
      auto &ifelse_grad_ops = ops_pair.second.second;
      operators::PrepareSafeEagerDeletionOnConditionalOpAndConditionalGradOp(
          ifelse_ops, ifelse_grad_ops);
    }
  }
};

}  // namespace ir
}  // namespace framework
}  // namespace paddle

REGISTER_PASS(conditional_block_op_eager_deletion_pass,
              paddle::framework::ir::ConditionalOpEagerDeletionPass);
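Context for reviewers (not part of this diff): REGISTER_PASS makes the pass retrievable by name. Assuming the usual PassRegistry API, applying it to a graph might look like the following sketch:

    // Hypothetical call site; "graph" is a std::unique_ptr<ir::Graph>.
    auto pass = paddle::framework::ir::PassRegistry::Instance().Get(
        "conditional_block_op_eager_deletion_pass");
    pass->Apply(graph.get());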
@@ -1,7 +1,10 @@
include(operators)
register_operators(DEPS naive_executor)
cc_library(op_variant SRCS op_variant.cc DEPS operator proto_desc)
cc_library(conditional_block_op_helper SRCS conditional_block_op_helper.cc DEPS operator op_variant conditional_block_op)
cc_library(recurrent_op_helper SRCS recurrent_op_helper.cc DEPS operator op_variant recurrent_op)
cc_library(while_op_helper SRCS while_op_helper.cc DEPS operator op_variant)

target_link_libraries(conditional_block_infer_op conditional_block_op)

file(APPEND ${pybind_file} "USE_OP(less_than);\nUSE_OP(logical_and);\nUSE_NO_KERNEL_OP(read_from_array);\n")
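Context for reviewers (not part of this diff): op_variant, now a dependency shared by the conditional_block, recurrent, and while helpers, lets helper code handle both runtime operators and program-desc operators through one interface. A minimal standalone sketch of that pattern, with hypothetical names:

    #include <iostream>
    #include <string>

    // Two unrelated "operator" representations, as in runtime vs. program desc.
    struct RuntimeOp { std::string type; };
    struct DescOp { std::string type; };

    // A tiny tagged union over the two, mirroring the op_variant idea.
    class TinyOpVariant {
     public:
      explicit TinyOpVariant(const RuntimeOp *op) : runtime_(op) {}
      explicit TinyOpVariant(const DescOp *op) : desc_(op) {}

      const std::string &Type() const {
        return runtime_ ? runtime_->type : desc_->type;
      }

     private:
      const RuntimeOp *runtime_{nullptr};
      const DescOp *desc_{nullptr};
    };

    int main() {
      RuntimeOp r{"conditional_block"};
      DescOp d{"conditional_block_grad"};
      std::cout << TinyOpVariant(&r).Type() << " / "
                << TinyOpVariant(&d).Type() << std::endl;
      return 0;
    }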
@@ -0,0 +1,169 @@
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/operators/controlflow/conditional_block_op_helper.h"
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
#include "paddle/fluid/operators/controlflow/op_variant.h"

namespace paddle {
namespace operators {
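// A conditional_block op matches a conditional_block_grad op iff they refer to
// the same scope variable: the forward op's kScope output is fed to the
// backward op as its kScope input.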
static bool IsMatchedConditionalBlockOpAndConditionalBlockGradOp(
    const OpVariant &fwd_op, const OpVariant &bwd_op) {
  return fwd_op.Outputs().at(ConditionalOp::kScope) ==
         bwd_op.Inputs().at(ConditionalOp::kScope);
}
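// Scans every sub-block of the program (block 0 is skipped, since the caller
// already supplies its ops) and appends any conditional_block /
// conditional_block_grad op found there, so nested control flow is covered.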
static void FindAllConditionalBlockAndConditionalBlockGradOp(
    std::vector<OpVariant> *fwd_ops, std::vector<OpVariant> *bwd_ops) {
  PADDLE_ENFORCE_GE(fwd_ops->size(), bwd_ops->size());

  if (fwd_ops->empty()) return;

  const auto *program =
      fwd_ops->front().Attr<framework::BlockDesc *>("sub_block")->Program();

  for (size_t i = 1; i < program->Size(); ++i) {
    auto &block = program->Block(i);
    for (size_t j = 0; j < block.OpSize(); ++j) {
      auto *op = block.Op(j);
      if (op->Type() == "conditional_block") {
        fwd_ops->emplace_back(op);
      } else if (op->Type() == "conditional_block_grad") {
        bwd_ops->emplace_back(op);
      }
    }
  }

  PADDLE_ENFORCE_GE(
      fwd_ops->size(), bwd_ops->size(),
      "There are extra conditional_block_grad ops in the graph or program");
}
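// Any variable that ops inside the grad block read or write, but that the grad
// block does not define itself, must have been produced by the forward pass.
// Record those names in the forward op's kSkipEagerDeletionVars attribute so
// that eager deletion keeps them alive until the backward op has run.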
static void SetSkipVarsForConditionalBlockOp(OpVariant *fwd_op,
                                             OpVariant *bwd_op) {
  auto *grad_block = bwd_op->Attr<framework::BlockDesc *>("sub_block");
  auto is_skippable_in_fwd = [grad_block](const std::string &var_name) {
    return var_name != framework::kEmptyVarName &&
           !grad_block->HasVar(var_name);
  };

  std::unordered_set<std::string> forward_skip_vars;
  for (auto *op_desc : grad_block->AllOps()) {
    for (auto &in_arg_name : op_desc->InputArgumentNames()) {
      if (is_skippable_in_fwd(in_arg_name)) {
        forward_skip_vars.insert(in_arg_name);
      }
    }

    for (auto &out_arg_name : op_desc->OutputArgumentNames()) {
      if (is_skippable_in_fwd(out_arg_name)) {
        forward_skip_vars.insert(out_arg_name);
      }
    }
  }

  auto &fwd_attrs = const_cast<framework::AttributeMap &>(fwd_op->Attrs());
  std::vector<std::string> skip_vars_vec(forward_skip_vars.begin(),
                                         forward_skip_vars.end());
  VLOG(2) << "Prepare to skip " << skip_vars_vec.size()
          << " var(s): " << string::join_strings(skip_vars_vec, ' ');
  fwd_attrs[ConditionalOp::kSkipEagerDeletionVars] = std::move(skip_vars_vec);
}
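// Pairs each conditional_block_grad op with exactly one forward
// conditional_block op (uniqueness of the match is enforced) and then marks
// the matched forward op's skip-vars via SetSkipVarsForConditionalBlockOp.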
static void PrepareSafeEagerDeletionOnConditionalOpAndConditionalGradOpImpl(
    std::vector<OpVariant> *ifelse_ops,
    std::vector<OpVariant> *ifelse_grad_ops) {
  FindAllConditionalBlockAndConditionalBlockGradOp(ifelse_ops, ifelse_grad_ops);

  VLOG(2) << "Found conditional_block op num: " << ifelse_ops->size()
          << ", conditional_block_grad op num: " << ifelse_grad_ops->size();

  if (ifelse_grad_ops->empty()) {
    return;
  }

  std::unordered_set<OpVariant, OpVariant::Hasher> ifelse_op_set(
      ifelse_ops->begin(), ifelse_ops->end());

  for (auto &bwd_op : *ifelse_grad_ops) {
    const OpVariant *matched_fwd_op = nullptr;
    for (auto &fwd_op : ifelse_op_set) {
      if (IsMatchedConditionalBlockOpAndConditionalBlockGradOp(fwd_op,
                                                               bwd_op)) {
        PADDLE_ENFORCE(matched_fwd_op == nullptr,
                       "Found multiple matched conditional_block ops");
        matched_fwd_op = &fwd_op;
      }
    }

    PADDLE_ENFORCE_NOT_NULL(matched_fwd_op,
                            "Cannot find matched forward conditional_block op");

    SetSkipVarsForConditionalBlockOp(const_cast<OpVariant *>(matched_fwd_op),
                                     &bwd_op);
    ifelse_op_set.erase(*matched_fwd_op);
  }
}

void PrepareSafeEagerDeletionOnConditionalOpAndConditionalGradOp(
    int block_id,
    const std::vector<std::unique_ptr<framework::OperatorBase>> &all_ops) {
  // If block_id is not 0, return directly.
  // This is because all conditional_block_ops and conditional_block_grad_ops
  // in the whole program would be processed when block_id is 0 (i.e. when
  // Executor::Run() is called or ParallelExecutor is constructed).

  // What's more, all conditional_block_ops and conditional_block_grad_ops
  // must be processed when block_id is zero. If not, conditional_block_op
  // may run first and erase variables used in conditional_block_grad_op,
  // while at that moment the conditional_block_grad_ops may not have been
  // constructed yet.
  if (block_id != 0) return;

  std::vector<OpVariant> fwd_ops, bwd_ops;
  for (auto &op : all_ops) {
    if (op->Type() == "conditional_block") {
      fwd_ops.emplace_back(op.get());
    } else if (op->Type() == "conditional_block_grad") {
      bwd_ops.emplace_back(op.get());
    }
  }

  PrepareSafeEagerDeletionOnConditionalOpAndConditionalGradOpImpl(&fwd_ops,
                                                                  &bwd_ops);
}

void PrepareSafeEagerDeletionOnConditionalOpAndConditionalGradOp(
    const std::vector<framework::OperatorBase *> &ifelse_ops,
    const std::vector<framework::OperatorBase *> &ifelse_grad_ops) {
  std::vector<OpVariant> fwd_ops, bwd_ops;
  fwd_ops.reserve(ifelse_ops.size());
  for (auto *op : ifelse_ops) {
    fwd_ops.emplace_back(op);
  }

  bwd_ops.reserve(ifelse_grad_ops.size());
  for (auto *op : ifelse_grad_ops) {
    bwd_ops.emplace_back(op);
  }

  PrepareSafeEagerDeletionOnConditionalOpAndConditionalGradOpImpl(&fwd_ops,
                                                                  &bwd_ops);
}

}  // namespace operators
}  // namespace paddle
@@ -0,0 +1,34 @@
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <memory>
#include <vector>
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/controlflow/conditional_block_op.h"

namespace paddle {
namespace operators {

void PrepareSafeEagerDeletionOnConditionalOpAndConditionalGradOp(
    int block_id,
    const std::vector<std::unique_ptr<framework::OperatorBase>> &all_ops);

void PrepareSafeEagerDeletionOnConditionalOpAndConditionalGradOp(
    const std::vector<framework::OperatorBase *> &ifelse_ops,
    const std::vector<framework::OperatorBase *> &ifelse_grad_ops);

}  // namespace operators
}  // namespace paddle
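Context for reviewers (not part of this diff): a hypothetical call site for the first overload, mirroring how an executor could prepare eager deletion before running the root block:

    // "CreateOpsForBlock0" is a hypothetical helper that builds all operators
    // of block 0; the helper below must see them before any of them runs.
    std::vector<std::unique_ptr<framework::OperatorBase>> ops =
        CreateOpsForBlock0(program);
    operators::PrepareSafeEagerDeletionOnConditionalOpAndConditionalGradOp(
        /*block_id=*/0, ops);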