adjust-the-location-of-cleaning-unuseless-memory-in-value-node

pull/9864/head
lvliang 4 years ago
parent f2b25d4139
commit 1490947ff0

@ -141,11 +141,6 @@ void RunOpsInGraphTask::Run() {
session_->RunOpsInGraphImpl(graph_id_, input_tensors_, &outputs_);
}
void CleanUselessTensorsTask::Run() {
MS_EXCEPTION_IF_NULL(session_);
session_->CleanUselessTensorsImpl(useless_tensors_);
}
void CreateCommGroupTask::Run() { result_ = CommManager::GetInstance().CreateGroupSync(group_name_, ranks_); }
void DestroyCommGroupTask::Run() { result_ = CommManager::GetInstance().DestroyGroup(group_name_); }
@ -392,15 +387,6 @@ void Executor::RunOpsInGraph(const SessionPtr &session, const GraphId &graph_id,
*outputs = task->outputs_;
}
void Executor::CleanUselessTensors(const SessionPtr &session,
const std::shared_ptr<std::vector<tensor::TensorPtr>> &useless_tensors) {
MS_EXCEPTION_IF_NULL(useless_tensors);
auto task = std::make_shared<CleanUselessTensorsTask>();
task->session_ = session;
task->useless_tensors_ = useless_tensors;
SyncRunTask(task);
}
bool Executor::CreateCommGroup(const std::string &group_name, std::vector<uint32_t> ranks) {
auto task = std::make_shared<CreateCommGroupTask>();
task->group_name_ = group_name;

@ -46,8 +46,7 @@ enum TaskType {
kRunOp,
kCreateCommGroup,
kDestroyCommGroup,
kRunOpsInGraph,
kCleanUselessTensors
kRunOpsInGraph
};
class Task {
@ -110,14 +109,6 @@ class RunOpsInGraphTask : public Task {
GraphId graph_id_{0};
};
class CleanUselessTensorsTask : public Task {
public:
CleanUselessTensorsTask() { type_ = kCleanUselessTensors; }
~CleanUselessTensorsTask() override = default;
void Run() override;
std::shared_ptr<std::vector<tensor::TensorPtr>> useless_tensors_{nullptr};
};
class RunOpTask : public Task {
public:
RunOpTask() { type_ = kRunOp; }
@ -175,8 +166,6 @@ class Executor {
const std::vector<int64_t> &tensors_mask);
void RunOpsInGraph(const SessionPtr &session, const GraphId &graph_id, const std::vector<tensor::TensorPtr> &inputs,
VectorRef *outputs);
void CleanUselessTensors(const SessionPtr &session,
const std::shared_ptr<std::vector<tensor::TensorPtr>> &useless_tensors);
bool CreateCommGroup(const std::string &group_name, std::vector<uint32_t> ranks);
bool DestroyCommGroup(const std::string &group_name);
void OnEvent(const ExecutorEvent &event);

@ -1657,11 +1657,6 @@ void SessionBasic::RunOpsInGraph(const GraphId &graph_id, const std::vector<tens
executor_->RunOpsInGraph(shared_from_this(), graph_id, inputs, outputs);
}
void SessionBasic::CleanUselessTensors(const std::shared_ptr<std::vector<tensor::TensorPtr>> &useless_tensors) {
MS_EXCEPTION_IF_NULL(executor_);
executor_->CleanUselessTensors(shared_from_this(), useless_tensors);
}
void SessionBasic::RunGraph(const GraphId &graph_id, const std::vector<tensor::TensorPtr> &inputs, VectorRef *outputs) {
MS_EXCEPTION_IF_NULL(executor_);
executor_->RunGraph(shared_from_this(), graph_id, inputs, outputs);
@ -1710,22 +1705,6 @@ void SessionBasic::UpdateGraphDynamicShapeAttr(const NotNull<KernelGraphPtr> &ro
root_graph->UpdateGraphDynamicAttr();
}
void SessionBasic::CleanUselessTensorsImpl(const std::shared_ptr<std::vector<tensor::TensorPtr>> &useless_tensors) {
auto ms_context = MsContext::GetInstance();
std::string device_target = ms_context->get_param<std::string>(MS_CTX_DEVICE_TARGET);
if (device_target == "CPU") {
return;
}
for (const auto &tensor : *useless_tensors) {
MS_EXCEPTION_IF_NULL(tensor);
const auto &shape = tensor->shape();
if (!shape.empty()) {
// The address of scalar value node does not need to be deleted
tensor->set_device_address(nullptr);
}
}
}
bool SessionBasic::IsGetNextGraph(const GraphId &graph_id, std::string *channel_name) {
auto kernel_graph = graphs_[graph_id];
MS_EXCEPTION_IF_NULL(kernel_graph);

@ -82,7 +82,6 @@ class SessionBasic : public std::enable_shared_from_this<SessionBasic> {
void RunOp(OpRunInfo *, const GraphInfo &, std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs,
const std::vector<int64_t> &tensors_mask);
void RunOpsInGraph(const GraphId &graph_id, const std::vector<tensor::TensorPtr> &inputs, VectorRef *outputs);
void CleanUselessTensors(const std::shared_ptr<std::vector<tensor::TensorPtr>> &useless_tensors);
virtual void RegisterSummaryCallBackFunc(const CallBackFunc &callback);
@ -142,7 +141,6 @@ class SessionBasic : public std::enable_shared_from_this<SessionBasic> {
friend class RunGraphTask;
friend class RunOpTask;
friend class RunOpsInGraphTask;
friend class CleanUselessTensorsTask;
virtual bool IsSupportSummary() { return true; }
virtual void CreateOutputTensors(const GraphId &graph_id, const std::vector<tensor::TensorPtr> &input_tensors,
VectorRef *outputs,
@ -164,7 +162,6 @@ class SessionBasic : public std::enable_shared_from_this<SessionBasic> {
const std::vector<int64_t> &tensors_mask) {}
virtual void RunOpsInGraphImpl(const GraphId &graph_id, const std::vector<tensor::TensorPtr> &inputs,
VectorRef *outputs) {}
void CleanUselessTensorsImpl(const std::shared_ptr<std::vector<tensor::TensorPtr>> &useless_tensors);
void RunInfer(NotNull<FuncGraphPtr> func_graph, const std::vector<tensor::TensorPtr> &inputs);
virtual void SetSummaryNodes(KernelGraph *graph);

@ -253,6 +253,7 @@ AdjointPtr DFunctor::MapMorphism(const AnfNodePtr &morph) {
k_app = k_graph_->NewCNode(inputs);
}
ReplaceEquivdout(k_app, cnode_morph);
cnode_morph->clear_inputs_value();
cnode_morph->set_forward(nullptr, "");
for (size_t i = 0; i < param_adjoints.size(); ++i) {
param_adjoints[i]->RegisterKUser(k_app, i);
@ -387,7 +388,6 @@ void DFunctor::ReplaceEquivdout(const CNodePtr &cnode, const CNodePtr &cnode_mor
MS_EXCEPTION_IF_NULL(out_node);
out_node->set_value(GenNewTensor(manager, out_node, out_node->value(), need_replace_forward));
// clear resource
cnode_morph->clear_inputs_value();
fg->ClearAllManagerInfo();
func_graph->ClearAllManagerInfo();
}

@ -92,6 +92,7 @@ const char PYTHON_PARSE_CLASS_ELLIPSIS[] = "create_ellipsis_obj";
const char NAMED_PRIMITIVE_LEN[] = "len";
const char NAMED_PRIMITIVE_BODY[] = "body";
const char NAMED_PRIMITIVE_ASSIGN[] = "Assign";
const char NAMED_PRIMITIVE_AUGASSIGN[] = "AugAssign";
const char NAMED_PRIMITIVE_FOR[] = "For";
const char NAMED_PRIMITIVE_IF[] = "If";
const char NAMED_PRIMITIVE_WHILE[] = "While";
@ -105,6 +106,7 @@ const char NAMED_PRIMITIVE_ATTRIBUTE[] = "Attribute";
const char NAMED_PRIMITIVE_COMPARE[] = "Compare";
const char NAMED_PRIMITIVE_NAMECONSTANT[] = "NameConstant";
const char NAMED_PRIMITIVE_COMPARATORS[] = "comparators";
const char NAMED_PRIMITIVE_TARGET[] = "target";
const char NAMED_PRIMITIVE_SLICE[] = "slice";
const char NAMED_PRIMITIVE_NAME[] = "Name";
const char NAMED_PRIMITIVE_NUM[] = "Num";

File diff suppressed because it is too large Load Diff

@ -52,6 +52,8 @@ struct PrimAbsInfo {
using AbstractListMap = std::unordered_map<abstract::AbstractBasePtrList, PrimAbsInfo,
abstract::AbstractBasePtrListHasher, abstract::AbstractBasePtrListEqual>;
using OpIndexWithTensorId = std::unordered_map<std::string, std::vector<std::string>>;
using TensorIdWithTensor = std::unordered_map<std::string, std::vector<tensor::TensorPtr>>;
py::tuple RunOp(const py::args &args);
@ -87,6 +89,7 @@ struct TopCellInfo {
FuncGraphPtr df_builder;
FuncGraphPtr bg; // Backward graph
std::string cell_id;
bool is_dynamic_cell{false};
TopCellInfo() = default;
TopCellInfo(ResourcePtr r, FuncGraphPtr df, FuncGraphPtr backward_graph, std::string cellid)
: resource(std::move(r)), df_builder(std::move(df)), bg(std::move(backward_graph)), cell_id(std::move(cellid)) {}
@ -154,9 +157,12 @@ class PynativeExecutor : public std::enable_shared_from_this<PynativeExecutor> {
bool IsDynamicCell(const py::object &cell);
std::string GetCellInfo(const py::object &cell);
void ParseInputArgs(const std::shared_ptr<parse::ParseAst> &ast, const py::object &fn_node);
bool ParseBodyContext(const std::shared_ptr<parse::ParseAst> &ast, const py::object &fn_node);
bool ParseBodyContext(const std::shared_ptr<parse::ParseAst> &ast, const py::object &fn_node,
const std::vector<std::string> &compare_prim = {});
bool ParseIfWhileExprNode(const std::shared_ptr<parse::ParseAst> &ast, const py::object &node);
bool ParseAssignExprNode(const std::shared_ptr<parse::ParseAst> &ast, const py::object &node);
bool ParseAugAssignExprNode(const std::shared_ptr<parse::ParseAst> &ast, const py::object &node,
const std::vector<std::string> &compare_prim = {});
bool ParseForExprNode(const std::shared_ptr<parse::ParseAst> &ast, const py::object &node);
std::string ParseNodeName(const std::shared_ptr<parse::ParseAst> &ast, const py::object &node,
parse::AstMainType type);
@ -190,7 +196,7 @@ class PynativeExecutor : public std::enable_shared_from_this<PynativeExecutor> {
// Update the abstract and device address info of value node and tensors in bprop graph
void UpdateAbstractAndDeviceAddress(const OpExecInfoPtr &op_exec_info, const py::object &out_real);
void SaveTensorsInValueNode(const ResourcePtr &resource);
void CleanTensorsInValueNode();
void CleanPreMemoryInValueNode(const std::string &cell_id);
// Construct grad graph
void PushCurrentGraphToStack();
@ -259,6 +265,7 @@ class PynativeExecutor : public std::enable_shared_from_this<PynativeExecutor> {
static std::mutex instance_lock_;
static int64_t graph_id_;
size_t grad_order_{0};
std::string top_cell_id_;
bool grad_flag_{false};
bool dynamic_cell_{false};
bool grad_is_running_{false};
@ -282,8 +289,8 @@ class PynativeExecutor : public std::enable_shared_from_this<PynativeExecutor> {
// Used for runop and replace forward result of grad graph
std::unordered_map<std::string, size_t> op_index_map_;
std::unordered_map<std::string, std::string> obj_to_forward_id_;
std::unordered_map<std::string, std::vector<std::string>> op_index_with_tensor_id_;
std::unordered_map<std::string, std::vector<tensor::TensorPtr>> tensor_id_with_tensor_;
std::unordered_map<std::string, OpIndexWithTensorId> cell_op_index_with_tensor_id_;
std::unordered_map<std::string, TensorIdWithTensor> cell_tensor_id_with_tensor_;
std::unordered_map<std::string, abstract::AbstractBasePtr> node_abs_map_;
std::unordered_map<std::string, AbstractListMap> prim_abs_list_;
};

@ -553,7 +553,7 @@ std::string Tensor::ToStringInternal(int limit_size) const {
std::ostringstream buf;
auto dtype = Dtype();
MS_EXCEPTION_IF_NULL(dtype);
buf << "Tensor(shape=" << ShapeToString(shape_) << ", dtype=" << dtype->ToString() << ", value=";
buf << "Tensor(id=" << id_ << ", shape=" << ShapeToString(shape_) << ", dtype=" << dtype->ToString() << ", value=";
if (limit_size <= 0 || DataSize() < limit_size) {
// Only print data for small tensor.
buf << ((data().ndim() > 1) ? '\n' : ' ') << data().ToString(data_type_, shape_, false);

@ -361,7 +361,6 @@ class Cell(Cell_):
_pynative_exec.end_graph(self, output, *inputs, **kwargs)
for i, cell in enumerate(self.cells()):
cell.set_grad(origin_grad[i])
self._already_run = True
return output
def _add_attr(self, name, value):

@ -182,6 +182,9 @@ class GradOperation(GradOperation_):
sens_param (bool): Whether to append sensitivity (gradient with respect to output) as input.
If sens_param is False, a 'ones_like(outputs)' sensitivity will be attached automatically.
Default: False.
If sens_param is True, a sensitivity (gradient with respect to output) needs to be passed through a positional
argument or a keyword argument. If it is passed through a keyword argument,
the key must be sens.
Returns:
The higher-order function which takes a function as argument and returns gradient function for it.
@ -311,16 +314,23 @@ class GradOperation(GradOperation_):
def _pynative_forward_run(self, args, kwargs, fn):
""" Pynative forward run to build grad graph. """
new_kwargs = {}
if self.sens_param:
args = args[:-1]
if not 'sens' in kwargs.keys():
args = args[:-1]
new_kwargs = kwargs
else:
for key, value in kwargs.items():
if key != 'sens':
new_kwargs[key] = value
for arg in args:
if not isinstance(arg, Tensor):
raise TypeError("grad inputs should be tensor in pynative mode")
if isinstance(fn, FunctionType):
_pynative_exec.set_grad_flag(True)
_pynative_exec.new_graph(fn, *args, **kwargs)
output = fn(*args, **kwargs)
_pynative_exec.end_graph(fn, output, *args, **kwargs)
_pynative_exec.new_graph(fn, *args, **new_kwargs)
output = fn(*args, **new_kwargs)
_pynative_exec.end_graph(fn, output, *args, **new_kwargs)
else:
if fn.already_run and not fn.requires_grad:
raise ValueError("obj must set_grad.")
@ -328,7 +338,7 @@ class GradOperation(GradOperation_):
self.need_forward = True
if self.need_forward:
fn.set_grad()
fn(*args, **kwargs)
fn(*args, **new_kwargs)
fn.already_run = False
def __call__(self, fn, weights=None):

@ -404,10 +404,10 @@ def test_pynative_resnet50():
step = step + 1
if step > max_step:
break
start_time = time.time()
input_data = element["image"]
input_label = element["label"]
loss_output = net_with_criterion(input_data, input_label)
start_time = time.time()
grads = train_network(input_data, input_label)
optimizer(grads)
end_time = time.time()

@ -403,10 +403,10 @@ def test_pynative_resnet50():
step = step + 1
if step > max_step:
break
start_time = time.time()
input_data = element["image"]
input_label = element["label"]
loss_output = net_with_criterion(input_data, input_label)
start_time = time.time()
grads = train_network(input_data, input_label)
optimizer(grads)
end_time = time.time()

Loading…
Cancel
Save