add tensor after fold

pull/860/head
chenyemeng 4 years ago
parent fbc543626c
commit 78047f46cf

@@ -221,7 +221,6 @@ Status HostCpuEngine::Run(NodePtr &node, const vector<ConstGeTensorPtr> &inputs,
   GELOGD("Run node by host cpu engine. node name = %s", node->GetName().c_str());
   std::unique_ptr<HostCpuOp> op_kernel;
   GE_CHK_STATUS_RET_NOLOG(FindOpKernel(node, op_kernel));
-#ifndef ONLY_COMPILE_OPEN_SRC
   std::map<std::string, const Tensor> named_inputs;
   std::map<std::string, Tensor> named_outputs;
   auto op_desc = node->GetOpDesc();
@@ -246,16 +245,6 @@ Status HostCpuEngine::Run(NodePtr &node, const vector<ConstGeTensorPtr> &inputs,
     GE_CHECK_NOTNULL(ge_tensor);
     tmp_outputs.emplace_back(ge_tensor);
   }
-#else
-  std::map<std::string, const Tensor> named_inputs;
-  std::vector<GeTensorPtr> tmp_outputs;
-  tmp_outputs.swap(outputs);
-  std::map<std::string, Tensor> named_outputs;
-  auto op_desc = node->GetOpDesc();
-  GE_CHK_STATUS_RET_NOLOG(PrepareInputs(op_desc, inputs, named_inputs));
-  GE_CHK_STATUS_RET_NOLOG(PrepareOutputs(op_desc, tmp_outputs, named_outputs));
-  GE_CHK_STATUS_RET_NOLOG(RunInternal(op_desc, *op_kernel, named_inputs, named_outputs));
-#endif
   GELOGD("Run node by host cpu engine successfully. name node = %s", node->GetName().c_str());
   outputs.swap(tmp_outputs);
   return SUCCESS;
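For context: this hunk drops the ONLY_COMPILE_OPEN_SRC split so only one path remains in HostCpuEngine::Run — build the name-to-tensor maps, run the kernel, collect the results into tmp_outputs, then swap them into outputs. The sketch below mirrors that retained ordering with simplified stand-in types (Tensor, GeTensorPtr, HostCpuOp and Run here are reduced mock-ups, not the real GE classes); it compiles on its own but is only an illustration of the flow, not the engine implementation.

// Minimal sketch (simplified stand-in types, not the real GE classes) of the
// single path kept in HostCpuEngine::Run after the fold: prepare the named
// input/output maps, run the kernel, then hand results back via swap.
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>

struct Tensor { std::vector<float> data; };
using GeTensorPtr = std::shared_ptr<Tensor>;

struct HostCpuOp {
  // A real kernel computes outputs from inputs; this mock just copies them.
  int Compute(const std::map<std::string, const Tensor> &inputs,
              std::map<std::string, Tensor> &outputs) {
    for (const auto &kv : inputs) { outputs[kv.first] = kv.second; }
    return 0;
  }
};

int Run(const std::vector<GeTensorPtr> &inputs, std::vector<GeTensorPtr> &outputs) {
  // Build name -> tensor maps for the kernel (stands in for PrepareInputs/PrepareOutputs).
  std::map<std::string, const Tensor> named_inputs;
  std::map<std::string, Tensor> named_outputs;
  for (size_t i = 0; i < inputs.size(); ++i) {
    named_inputs.emplace("in_" + std::to_string(i), *inputs[i]);
  }

  HostCpuOp op_kernel;
  if (op_kernel.Compute(named_inputs, named_outputs) != 0) { return -1; }

  // Collect kernel results into temporaries, then swap them into the caller's
  // vector, matching the tmp_outputs / outputs.swap(tmp_outputs) pattern above.
  std::vector<GeTensorPtr> tmp_outputs;
  for (auto &kv : named_outputs) {
    tmp_outputs.emplace_back(std::make_shared<Tensor>(std::move(kv.second)));
  }
  outputs.swap(tmp_outputs);
  return 0;
}

int main() {
  std::vector<GeTensorPtr> in{std::make_shared<Tensor>(Tensor{{1.0f, 2.0f}})};
  std::vector<GeTensorPtr> out;
  return (Run(in, out) == 0 && out.size() == 1) ? 0 : 1;
}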

@@ -569,11 +569,7 @@ Status ModelBuilder::MergeWeights() {
         return FAILED;
       }
     }
-#ifndef ONLY_COMPILE_OPEN_SRC
     weight->ClearData();
-#else
-    weight_data.clear();
-#endif
   }
   return SUCCESS;
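The second hunk applies the same fold in ModelBuilder::MergeWeights: the open-source-only branch cleared a local weight_data object, while the retained path calls weight->ClearData() on the tensor itself, presumably so each weight's own storage can be released once its bytes have been copied into the merged weight buffer. Below is a generic, self-contained illustration of that merge-then-release pattern using standard containers; Weight, MergeWeights and the ClearData member here are simplified stand-ins, not the GE API.

// Generic illustration (stand-in types, not the GE classes) of the
// merge-then-release pattern: copy each weight blob into the merged buffer,
// then clear the tensor's own storage so the bytes are not held twice.
#include <cstdint>
#include <cstring>
#include <vector>

struct Weight {
  std::vector<std::uint8_t> data;  // the weight's own copy of the bytes
  std::size_t offset;              // destination offset inside the merged buffer
  // Release the storage immediately (swap with an empty vector), mirroring
  // the role weight->ClearData() plays in the retained branch.
  void ClearData() { std::vector<std::uint8_t>().swap(data); }
};

void MergeWeights(std::vector<Weight> &weights, std::vector<std::uint8_t> &merged) {
  for (auto &weight : weights) {
    if (!weight.data.empty() && weight.offset + weight.data.size() <= merged.size()) {
      std::memcpy(merged.data() + weight.offset, weight.data.data(), weight.data.size());
    }
    weight.ClearData();  // free the per-weight buffer once it has been merged
  }
}

int main() {
  std::vector<Weight> weights{{{1, 2, 3, 4}, 0}, {{5, 6, 7, 8}, 4}};
  std::vector<std::uint8_t> merged(8, 0);
  MergeWeights(weights, merged);
  return (merged[4] == 5 && weights[0].data.empty()) ? 0 : 1;
}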
