!10203 Synchronize latest Ascend software suite 18 Dec 2020

From: @nicholas_yhr
Reviewed-by: 
Signed-off-by:
pull/10203/MERGE
mindspore-ci-bot, committed by Gitee
commit 3ba3ffedd4

@ -381,9 +381,6 @@ checkopts "$@"
echo "---------------- MindSpore: build start ----------------"
mkdir -pv "${BUILD_PATH}/package/mindspore/lib"
git submodule update --init graphengine
cd "${BASEPATH}/graphengine"
git submodule update --init metadef
cd "${BASEPATH}"
if [[ "X$ENABLE_AKG" = "Xon" ]] && [[ "X$ENABLE_D" = "Xon" || "X$ENABLE_GPU" = "Xon" ]]; then
git submodule update --init --recursive akg
fi

@ -38,7 +38,6 @@ elseif (DEFINED ENV{D_LINK_PATH})
find_library(slog libslog.so ${GE_LIB_PATH})
find_library(mmpa libmmpa.a ${GE_LIB_PATH})
find_library(runtime libruntime.so ${GE_LIB_PATH})
find_library(msprof libmsprof.so ${GE_LIB_PATH})
find_library(register libregister.so ${GE_LIB_PATH})
find_library(hccl libhccl.so ${GE_LIB_PATH})
find_library(cce libcce.so ${GE_LIB_PATH})
@ -59,7 +58,6 @@ else()
find_library(cce libcce.so ${ASCEND_RUNTIME_PATH})
find_library(hccl libhccl.so ${ASCEND_RUNTIME_PATH})
find_library(runtime libruntime.so ${ASCEND_RUNTIME_PATH})
find_library(msprof libmsprof.so ${ASCEND_RUNTIME_PATH})
find_library(register libregister.so ${ASCEND_RUNTIME_PATH})
find_library(resource libresource.so ${ASCEND_RUNTIME_PATH})
find_library(error_manager liberror_manager.so ${ASCEND_RUNTIME_PATH})
@ -68,7 +66,6 @@ else()
find_library(cce libcce.so ${ASCEND_TOOLKIT_RUNTIME_PATH})
find_library(hccl libhccl.so ${ASCEND_TOOLKIT_RUNTIME_PATH})
find_library(runtime libruntime.so ${ASCEND_TOOLKIT_RUNTIME_PATH})
find_library(msprof libmsprof.so ${ASCEND_TOOLKIT_RUNTIME_PATH})
find_library(register libregister.so ${ASCEND_TOOLKIT_RUNTIME_PATH})
find_library(resource libresource.so ${ASCEND_TOOLKIT_RUNTIME_PATH})
find_library(error_manager liberror_manager.so ${ASCEND_TOOLKIT_RUNTIME_PATH})

@ -26,7 +26,7 @@ if (ENABLE_D OR ENABLE_ACL OR ENABLE_TESTCASES)
# use slog, error manager, mmpa in non ascend mode, e.g. tests
set(GE_PREBUILD_PATH ${GE_SOURCE_DIR}/third_party/prebuild/${CMAKE_HOST_SYSTEM_PROCESSOR})
set(ENABLE_MS_TESTCASES TRUE)
find_submodule_lib(slog libslog.so ${GE_PREBUILD_PATH})
find_submodule_lib(slog libalog.so ${GE_PREBUILD_PATH})
find_submodule_lib(error_manager liberror_manager.so ${GE_PREBUILD_PATH})
find_submodule_lib(static_mmpa libmmpa.a ${GE_PREBUILD_PATH})
endif()

@ -1 +1 @@
Subproject commit 20a0326976db65ca01f43ae4ccdd85677faaeb5e
Subproject commit 9a7b271674f343157c316b1455aee628c43cffdc

@ -122,7 +122,7 @@ class AscendEnvChecker(EnvChecker):
"""ascend environment check"""
def __init__(self):
self.version = ["1.76.T21.0.B210"]
self.version = ["1.76.22.0.220"]
atlas_nnae_version = "/usr/local/Ascend/nnae/latest/fwkacllib/version.info"
atlas_toolkit_version = "/usr/local/Ascend/ascend-toolkit/latest/fwkacllib/version.info"
hisi_fwk_version = "/usr/local/Ascend/fwkacllib/version.info"

@ -248,17 +248,17 @@ if (ENABLE_D)
find_library(RUNTIME_LIB runtime ${ASCEND_RUNTIME_PATH} ${ASCEND_TOOLKIT_RUNTIME_PATH})
find_library(TSDCLIENT tsdclient HINTS ${ASCEND_RUNTIME_PATH} ${ASCEND_TOOLKIT_RUNTIME_PATH} ${ASCEND_DRIVER_BACK_PATH})
find_library(DATATRANSFER datatransfer HINTS ${ASCEND_RUNTIME_PATH} ${ASCEND_TOOLKIT_RUNTIME_PATH} ${ASCEND_DRIVER_BACK_PATH})
find_library(PROFILING msprofiler ${ASCEND_RUNTIME_PATH} ${ASCEND_TOOLKIT_RUNTIME_PATH})
find_library(PROFILING_SHARED msprof ${ASCEND_DRIVER_PATH})
find_library(PROFILING msprofiler_fwk ${ASCEND_RUNTIME_PATH} ${ASCEND_TOOLKIT_RUNTIME_PATH})
find_library(REGISTER register ${ASCEND_RUNTIME_PATH} ${ASCEND_TOOLKIT_RUNTIME_PATH})
find_library(OPTILING optiling ${ASCEND_OPP_PATH})
# hccl_adpter
find_library(HCCL_ADPTER hcom_graph_adaptor ${ASCEND_RUNTIME_PATH} ${ASCEND_TOOLKIT_RUNTIME_PATH})
find_library(HCCL_BUILDER hcom_opskernel_builder ${ASCEND_RUNTIME_PATH}/plugin/opskernel ${ASCEND_TOOLKIT_RUNTIME_PATH}/plugin/opskernel)
add_library(ms_profile SHARED ${PROFILING})
add_library(ms_profile SHARED ${CMAKE_CURRENT_SOURCE_DIR}/runtime/device/ascend/profiling/profiling_callback_register.cc)
set_target_properties(ms_profile PROPERTIES LINKER_LANGUAGE CXX)
target_link_libraries(ms_profile -Wl,--start-group ${PROFILING_SHARED} ${PROFILING} mindspore::protobuf -Wl,--end-group)
target_link_options(ms_profile PRIVATE -Wl,-init,common_log_init)
target_link_libraries(ms_profile -Wl,--start-group -Wl,--whole-archive ${PROFILING} -Wl,--no-whole-archive mindspore::protobuf -Wl,--end-group)
target_link_libraries(mindspore ge_runtime ${CCE_LIB} ${RUNTIME_LIB} ${TSDCLIENT} ${HCCL} ${DATATRANSFER}
${HCCL_ADPTER} ${REGISTER} -Wl,--no-as-needed ${OPTILING} ${HCCL_BUILDER})
target_link_libraries(mindspore -Wl,--start-group proto_input mindspore::protobuf -Wl,--end-group)

@ -422,12 +422,7 @@ GraphInfo GetSingleOpGraphInfo(const PrimitivePtr &prim, const std::vector<tenso
}
} // namespace
void AscendSession::Init(uint32_t device_id) {
InitExecutor(kAscendDevice, device_id);
auto runtime_instance = device::KernelRuntimeManager::Instance().GetKernelRuntime(kAscendDevice, device_id);
MS_EXCEPTION_IF_NULL(runtime_instance);
runtime_instance->CreateContext();
}
void AscendSession::Init(uint32_t device_id) { InitExecutor(kAscendDevice, device_id); }
void AscendSession::UnifyMindIR(const KernelGraphPtr &graph) {
auto context_ptr = MsContext::GetInstance();

@ -1019,7 +1019,6 @@ void InitHccl() {
mindspore::parse::python_adapter::set_python_env_flag(true);
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
(void)context::OpenTsd(ms_context);
uint32_t device_id = ms_context->get_param<uint32_t>(MS_CTX_DEVICE_ID);
std::string device_name = ms_context->get_param<std::string>(MS_CTX_DEVICE_TARGET);
ms_context->set_param<bool>(MS_CTX_ENABLE_HCCL, true);
@ -1027,10 +1026,14 @@ void InitHccl() {
ms_context->get_param<std::string>(MS_CTX_DEVICE_TARGET) == kAscendDevice) {
auto runtime_instance = device::KernelRuntimeManager::Instance().GetKernelRuntime(device_name, device_id);
MS_EXCEPTION_IF_NULL(runtime_instance);
runtime_instance->PreInit();
(void)context::OpenTsd(ms_context);
if (!runtime_instance->Init()) {
MS_LOG(ERROR) << "Kernel runtime init error.";
return;
}
} else {
(void)context::OpenTsd(ms_context);
}
#endif
}
@ -1060,9 +1063,29 @@ void ReleaseGeTsd() {
}
}
void StartUpProfiling() {
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
if (!ms_context->get_param<bool>(MS_CTX_ENABLE_PROFILING)) {
return;
}
MS_LOG(INFO) << "Startup profiling";
// Start up profiling before OpenTsd
uint32_t device_id = ms_context->get_param<uint32_t>(MS_CTX_DEVICE_ID);
std::string device_name = ms_context->get_param<std::string>(MS_CTX_DEVICE_TARGET);
if (ms_context->backend_policy() == "ms" &&
ms_context->get_param<std::string>(MS_CTX_DEVICE_TARGET) == kAscendDevice) {
auto runtime_instance = device::KernelRuntimeManager::Instance().GetKernelRuntime(device_name, device_id);
MS_EXCEPTION_IF_NULL(runtime_instance);
runtime_instance->PreInit();
}
}
void InitBackend() {
// set python env flag
mindspore::parse::python_adapter::set_python_env_flag(true);
// Startup profiling before open tsd
StartUpProfiling();
// open tsd before ge initialize
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);

@ -64,6 +64,7 @@ if (ENABLE_GPU)
# add_library(_mindspore_device_cuda_obj OBJECT ${CUDA_SRC_LIST})
endif ()
list(REMOVE_ITEM D_SRC_LIST "ascend/profiling/profiling_callback_register.cc")
set_property(SOURCE ${DEVICE_SRC_LIST} ${D_SRC_LIST} ${CPU_SRC_LIST}
PROPERTY COMPILE_DEFINITIONS SUBMODULE_ID=mindspore::SubModuleId::SM_DEVICE)
add_library(_mindspore_runtime_device_obj OBJECT ${DEVICE_SRC_LIST} ${D_SRC_LIST} ${CPU_SRC_LIST})

@ -45,6 +45,8 @@
#include "toolchain/adx_datadump_server.h"
#include "utils/shape_utils.h"
#include "utils/trace_base.h"
#include "graphengine/inc/external/acl/error_codes/rt_error_codes.h"
#include "debug/anf_ir_dump.h"
#ifdef MEM_REUSE_DEBUG
#include "backend/optimizer/mem_reuse/mem_reuse_checker.h"
#endif
@ -54,6 +56,7 @@
#include "utils/config_manager.h"
#include "runtime/device/ascend/profiling/reporter/op_name_task_stream_reporter.h"
#include "runtime/hccl_adapter/hccl_adapter.h"
#include "runtime/device/ascend/profiling/profiling_callback_register.h"
#include "backend/kernel_compiler/hccl/hccl_context.h"
#ifdef ENABLE_TDTQUE
#include "tdt/tdt_host_interface.h"
@ -71,11 +74,9 @@ constexpr uint32_t kTupleTaskId = 0;
constexpr uint32_t kTupleStreamId = 1;
constexpr uint32_t kTupleArgs = 2;
constexpr uint32_t kProfilingMaxTaskIdInStream = 65531;
constexpr auto kModuleName = "MindSpore";
namespace mindspore {
namespace device {
namespace ascend {
static const size_t PRAMATER_OUTPUT_INDEX = 0;
namespace mindspore::device::ascend {
static thread_local rtContext_t thread_local_rt_context{nullptr};
namespace {
std::string GetRankId() {
@ -110,7 +111,9 @@ std::string GetRankId() {
}
} // namespace
std::vector<rtExceptionInfo> AscendKernelRuntime::exception_infoes_;
std::vector<rtTaskFailInfo> AscendKernelRuntime::task_fail_infoes_ = {};
uint32_t AscendKernelRuntime::current_graph_id_ = 0;
std::map<std::string, uint32_t> AscendKernelRuntime::overflow_tasks_;
AscendKernelRuntime::~AscendKernelRuntime() { graph_model_map_.clear(); }
void AscendKernelRuntime::SetContext() {
@ -255,6 +258,11 @@ void AscendKernelRuntime::ReleaseDeviceRes() {
mem_manager_->FreeDeviceMemory();
}
auto rt_ret = rtRegTaskFailCallbackByModule(kModuleName, nullptr);
if (rt_ret != RT_ERROR_NONE) {
MS_LOG(EXCEPTION) << "Reg SetTaskFailCallback failed, error: " << rt_ret;
}
(void)DestroySingleOpHccl();
(void)DestroyHccl();
(void)ResetDevice(device_id);
@ -262,6 +270,13 @@ void AscendKernelRuntime::ReleaseDeviceRes() {
MS_LOG(INFO) << "Ascend finalize end";
}
void AscendKernelRuntime::PreInit() {
auto ret = ProfilingManager::GetInstance().StartupProfiling(device_id_);
if (!ret) {
MS_EXCEPTION(DeviceProcessError) << "StartupProfiling failed.";
}
}
bool AscendKernelRuntime::Init() {
if (initialized_) {
InnerSetContext();
@ -269,24 +284,21 @@ bool AscendKernelRuntime::Init() {
}
OpTilingCalculater::GetInstance().Init();
// Start up profiling before rtSetDevice
bool ret = ProfilingManager::GetInstance().StartupProfiling(device_id_);
if (!ret) {
MS_EXCEPTION(DeviceProcessError) << "StartupProfiling failed.";
}
ret = InitDevice();
bool ret = InitDevice();
if (!ret) {
return ret;
}
SetDebugger();
mem_manager_ = std::make_shared<AscendMemoryManager>();
MS_EXCEPTION_IF_NULL(mem_manager_);
mem_manager_->MallocDeviceMemory();
// Set callback func when exception error
auto rt_ret = rtSetTaskFailCallback(ExceptionCallback);
auto rt_ret = rtRegTaskFailCallbackByModule(kModuleName, TaskFailCallback);
if (rt_ret != RT_ERROR_NONE) {
MS_LOG(EXCEPTION) << "SetTaskFailCallback failed, error: " << rt_ret;
MS_LOG(EXCEPTION) << "Reg SetTaskFailCallback failed, error: " << rt_ret;
}
initialized_ = true;
@ -525,42 +537,57 @@ void AscendKernelRuntime::LaunchDataDump(GraphId graph_id) {
}
}
void AscendKernelRuntime::ExceptionCallback(rtExceptionInfo *exception_info) {
void AscendKernelRuntime::TaskFailCallback(rtTaskFailInfo *task_fail_info) {
MS_EXCEPTION_IF_NULL(task_fail_info);
static std::mutex exception_mutex;
std::lock_guard<std::mutex> lock(exception_mutex);
exception_infoes_.push_back(*exception_info);
if (task_fail_info->retcode == ACL_ERROR_RT_AICORE_OVER_FLOW) {
auto key = std::to_string(task_fail_info->streamid) + std::to_string(task_fail_info->taskid);
auto find_iter = overflow_tasks_.find(key);
if (find_iter == overflow_tasks_.end()) {
overflow_tasks_[key] = 1;
} else {
if (overflow_tasks_[key] == 5) {
auto node_name = AscendKernelRuntime::GetErrorNodeName(task_fail_info->streamid, task_fail_info->taskid);
MS_LOG(WARNING) << "Node run task overflow, node name: " << node_name;
overflow_tasks_.erase(find_iter);
} else {
overflow_tasks_[key]++;
}
}
} else {
MS_LOG(WARNING) << "Task fail infos task_id: " << task_fail_info->taskid
<< ", stream_id: " << task_fail_info->streamid << ", tid: " << task_fail_info->tid
<< ", device_id: " << task_fail_info->deviceid << ", retcode: " << task_fail_info->retcode;
task_fail_infoes_.push_back(*task_fail_info);
}
}
void AscendKernelRuntime::DumpTaskExceptionInfo(const session::KernelGraph *graph) {
MS_EXCEPTION_IF_NULL(graph);
std::vector<std::string> full_scope_name{};
// Find node name(full scope name)
auto runtime_info_map = ModelRunner::Instance().GetRuntimeInfoMap(graph->graph_id());
MS_LOG(ERROR) << "Exception_infos_ size: " << exception_infoes_.size() << ". first example: "
<< ", task_id: " << exception_infoes_.at(0).taskid
<< ", stream_id: " << exception_infoes_.at(0).streamid << ", tid: " << exception_infoes_.at(0).tid
<< ", device_id: " << exception_infoes_.at(0).deviceid;
for (const auto &exception_info : exception_infoes_) {
for (const auto &iter : runtime_info_map) {
auto task_id = std::get<kTupleTaskId>(*iter.second);
auto stream_id = std::get<kTupleStreamId>(*iter.second);
if (task_id == exception_info.taskid && stream_id == exception_info.streamid) {
full_scope_name.push_back(iter.first);
MS_LOG(ERROR) << "Node: " << iter.first << ", run task error.";
}
string AscendKernelRuntime::GetErrorNodeName(uint32_t streamid, uint32_t taskid) {
auto runtime_info_map = ModelRunner::Instance().GetRuntimeInfoMap(AscendKernelRuntime::current_graph_id_);
for (const auto &iter : runtime_info_map) {
auto task_id = std::get<kTupleTaskId>(*iter.second);
auto stream_id = std::get<kTupleStreamId>(*iter.second);
if (task_id == taskid && stream_id == streamid) {
MS_LOG(ERROR) << "Node: " << iter.first << ", run task error.";
return iter.first;
}
}
return "";
}
void AscendKernelRuntime::DumpTaskExceptionInfo(const session::KernelGraph *graph) {
MS_EXCEPTION_IF_NULL(graph);
auto full_scope_name =
AscendKernelRuntime::GetErrorNodeName(task_fail_infoes_.at(0).streamid, task_fail_infoes_.at(0).taskid);
// Dump error data in local path
const std::string local_path = std::string("./task_error_dump/") + std::to_string(exception_infoes_.at(0).deviceid);
const std::string local_path = std::string("./task_error_dump/") + std::to_string(task_fail_infoes_.at(0).deviceid);
for (const auto &node : graph->execution_order()) {
for (auto &name : full_scope_name) {
if (node->fullname_with_scope() == name) {
MS_LOG(ERROR) << "Begin to dump node (" << name << ") task error input/output data in local path."
<< " trace: " << trace::DumpSourceLines(node);
E2eDumpUtil::DumpInputImpl(node, false, local_path, &name, nullptr);
E2eDumpUtil::DumpOutputImpl(node, false, local_path, &name, nullptr);
}
if (node->fullname_with_scope() == full_scope_name) {
MS_LOG(ERROR) << "Begin to dump node (" << full_scope_name << ") task error input/output data in local path."
<< " trace: " << trace::DumpSourceLines(node);
E2eDumpUtil::DumpInputImpl(node, false, local_path, &full_scope_name, nullptr);
E2eDumpUtil::DumpOutputImpl(node, false, local_path, &full_scope_name, nullptr);
}
}
}
@ -571,7 +598,8 @@ bool AscendKernelRuntime::Run(session::KernelGraph *graph, bool is_task_sink) {
#if defined(_WIN32) || defined(_WIN64)
auto start_time = std::chrono::steady_clock::now();
#else
struct timeval start_time, end_time;
struct timeval start_time {};
struct timeval end_time {};
(void)gettimeofday(&start_time, nullptr);
#endif
if (is_task_sink) {
@ -630,6 +658,7 @@ bool AscendKernelRuntime::RunDynamicKernelAsync(const session::KernelGraph *grap
}
bool AscendKernelRuntime::RunTask(const session::KernelGraph *graph) {
current_graph_id_ = graph->graph_id();
InnerSetContext();
MS_EXCEPTION_IF_NULL(graph);
if (graph->is_dynamic_shape()) {
@ -656,7 +685,8 @@ bool AscendKernelRuntime::RunTask(const session::KernelGraph *graph) {
bool status = ModelRunner::Instance().RunModel(graph->graph_id(), input_tensors, output_tensors);
if (!status) {
DumpTaskExceptionInfo(graph);
std::string file_name = "task_error_debug" + std::to_string(current_graph_id_) + ".ir";
DumpIR(file_name, std::shared_ptr<session::KernelGraph>(const_cast<session::KernelGraph *>(graph)));
#ifdef ENABLE_TDTQUE
// Run task error, we should call TdtHostDestroy to release tdt to avoid DeviceQueueOp hostPush hung
// case1: cpu usage 100% cause thread/process exit, but some tdt thread remain in backend
@ -667,10 +697,9 @@ bool AscendKernelRuntime::RunTask(const session::KernelGraph *graph) {
MS_LOG(INFO) << "Destroy tsd success.";
}
#endif
return false;
}
exception_infoes_.clear();
task_fail_infoes_.clear();
return true;
}
@ -857,6 +886,4 @@ void AscendKernelRuntime::KernelLaunchProfiling(const std::string &kernel_name)
MS_LOG(EXCEPTION) << "Too many profiling data";
}
}
} // namespace ascend
} // namespace device
} // namespace mindspore
} // namespace mindspore::device::ascend

@ -32,9 +32,7 @@
using ge::model_runner::TaskInfo;
using std::unordered_map;
using std::vector;
namespace mindspore {
namespace device {
namespace ascend {
namespace mindspore::device::ascend {
class AscendKernelRuntime : public KernelRuntime {
public:
AscendKernelRuntime() = default;
@ -56,6 +54,7 @@ class AscendKernelRuntime : public KernelRuntime {
void SetContext() override;
void CreateContext() override;
void *context() const override { return rt_context_; }
void PreInit() override;
protected:
DeviceAddressPtr CreateDeviceAddress(void *device_ptr, size_t device_size, const string &format,
@ -80,8 +79,9 @@ class AscendKernelRuntime : public KernelRuntime {
bool CheckGraphIdValid(GraphId graph_id) const;
void DistributeDebugTask(NotNull<const session::KernelGraph *> graph, NotNull<std::function<void *()>> model_handle);
void LaunchDataDump(GraphId graph_id);
static string GetErrorNodeName(uint32_t streamid, uint32_t taskid);
static void DumpTaskExceptionInfo(const session::KernelGraph *graph);
static void ExceptionCallback(rtExceptionInfo *exception_info);
static void TaskFailCallback(rtTaskFailInfo *task_fail_info);
void ReportProfilingData();
rtContext_t rt_context_{nullptr};
@ -90,11 +90,11 @@ class AscendKernelRuntime : public KernelRuntime {
unordered_map<GraphId, std::shared_ptr<ge::model_runner::DavinciModel>> graph_model_map_;
unordered_map<GraphId, std::shared_ptr<DataDumper>> graph_data_dumper_;
std::map<std::pair<uint32_t, uint32_t>, std::string> stream_id_task_id_op_name_map_;
static std::vector<rtExceptionInfo> exception_infoes_;
static uint32_t current_graph_id_;
static std::map<std::string, uint32_t> overflow_tasks_;
static std::vector<rtTaskFailInfo> task_fail_infoes_;
};
MS_REG_KERNEL_RUNTIME(kAscendDevice, AscendKernelRuntime);
} // namespace ascend
} // namespace device
} // namespace mindspore
} // namespace mindspore::device::ascend
#endif // MINDSPORE_CCSRC_RUNTIME_DEVICE_ASCEND_ASCEND_KERNEL_RUNTIME_H_

@ -31,7 +31,7 @@ void AscendMemoryManager::MallocDeviceMemory() {
device_mem_size_ = context_mem == 0 ? kAscendDeviceMemSize : context_mem;
auto ret = rtMalloc(reinterpret_cast<void **>(&device_mem_base_), device_mem_size_, RT_MEMORY_HBM);
if (ret != ACL_RT_SUCCESS) {
if (ret == ACL_ERROR_RT_DRV_INTERNEL_ERROR) {
if (ret == ACL_ERROR_RT_DRV_INTERNAL_ERROR) {
auto context_ptr = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(context_ptr);
unsigned int device_id = context_ptr->get_param<uint32_t>(MS_CTX_DEVICE_ID);

@ -1,42 +0,0 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "runtime/device/ascend/profiling/plugin_impl.h"
#include <string>
#include "utils/log_adapter.h"
using std::string;
namespace mindspore {
namespace device {
namespace ascend {
Reporter *PluginImpl::reporter_ = nullptr;
PluginImpl::PluginImpl(const std::string &module) : module_(module) { MS_LOG(INFO) << "Create PluginImpl."; }
int PluginImpl::Init(const Reporter *reporter) {
MS_LOG(INFO) << "PluginImpl init";
MS_EXCEPTION_IF_NULL(reporter);
reporter_ = const_cast<Reporter *>(reporter);
return 0;
}
int PluginImpl::UnInit() {
MS_LOG(INFO) << " PluginImpl Uninit ";
reporter_ = nullptr;
return 0;
}
} // namespace ascend
} // namespace device
} // namespace mindspore

@ -1,45 +0,0 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_RUNTIME_DEVICE_ASCEND_PROFILING_PLUGIN_IMPL_H_
#define MINDSPORE_CCSRC_RUNTIME_DEVICE_ASCEND_PROFILING_PLUGIN_IMPL_H_
#include <string>
#include "./prof_engine.h"
using Msprof::Engine::PluginIntf;
using Msprof::Engine::Reporter;
using std::string;
namespace mindspore {
namespace device {
namespace ascend {
class PluginImpl : public PluginIntf {
public:
explicit PluginImpl(const std::string &module);
~PluginImpl() override = default;
int Init(const Reporter *reporter) override;
int UnInit() override;
static Reporter *GetPluginReporter() { return reporter_; }
private:
static Reporter *reporter_;
std::string module_;
};
} // namespace ascend
} // namespace device
} // namespace mindspore
#endif // MINDSPORE_CCSRC_RUNTIME_DEVICE_ASCEND_PROFILING_PLUGIN_IMPL_H_

@ -0,0 +1,93 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "runtime/device/ascend/profiling/profiling_callback_register.h"
#include "runtime/base.h"
namespace Analysis {
namespace Dvvp {
namespace ProfilerCommon {
extern int32_t MsprofilerInit();
} // namespace ProfilerCommon
} // namespace Dvvp
} // namespace Analysis
namespace {
constexpr Status PROF_SUCCESS = 0;
constexpr Status PROF_FAILED = 0xFFFFFFFF;
} // namespace
Status RegProfCtrlCallback(MsprofCtrlCallback func) {
if (VMCallbackRegister::GetInstance().registed()) {
return VMCallbackRegister::GetInstance().DoRegProfCtrlCallback(func);
} else {
return PROF_SUCCESS;
}
}
Status RegProfSetDeviceCallback(MsprofSetDeviceCallback func) {
if (VMCallbackRegister::GetInstance().registed()) {
return VMCallbackRegister::GetInstance().DoRegProfSetDeviceCallback(func);
} else {
return PROF_SUCCESS;
}
}
Status RegProfReporterCallback(MsprofReporterCallback func) {
if (VMCallbackRegister::GetInstance().registed()) {
return VMCallbackRegister::GetInstance().DoRegProfReporterCallback(func);
} else {
return PROF_SUCCESS;
}
}
Status ProfCommandHandle(ProfCommandHandleType type, void *data, uint32_t len) {
if (VMCallbackRegister::GetInstance().registed()) {
return VMCallbackRegister::GetInstance().DoProfCommandHandle(type, data, len);
} else {
return PROF_SUCCESS;
}
}
bool IsInitialize() { return true; }
VMCallbackRegister &VMCallbackRegister::GetInstance() {
static VMCallbackRegister instance;
return instance;
}
bool VMCallbackRegister::Registe(Status (*pRegProfCtrlCallback)(MsprofCtrlCallback),
Status (*pRegProfSetDeviceCallback)(MsprofSetDeviceCallback),
Status (*pRegProfReporterCallback)(MsprofReporterCallback),
Status (*pProfCommandHandle)(ProfCommandHandleType, void *, uint32_t)) {
if (!registed_) {
pRegProfCtrlCallback_ = pRegProfCtrlCallback;
pRegProfSetDeviceCallback_ = pRegProfSetDeviceCallback;
pRegProfReporterCallback_ = pRegProfReporterCallback;
pProfCommandHandle_ = pProfCommandHandle;
registed_ = true;
ForceMsprofilerInit();
return true;
}
return false;
}
void VMCallbackRegister::ForceMsprofilerInit() {
if (!ms_profile_inited_) {
Analysis::Dvvp::ProfilerCommon::MsprofilerInit();
ms_profile_inited_ = true;
}
}

@ -0,0 +1,82 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_RUNTIME_DEVICE_ASCEND_PROFILING_PROFILING_CALLBACK_REGISTER_H_
#define MINDSPORE_CCSRC_RUNTIME_DEVICE_ASCEND_PROFILING_PROFILING_CALLBACK_REGISTER_H_
#include "toolchain/prof_callback.h"
#define MAX_DEV_NUM (64)
using Status = uint32_t;
enum ProfCommandHandleType {
kProfCommandhandleInit = 0,
kProfCommandhandleStart,
kProfCommandhandleStop,
kProfCommandhandleFinalize,
kProfCommandhandleModelSubscribe,
kProfCommandhandleModelUnsubscribe
};
struct ProfCommandHandleData {
uint64_t profSwitch;
uint32_t devNums; // length of device id list
uint32_t devIdList[MAX_DEV_NUM];
uint32_t modelId;
};
Status RegProfCtrlCallback(MsprofCtrlCallback func);
Status RegProfSetDeviceCallback(MsprofSetDeviceCallback func);
Status RegProfReporterCallback(MsprofReporterCallback func);
Status ProfCommandHandle(ProfCommandHandleType type, void *data, uint32_t len);
bool IsInitialize();
class __attribute__((visibility("default"))) VMCallbackRegister {
public:
static VMCallbackRegister &GetInstance();
VMCallbackRegister(const VMCallbackRegister &) = delete;
VMCallbackRegister &operator=(const VMCallbackRegister &) = delete;
bool Registe(Status (*pRegProfCtrlCallback)(MsprofCtrlCallback),
Status (*pRegProfSetDeviceCallback)(MsprofSetDeviceCallback),
Status (*pRegProfReporterCallback)(MsprofReporterCallback),
Status (*pProfCommandHandle)(ProfCommandHandleType, void *, uint32_t));
void ForceMsprofilerInit();
bool registed() { return registed_; }
Status DoRegProfCtrlCallback(MsprofCtrlCallback func) { return pRegProfCtrlCallback_(func); }
Status DoRegProfSetDeviceCallback(MsprofSetDeviceCallback func) { return pRegProfSetDeviceCallback_(func); }
Status DoRegProfReporterCallback(MsprofReporterCallback func) { return pRegProfReporterCallback_(func); }
Status DoProfCommandHandle(ProfCommandHandleType type, void *data, uint32_t len) {
return pProfCommandHandle_(type, data, len);
}
private:
VMCallbackRegister()
: registed_(false),
ms_profile_inited_(false),
pRegProfCtrlCallback_(nullptr),
pRegProfSetDeviceCallback_(nullptr),
pRegProfReporterCallback_(nullptr),
pProfCommandHandle_(nullptr) {}
~VMCallbackRegister() = default;
bool registed_;
bool ms_profile_inited_;
Status (*pRegProfCtrlCallback_)(MsprofCtrlCallback);
Status (*pRegProfSetDeviceCallback_)(MsprofSetDeviceCallback);
Status (*pRegProfReporterCallback_)(MsprofReporterCallback);
Status (*pProfCommandHandle_)(ProfCommandHandleType, void *, uint32_t);
};
#endif // MINDSPORE_CCSRC_RUNTIME_DEVICE_ASCEND_PROFILING_PROFILING_CALLBACK_REGISTER_H_
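
Note: a minimal wiring sketch (assumed call site, not part of this merge request) of how the framework-side callbacks declared in profiling_manager.h (see the hunk further below) could be handed to the new VMCallbackRegister, so that the RegProf*/ProfCommandHandle entry points built into ms_profile forward to MindSpore once registration happens. The helper name is hypothetical.

#include "runtime/device/ascend/profiling/profiling_callback_register.h"

namespace mindspore::device::ascend {
// Declarations as in profiling_manager.h; the implementations live in the profiling manager.
Status RegProfCtrlCallback(MsprofCtrlCallback func);
Status RegProfSetDeviceCallback(MsprofSetDeviceCallback func);
Status RegProfReporterCallback(MsprofReporterCallback func);
Status ProfCommandHandle(ProfCommandHandleType type, void *data, uint32_t len);
}  // namespace mindspore::device::ascend

// Hypothetical helper: a real caller would run this during backend initialization,
// before profiling starts, so later callback registrations reach the framework.
inline void RegisterMsProfilingCallbacks() {
  namespace msd = mindspore::device::ascend;
  (void)VMCallbackRegister::GetInstance().Registe(msd::RegProfCtrlCallback, msd::RegProfSetDeviceCallback,
                                                  msd::RegProfReporterCallback, msd::ProfCommandHandle);
}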

@ -1,37 +0,0 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "runtime/device/ascend/profiling/profiling_engine_impl.h"
#include "utils/log_adapter.h"
#include "runtime/device/ascend/profiling/plugin_impl.h"
namespace mindspore {
namespace device {
namespace ascend {
PluginIntf *ProfilingEngineImpl::CreatePlugin() {
MS_LOG(INFO) << "Create Plugin.";
return new (std::nothrow) PluginImpl("Framework");
}
int ProfilingEngineImpl::ReleasePlugin(PluginIntf *plugin) {
if (plugin != nullptr) {
delete plugin;
plugin = nullptr;
}
return 0;
}
} // namespace ascend
} // namespace device
} // namespace mindspore

@ -1,39 +0,0 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_RUNTIME_DEVICE_ASCEND_PROFILING_PROFILING_ENGINE_IMPL_H_
#define MINDSPORE_CCSRC_RUNTIME_DEVICE_ASCEND_PROFILING_PROFILING_ENGINE_IMPL_H_
#include "./prof_engine.h"
using Msprof::Engine::EngineIntf;
using Msprof::Engine::PluginIntf;
namespace mindspore {
namespace device {
namespace ascend {
class ProfilingEngineImpl : public EngineIntf {
public:
ProfilingEngineImpl() = default;
~ProfilingEngineImpl() override = default;
PluginIntf *CreatePlugin() override;
int ReleasePlugin(PluginIntf *plugin) override;
};
} // namespace ascend
} // namespace device
} // namespace mindspore
#endif // MINDSPORE_CCSRC_RUNTIME_DEVICE_ASCEND_PROFILING_PROFILING_ENGINE_IMPL_H_

@ -23,13 +23,21 @@
#include <nlohmann/json.hpp>
#include "utils/contract.h"
#include "utils/ms_context.h"
#include "toolchain/prof_callback.h"
#include "runtime/device/ascend/profiling/profiling_callback_register.h"
using std::map;
using std::string;
using Status = uint32_t;
namespace mindspore {
namespace device {
namespace ascend {
class ProfilingEngineImpl;
struct MsprofCallback {
MsprofCtrlCallback msprofCtrlCallback;
MsprofSetDeviceCallback msprofSetDeviceCallback;
MsprofReporterCallback msprofReporterCallback;
};
class ProfilingManager {
public:
static ProfilingManager &GetInstance();
@ -43,17 +51,31 @@ class ProfilingManager {
MS_EXCEPTION_IF_NULL(context);
return context->get_param<bool>(MS_CTX_ENABLE_PROFILING);
}
Status PluginInit() const;
void PluginUnInit() const;
Status CallMsprofReport(NotNull<ReporterData *> reporter_data) const;
struct MsprofCallback &GetMsprofCallback() {
return prof_cb_;
}
void SetMsprofCtrlCallback(MsprofCtrlCallback func) { prof_cb_.msprofCtrlCallback = func; }
void SetMsprofReporterCallback(MsprofReporterCallback func) { prof_cb_.msprofReporterCallback = func; }
void SetMsprofSetDeviceCallback(MsprofSetDeviceCallback func) { prof_cb_.msprofSetDeviceCallback = func; }
Status GetProfConf(NotNull<MsprofGeOptions *> prof);
protected:
ProfilingManager();
~ProfilingManager() { prof_handle_ = nullptr; }
~ProfilingManager() {}
private:
bool ProfStartUp(const nlohmann::json &json);
std::shared_ptr<ProfilingEngineImpl> engine_0_;
bool ProfStartUp(NotNull<MsprofGeOptions *> prof_conf);
uint32_t device_id_;
void *prof_handle_;
MsprofCallback prof_cb_;
};
Status RegProfCtrlCallback(MsprofCtrlCallback func);
Status RegProfSetDeviceCallback(MsprofSetDeviceCallback func);
Status RegProfReporterCallback(MsprofReporterCallback func);
Status ProfCommandHandle(ProfCommandHandleType type, void *data, uint32_t len);
} // namespace ascend
} // namespace device
} // namespace mindspore

@ -24,14 +24,15 @@
#include "runtime/device/ascend/profiling/reporter/task_desc_reporter.h"
#include "utils/ms_context.h"
#include "runtime/device/ascend/profiling/reporter/point_reporter.h"
#include "nlohmann/json.hpp"
namespace mindspore {
namespace device {
namespace ascend {
constexpr uint32_t kMaxProfilingNodeNum = 100;
constexpr char kCustomNode[] = "PROFILING_CUSTOM_";
constexpr char kFpStartNode[] = "PROFILING_FP_START";
constexpr char kBpEndNode[] = "PROFILING_BP_END";
constexpr char kFpStartNode[] = "fp_point";
constexpr char kBpEndNode[] = "bp_point";
constexpr char kIterEndNode[] = "PROFILING_ITER_END";
// PROFILING_CUSTOM_LOGID_START 3
constexpr uint64_t kProfilingFpStartLogId = 1;
@ -42,14 +43,29 @@ std::map<uint32_t, std::vector<std::string>> ProfilingUtils::graph_kernel_name_;
std::map<uint32_t, std::vector<std::shared_ptr<ProfDesc>>> ProfilingUtils::graph_point_;
uint32_t ProfilingUtils::custom_node_index_ = 1;
nlohmann::json GetContextProfilingOption() {
auto context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(context);
const string prof_options_str = context->get_param<std::string>(MS_CTX_PROFILING_OPTIONS);
nlohmann::json j;
try {
j = nlohmann::json::parse(prof_options_str);
} catch (nlohmann::json::parse_error &e) {
MS_LOG(EXCEPTION) << "Parse profiling option json failed, error:" << e.what();
}
return j;
}
ProfilingTraceInfo ProfilingUtils::GetProfilingTraceFromEnv(NotNull<const session::KernelGraph *> graph_ptr) {
MS_LOG(INFO) << "get env start";
custom_node_index_ = 1;
auto &cnode_exec_order = graph_ptr->execution_order();
auto profiling_option = GetContextProfilingOption();
ProfilingTraceInfo profiling_trace;
profiling_trace.trace_begin = GetTraceBegin(cnode_exec_order);
profiling_trace.trace_bp_end = GetTraceBpEnd(cnode_exec_order);
profiling_trace.trace_netoutput = GetTraceNetoutput(cnode_exec_order);
profiling_trace.trace_begin = GetTraceBegin(cnode_exec_order, profiling_option);
profiling_trace.trace_bp_end = GetTraceBpEnd(cnode_exec_order, profiling_option);
profiling_trace.trace_netoutput = GetTraceNetoutput(cnode_exec_order, profiling_option);
for (uint32_t i = 1; i <= kMaxProfilingNodeNum; ++i) {
std::string env_str = std::string(kCustomNode) + std::to_string(i);
@ -80,10 +96,14 @@ void ProfilingUtils::GetTraceHccl(const std::vector<CNodePtr> &cnode_exec_order,
}
}
std::string ProfilingUtils::GetTraceBegin(const std::vector<CNodePtr> &cnode_exec_order) {
const char *trace_begin = std::getenv(kFpStartNode);
if (trace_begin != nullptr) {
return std::string(trace_begin);
std::string ProfilingUtils::GetTraceBegin(const std::vector<CNodePtr> &cnode_exec_order, const nlohmann::json &option) {
auto iter = option.find(kFpStartNode);
if (iter != option.end() && iter->is_string()) {
std::string trace_begin_str = *iter;
if (!trace_begin_str.empty()) {
MS_LOG(INFO) << "Get fp_point from profiling_option:" << trace_begin_str;
return trace_begin_str;
}
}
std::string fp_start_str;
@ -124,12 +144,16 @@ void ProfilingUtils::GetCNodeOutputRealNode(const std::string &node_name, const
}
}
std::string ProfilingUtils::GetTraceBpEnd(const std::vector<CNodePtr> &cnode_exec_order) {
const char *trace_bp_end = std::getenv(kBpEndNode);
if (trace_bp_end != nullptr) {
return std::string(trace_bp_end);
std::string ProfilingUtils::GetTraceBpEnd(const std::vector<CNodePtr> &cnode_exec_order, const nlohmann::json &option) {
auto bp_point = option.find(kBpEndNode);
if (bp_point != option.end() && bp_point->is_string()) {
std::string bp_point_str = *bp_point;
if (!bp_point_str.empty()) {
MS_LOG(INFO) << "Get bp_point from profiling_option:" << bp_point_str;
return bp_point_str;
}
}
std::string bp_end_str;
// Contain hccl kernel
auto iter = cnode_exec_order.rbegin();
@ -179,9 +203,17 @@ std::string ProfilingUtils::GetGraphLastTbeKernelName(const std::vector<CNodePtr
return last_tbe_kernel_name;
}
std::string ProfilingUtils::GetTraceNetoutput(const std::vector<CNodePtr> &cnode_exec_order) {
const char *trace_netoutput = std::getenv(kIterEndNode);
return trace_netoutput == nullptr ? GetGraphLastTbeKernelName(cnode_exec_order) : std::string(trace_netoutput);
std::string ProfilingUtils::GetTraceNetoutput(const std::vector<CNodePtr> &cnode_exec_order,
const nlohmann::json &option) {
auto iter_end = option.find(kIterEndNode);
if (iter_end != option.end() && iter_end->is_string()) {
std::string iter_end_str = *iter_end;
if (!iter_end_str.empty()) {
MS_LOG(INFO) << "Get iter_end from profiling_option:" << iter_end_str;
return iter_end_str;
}
}
return GetGraphLastTbeKernelName(cnode_exec_order);
}
NotNull<CNodePtr> ProfilingUtils::CreateProfilingCNode(const ProfilingContent &profiling_content,
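
Note: fp_point and bp_point (and the iteration-end node) are now read from the profiling options JSON held in MS_CTX_PROFILING_OPTIONS instead of from environment variables. A minimal standalone sketch (hypothetical node names and option values) of the lookup that GetContextProfilingOption() and GetTraceBegin() now perform:

#include <iostream>
#include <string>
#include <nlohmann/json.hpp>

int main() {
  // Hypothetical contents of MS_CTX_PROFILING_OPTIONS; real node names depend on the graph.
  const std::string prof_options_str =
      R"({"fp_point": "Default/Conv2D-op1", "bp_point": "Gradients/Conv2DBackpropFilter-op42"})";
  nlohmann::json option = nlohmann::json::parse(prof_options_str);
  auto iter = option.find("fp_point");
  if (iter != option.end() && iter->is_string() && !iter->get<std::string>().empty()) {
    std::cout << "fp_point override: " << iter->get<std::string>() << std::endl;
  }
  return 0;
}

An empty or missing key falls back to the automatic detection over the kernel execution order, as before.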

@ -118,9 +118,9 @@ class ProfilingUtils {
NotNull<session::KernelGraph *> graph_ptr);
static CNodePtr CreateProfilingCNodeWithStream(const AnfNodePtr &anf_node, const ProfilingContent &profiling_content,
NotNull<session::KernelGraph *> graph_ptr);
static std::string GetTraceBegin(const std::vector<CNodePtr> &cnode_exec_order);
static std::string GetTraceBpEnd(const std::vector<CNodePtr> &cnode_exec_order);
static std::string GetTraceNetoutput(const std::vector<CNodePtr> &cnode_exec_order);
static std::string GetTraceBegin(const std::vector<CNodePtr> &cnode_exec_order, const nlohmann::json &option);
static std::string GetTraceBpEnd(const std::vector<CNodePtr> &cnode_exec_order, const nlohmann::json &option);
static std::string GetTraceNetoutput(const std::vector<CNodePtr> &cnode_exec_order, const nlohmann::json &option);
static std::string GetGraphLastTbeKernelName(const std::vector<CNodePtr> &cnode_exec_order);
static void GetTraceHccl(const std::vector<CNodePtr> &cnode_exec_order,
NotNull<ProfilingTraceInfo *> profiling_trace);

@ -16,7 +16,7 @@
#include <algorithm>
#include "runtime/device/ascend/profiling/reporter/desc_reporter.h"
#include "runtime/device/ascend/profiling/plugin_impl.h"
#include "runtime/device/ascend/profiling/profiling_manager.h"
#include "utils/log_adapter.h"
constexpr size_t kReportMaxLen = 2048;
@ -27,16 +27,13 @@ namespace ascend {
DescReporter::~DescReporter() = default;
void DescReporter::ReportByLine(const std::string &data, const std::string &file_name) const {
auto reporter = PluginImpl::GetPluginReporter();
MS_EXCEPTION_IF_NULL(reporter);
auto tot_size = data.size();
size_t cur_size = 0;
while (cur_size < tot_size) {
size_t remain_size = tot_size - cur_size;
size_t report_size = std::min(remain_size, kReportMaxLen);
Msprof::Engine::ReporterData report_data{};
ReporterData report_data{};
report_data.deviceId = device_id_;
report_data.dataLen = report_size;
report_data.data = (unsigned char *)data.c_str() + cur_size;
@ -44,7 +41,7 @@ void DescReporter::ReportByLine(const std::string &data, const std::string &file
if (ret != 0) {
MS_LOG(EXCEPTION) << "Memcpy_s report data tag failed";
}
auto report_ret = reporter->Report(&report_data);
auto report_ret = ProfilingManager::GetInstance().CallMsprofReport(NOT_NULL(&report_data));
if (report_ret != 0) {
MS_LOG(EXCEPTION) << "Report data failed";
}

@ -515,6 +515,10 @@ CNodePtr KernelAdjust::CreateStreamAssignAddnOP(const std::shared_ptr<session::K
selected_kernel_builder.SetKernelType(KernelType::TBE_KERNEL);
MS_EXCEPTION_IF_NULL(switch_loop_input.at(kCurLoopCountParamName));
assign_add_one->set_abstract(switch_loop_input.at(kCurLoopCountParamName)->abstract());
// add AssignAdd op to kernel ref node map
session::AnfWithOutIndex final_pair = std::make_pair(assign_add_one, 0);
session::KernelWithIndex kernel_with_index = AnfAlgo::VisitKernel(AnfAlgo::GetInputNode(assign_add_one, 0), 0);
kernel_graph_ptr->AddRefCorrespondPairs(final_pair, kernel_with_index);
return assign_add_one;
}

@ -99,6 +99,8 @@ class KernelRuntime {
#endif
}
virtual void PreInit() {}
protected:
virtual DeviceAddressPtr CreateDeviceAddress(void *device_ptr, size_t device_size, const string &format,
TypeId type_id) = 0;

Some files were not shown because too many files have changed in this diff.
