!6342 fix ci code warning

Merge pull request !6342 from jjfeing/master
pull/6342/MERGE
commit b3c7515ba0 (committed by mindspore-ci-bot via Gitee)

@@ -30,7 +30,6 @@
 namespace mindspore {
 #define MS_API __attribute__((visibility("default")))
 namespace inference {
 enum DataType {
   kMSI_Unknown = 0,
   kMSI_Bool = 1,

@@ -209,7 +208,6 @@ class VectorInferTensorWrapRequest : public RequestBase {
   }
   const std::vector<InferTensor> &tensor_list_;
 };
 }  // namespace inference
 }  // namespace mindspore
 #endif  // MINDSPORE_INCLUDE_INFER_TENSOR_H_

@@ -25,7 +25,6 @@
 namespace mindspore {
 namespace inference {
 enum StatusCode { SUCCESS = 0, FAILED, INVALID_INPUTS };
 class Status {

@@ -57,7 +57,6 @@ CNodePtr Insert(const FuncGraphPtr &func_graph, const CNodePtr &cnode, const std
     } else {
       new_node = kernel_graph->NewCNode(cnode);
     }
   } else if (op_name == kBasicLSTMCellWeightGradOpName) {
     std::vector<AnfNodePtr> make_tuple_inputs = {NewValueNode(prim::kPrimMakeTuple)};
     size_t out_num = AnfAlgo::GetOutputTensorNum(cnode);

@@ -208,7 +208,6 @@ void AscendControlParser::LinkGraph(NotNull<KernelGraphPtr> kg) {
   memo.clear();
   // assign label resource
   device::ascend::AscendLabelAssign::GetInstance().AssignLabel(kg);
-  // AttachChildGraphToReturnNode(kg, NOT_NULL(&memo));
 }

 void AscendControlParser::EraseParameter(NotNull<KernelGraphPtr> root_graph,

@@ -68,7 +68,6 @@ void E2eDumpUtil::DumpGPUMemToFile(const std::string &file_path, const std::stri
   TensorLoader *tensor_loader = debug_services->tensor_loader();
   auto ret = tensor_loader->DumpTensorToFile(original_kernel_name, trans_flag, file_path, format, int_shapes, type,
                                              addr->type_id(), addr->format(), slot);
   if (!ret) {
     MS_LOG(ERROR) << "DumpTensorToFile Failed: flag:" << std::to_string(trans_flag) << ", path:" << file_path
                   << ", host_format:" << format;

@@ -29,7 +29,6 @@ void AscendMemoryManager::MallocDeviceMemory() {
   auto context_mem = GetDeviceMemSizeFromContext();
   device_mem_size_ = context_mem == 0 ? kAscendDeviceMemSize : context_mem;
   auto ret = rtMalloc(reinterpret_cast<void **>(&device_mem_base_), device_mem_size_, RT_MEMORY_HBM);
   if (ret != RT_ERROR_NONE) {
     MS_EXCEPTION(DeviceProcessError) << "rtMalloc mem size[" << device_mem_size_ << "] fail, ret[" << ret << "]";
   }

@@ -542,7 +542,6 @@ void AscendStreamAssign::InsertStreamActiveForCommon(const NotNull<KernelGraphPt
   if (AnfAlgo::GetCNodeName(cur_cnode_ptr) == kStreamSwitchOpName) {
     MS_LOG(INFO) << "Insert StreamActive op after FP StreamSwitch for stream parallel";
-    // UpdateStreamSwitch(graph_ptr, cur_cnode_ptr, &update_cnode_list);
     update_cnode_list.emplace_back(cur_cnode_ptr);
   } else {
     update_cnode_list.emplace_back(cur_cnode_ptr);

@@ -57,9 +57,6 @@ constexpr const char *kOpTypeOpDebug = "Opdebug";
 namespace mindspore {
 namespace device {
 namespace ascend {
-static void DumpKernelOutput(const CNodePtr &kernel, void *args, NotNull<aicpu::dump::Task *> task);
-static void DumpKernelInput(const CNodePtr &kernel, void *args, NotNull<aicpu::dump::Task *> task);
-static void RtLoadDumpData(const aicpu::dump::OpMappingInfo &dump_info, void **ptr);
 DataDumper::~DataDumper() {
   ReleaseDevMem(&dev_load_mem_);
@@ -328,7 +325,7 @@ void DataDumper::OpDebugUnregister() {
   }
 }

-void RtLoadDumpData(const aicpu::dump::OpMappingInfo &dump_info, void **ptr) {
+void DataDumper::RtLoadDumpData(const aicpu::dump::OpMappingInfo &dump_info, void **ptr) {
   std::string proto_str;
   size_t proto_size = dump_info.ByteSizeLong();
   bool ret = dump_info.SerializeToString(&proto_str);

@@ -357,7 +354,7 @@ void RtLoadDumpData(const aicpu::dump::OpMappingInfo &dump_info, void **ptr) {
   }
 }

-void DumpKernelOutput(const CNodePtr &kernel, void *args, NotNull<aicpu::dump::Task *> task) {
+void DataDumper::DumpKernelOutput(const CNodePtr &kernel, void *args, NotNull<aicpu::dump::Task *> task) {
   if (!DumpJsonParser::GetInstance().OutputNeedDump()) {
     MS_LOG(INFO) << "Skip dump output";
     return;

@@ -391,7 +388,7 @@ void DumpKernelOutput(const CNodePtr &kernel, void *args, NotNull<aicpu::dump::T
   }
 }

-void DumpKernelInput(const CNodePtr &kernel, void *args, NotNull<aicpu::dump::Task *> task) {
+void DataDumper::DumpKernelInput(const CNodePtr &kernel, void *args, NotNull<aicpu::dump::Task *> task) {
   if (!DumpJsonParser::GetInstance().InputNeedDump()) {
     MS_LOG(INFO) << "Skip dump input";
     return;

@@ -65,6 +65,9 @@ class DataDumper {
   void SetOpDebugMappingInfo(const NotNull<aicpu::dump::OpMappingInfo *> dump_info) const;
   void ConstructDumpTask(NotNull<const CNodePtr &> kernel, NotNull<aicpu::dump::Task *> dump_task) const;
   void GetNeedDumpKernelList(NotNull<std::map<std::string, CNodePtr> *> kernel_map) const;
+  static void DumpKernelOutput(const CNodePtr &kernel, void *args, NotNull<aicpu::dump::Task *> task);
+  static void DumpKernelInput(const CNodePtr &kernel, void *args, NotNull<aicpu::dump::Task *> task);
+  static void RtLoadDumpData(const aicpu::dump::OpMappingInfo &dump_info, void **ptr);
   std::function<void *()> model_handle_;
   uint32_t debug_task_id_;
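Taken together, the three DataDumper hunks above are one refactor: the file-scope static helper declarations at the top of the .cc are deleted, the definitions are qualified with DataDumper::, and the declarations move into the class body as private static members, so each helper has exactly one declaration for the static checker to match against its definition. A minimal, self-contained sketch of the same pattern (hypothetical Dumper class, not MindSpore code):

// Before: the .cc carried file-scope forward declarations, e.g.
//   static void DumpOne(const std::string &name);
// After: the helper is declared once, as a private static member.
#include <iostream>
#include <string>

class Dumper {
 public:
  void Run(const std::string &name) { DumpOne(name); }

 private:
  // Same internal-use intent as a file-scope static, but with a
  // single declaration that the definition below must match.
  static void DumpOne(const std::string &name);
};

void Dumper::DumpOne(const std::string &name) {  // definition in the .cc
  std::cout << "dump: " << name << std::endl;
}

int main() {
  Dumper{}.Run("conv2d");
  return 0;
}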

@@ -541,7 +541,6 @@ KernelSelectStatus SelectKernelInfo(const CNodePtr &kernel_node, KernelType kern
   kernel::KernelQuery(kernel_node, &kernel_info_list, kernel_type);
   auto select_status = SetMatchedKernelInfo(kernel_node, kernel_info_list);
   // If aicore not find valid kernel info reloading aicpu kernel info list to find it
   if (select_status == kNoMatched) {
     MS_LOG(WARNING) << "The node [" << kernel_node->DebugString()
                     << "] cannot find valid TBE kernel info, try to get aicpu kernel info";

@@ -33,7 +33,7 @@ inline void *GetMPIAdapterHandle() {
   return handle;
 }

-inline void *GetMPIAdapterFunc(const char *name) {
+void *GetMPIAdapterFunc(const char *name) {
   static void *handle = GetMPIAdapterHandle();
   if (handle == nullptr) {
     MS_LOG(EXCEPTION) << "Load lib " << name << " failed, make sure you have installed it!";
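For context, GetMPIAdapterFunc resolves MPI adapter symbols from a lazily opened shared library; the hunk itself only drops inline from a function defined in a .cc file, presumably what the CI linter flagged. A sketch of the underlying pattern, assuming a dlopen/dlsym implementation and a placeholder library name (not the real MindSpore code; link with -ldl on POSIX):

#include <dlfcn.h>
#include <stdexcept>
#include <string>

// Open the adapter library once; a function-local static gives
// thread-safe lazy initialization in C++11 and later.
inline void *GetAdapterHandle() {
  static void *handle = dlopen("libexample_adapter.so", RTLD_NOW | RTLD_LOCAL);
  return handle;
}

// Resolve a symbol by name from the cached handle.
void *GetAdapterFunc(const char *name) {
  void *handle = GetAdapterHandle();
  if (handle == nullptr) {
    throw std::runtime_error(std::string("load library failed for ") + name);
  }
  return dlsym(handle, name);  // nullptr if the symbol is absent
}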

@@ -28,7 +28,6 @@ DvppProcess::DvppProcess() {}
 DvppProcess::~DvppProcess() {}
 static uint32_t ToEven(uint32_t num) { return (num + 1) / 2 * 2; }
 static uint32_t ToOdd(uint32_t num) {
   if (num == 0) {
     return 1;

@@ -13,13 +13,13 @@
 # limitations under the License.
 # ============================================================================
+import pytest
+import numpy as np
 import mindspore as ms
 from mindspore.nn import ReLU
 from mindspore.nn import Cell
 from mindspore.common.tensor import Tensor
 from mindspore.ops import operations as P
-import numpy as np
-import pytest


 @pytest.mark.level0
 @pytest.mark.platform_arm_ascend_training
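The last hunk only reorders imports: pylint's wrong-import-order check (C0411) wants third-party imports such as pytest and numpy grouped ahead of first-party imports, which is presumably the CI warning here. A minimal illustration of the convention, assuming mindspore counts as first-party in this repo's lint configuration:

# Import order that satisfies pylint C0411 (wrong-import-order):
# third-party packages first, then first-party, each in its own group.
import pytest           # third-party
import numpy as np      # third-party

import mindspore as ms  # first-party (this repo's own package)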
