pull/14460/head
hwjiaorui 4 years ago
parent 3c30a9feed
commit dac67cbabb

@@ -89,7 +89,6 @@ class MatrixInverseGpuKernel : public GpuKernel {
    kernel_node_ = kernel_node;
    handle_ = device::gpu::GPUDeviceManager::GetInstance().GetCublasHandle();
    auto input_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
    if (input_shape.empty() || input_shape.size() < 2) {
      MS_LOG(EXCEPTION) << "The dimension of the input must be at least 2, but got " << input_shape.size();
    }
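The check above enforces that the input to the matrix inverse kernel has rank at least 2, i.e. a matrix or a batch of matrices. A minimal standalone sketch of the same validation, assuming a hypothetical ValidateMatrixShape helper outside the MindSpore kernel framework:

#include <cstddef>
#include <stdexcept>
#include <string>
#include <vector>

// Hypothetical helper mirroring the rank check in the kernel's Init().
void ValidateMatrixShape(const std::vector<std::size_t> &shape) {
  // A (batched) matrix inverse needs rank >= 2: [..., rows, cols].
  if (shape.size() < 2) {
    throw std::runtime_error("The dimension of the input must be at least 2, but got " +
                             std::to_string(shape.size()));
  }
}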

@@ -33,7 +33,6 @@ bool HcomAllBroadCastKernel::Launch(const std::vector<AddressPtr> &inputs,
  MS_EXCEPTION_IF_NULL(stream_ptr);
  auto hccl_result = HcclBroadcast(inputs[0]->addr, hccl_count_, hccl_data_type_list_[0], root_id_,
                                   HcclContext::GetInstance().hccl_comm(), stream_ptr);
  if (hccl_result != HCCL_SUCCESS) {
    MS_LOG(ERROR) << "HcomBroadcastOp : hcom_broadcast fail, return: " << hccl_result;
    return false;
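The launch above follows the standard collective-call pattern: issue the HCCL op, compare the returned status against HCCL_SUCCESS, and log and return false on failure so the caller can abort. A hedged sketch of that pattern with illustrative stand-ins (Status, FakeBroadcast, and LaunchCollective are not HCCL API):

#include <iostream>

enum Status { kSuccess = 0, kError = 1 };   // Stand-in for HcclResult.
Status FakeBroadcast() { return kSuccess; }  // Stand-in for HcclBroadcast.

bool LaunchCollective() {
  Status result = FakeBroadcast();
  if (result != kSuccess) {
    // Log the raw status code and propagate failure, as Launch() does.
    std::cerr << "broadcast fail, return: " << result << '\n';
    return false;
  }
  return true;
}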

@@ -33,7 +33,6 @@ using std::vector;
namespace mindspore {
namespace somas {
Status SomasSolverCore::MemoryAllocationSolver() {
  auto start = std::chrono::system_clock::now();
  Status retval = SUCCESS;
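MemoryAllocationSolver takes a std::chrono timestamp on entry so the solver's elapsed time can be measured once it returns. A minimal sketch of that timing pattern (TimedSolve is a hypothetical name):

#include <chrono>
#include <iostream>

void TimedSolve() {
  auto start = std::chrono::system_clock::now();
  // ... run the allocation solver here ...
  auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
      std::chrono::system_clock::now() - start);
  std::cout << "solver took " << elapsed.count() << " ms\n";
}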

@@ -27,7 +27,6 @@
namespace mindspore {
namespace somas {
constexpr auto kSolNumThresholdMultiThread = 8;
Status SomasSolverPre::checkTensors(TensorsDescMap *pTensors, uint32_t index1, uint32_t index2) {
  auto &tensors = *pTensors;

@@ -212,6 +212,5 @@ std::string AscendInferenceSession::InputsInfo(const std::vector<ParameterPtr> &
  }
  return graph + " " + actual;
}
}  // namespace session
}  // namespace mindspore

@@ -63,7 +63,6 @@ bool IsSameServer(const std::vector<uint32_t> &rank_ids) {
string DoGetHcomGroup(const string &original_group) {
  string communi_parallel_mode = parallel::ParallelContext::GetInstance()->communi_parallel_mode();
  if (communi_parallel_mode == parallel::ALL_GROUP_PARALLEL) {
    return original_group;
  }

@@ -112,7 +112,6 @@ void ProfilingManager::PluginUnInit() const {
Status ProfilingManager::GetProfConf(const NotNull<MsprofGeOptions *> prof) {
  string job_id = std::to_string(GetJobId());
  if (memcpy_s(prof->jobId, sizeof(prof->jobId), job_id.c_str(), strlen(job_id.c_str())) != EOK) {
    MS_LOG(ERROR) << "Copy job_id failed.";
    return PROF_FAILED;
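GetProfConf copies the job id string into the fixed-size jobId field with memcpy_s, which takes the destination capacity and returns EOK on success. memcpy_s comes from the optional C11 Annex K, so a portable sketch of the same bounded-copy-with-error-check can use snprintf instead (ProfOptions and SetJobId are hypothetical stand-ins for MsprofGeOptions and the surrounding code):

#include <cstdio>
#include <string>

struct ProfOptions {
  char jobId[32];  // Fixed-size field, like MsprofGeOptions::jobId.
};

bool SetJobId(ProfOptions *prof, const std::string &job_id) {
  if (job_id.size() >= sizeof(prof->jobId)) {
    return false;  // Would truncate; treat like the != EOK branch above.
  }
  // snprintf never writes past the given capacity and NUL-terminates.
  std::snprintf(prof->jobId, sizeof(prof->jobId), "%s", job_id.c_str());
  return true;
}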

@@ -21,7 +21,6 @@
namespace mindspore {
namespace device {
namespace cpu {
uint8_t *CPUMemoryManager::MallocStaticMem(size_t size, bool, uint32_t) {
  void *ptr = malloc(size);
  if (ptr != nullptr) {
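MallocStaticMem is a thin wrapper over malloc that null-checks the result before handing the block back as uint8_t*. A minimal sketch of that null-checked allocation (MallocChecked is a hypothetical name):

#include <cstdint>
#include <cstdlib>

std::uint8_t *MallocChecked(std::size_t size) {
  void *ptr = std::malloc(size);
  if (ptr == nullptr) {
    return nullptr;  // Let the caller report the allocation failure.
  }
  return static_cast<std::uint8_t *>(ptr);
}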

@@ -27,7 +27,6 @@
namespace mindspore {
namespace device {
class DynamicKernel {
 public:
  DynamicKernel(void *stream, const CNodePtr &cnode_ptr)

@@ -1268,7 +1268,6 @@ session::KernelWithIndex GPUKernelRuntime::GetPrevNodeOutput(const AnfNodePtr &n
  return addr_iter->second[i];
}
}  // namespace gpu
}  // namespace device
}  // namespace mindspore
