parent
dd9a5a385a
commit
fb343bd607
@ -0,0 +1,65 @@
|
||||
/**
|
||||
* Copyright 2019 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "device/ascend/ascend_memory_manager.h"
|
||||
#include "device/ascend/ascend_memory_allocator.h"
|
||||
#include "utils/context/ms_context.h"
|
||||
#include "runtime/mem.h"
|
||||
namespace mindspore {
|
||||
namespace device {
|
||||
namespace ascend {
|
||||
// Fixed HBM budget reserved by the Ascend backend at startup: 20 GiB.
static const uint64_t ASCEND_MEM_SIZE = 20;
// Same budget expressed in bytes (20 << 30).
static const uint64_t ASCEND_MEM_SIZE_BYTE = (ASCEND_MEM_SIZE << 30);
|
||||
|
||||
void AscendMemoryManager::MallocDeviceMemory() {
|
||||
device_mem_size_ = ASCEND_MEM_SIZE_BYTE;
|
||||
static_mem_offset_ = FloatToSize(device_mem_size_ * GRAPH_INIT_ASCEND_MEM_RATIO);
|
||||
auto ret = rtMalloc(reinterpret_cast<void **>(&device_mem_base_), static_mem_offset_, RT_MEMORY_HBM);
|
||||
if (ret != RT_ERROR_NONE) {
|
||||
MS_EXCEPTION(DeviceProcessError) << "rtMalloc mem size[" << static_mem_offset_ << "] fail, ret[" << ret << "]";
|
||||
}
|
||||
device_mem_pool_size_ = FloatToSize(device_mem_size_ * (1 - GRAPH_INIT_ASCEND_MEM_RATIO));
|
||||
ret = rtMalloc(reinterpret_cast<void **>(&device_mem_pool_base_), device_mem_pool_size_, RT_MEMORY_HBM);
|
||||
if (ret != RT_ERROR_NONE) {
|
||||
MS_EXCEPTION(DeviceProcessError) << "rtMalloc mem size[" << device_mem_pool_size_ << "] fail, ret[" << ret << "]";
|
||||
}
|
||||
AscendMemoryAllocator::GetInstance().set_device_mem_pool_base(device_mem_pool_base_);
|
||||
AscendMemoryAllocator::GetInstance().set_device_mem_pool_size(device_mem_pool_size_);
|
||||
}
|
||||
|
||||
void AscendMemoryManager::FreeDeviceMemory() {
|
||||
if (device_mem_base_ != nullptr) {
|
||||
auto ret = rtFree(device_mem_base_);
|
||||
if (ret != RT_ERROR_NONE) {
|
||||
MS_LOG(ERROR) << "rtFree mem size[" << device_mem_size_ << "] fail, ret[" << ret << "]";
|
||||
}
|
||||
device_mem_base_ = nullptr;
|
||||
}
|
||||
if (device_mem_pool_base_ != nullptr) {
|
||||
auto ret = rtFree(device_mem_pool_base_);
|
||||
if (ret != RT_ERROR_NONE) {
|
||||
MS_LOG(ERROR) << "rtFree mem size[" << device_mem_pool_size_ << "] fail, ret[" << ret << "]";
|
||||
}
|
||||
device_mem_pool_base_ = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
void *AscendMemoryManager::AllocTensorMemDynamic(size_t size) {
|
||||
return AscendMemoryAllocator::GetInstance().AllocTensorMem(size);
|
||||
}
|
||||
} // namespace ascend
|
||||
} // namespace device
|
||||
} // namespace mindspore
|
@ -0,0 +1,35 @@
|
||||
/**
|
||||
* Copyright 2019 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_MINDSPORE_CCSRC_DEVICE_ASCEND_ASCEND_MEMORY_MANAGER_H_
|
||||
#define MINDSPORE_MINDSPORE_CCSRC_DEVICE_ASCEND_ASCEND_MEMORY_MANAGER_H_
|
||||
#include "device/memory_manager.h"
|
||||
namespace mindspore {
|
||||
namespace device {
|
||||
namespace ascend {
|
||||
class AscendMemoryManager : public MemoryManager {
|
||||
public:
|
||||
AscendMemoryManager() = default;
|
||||
virtual ~AscendMemoryManager() = default;
|
||||
|
||||
void MallocDeviceMemory() override;
|
||||
void FreeDeviceMemory() override;
|
||||
void *AllocTensorMemDynamic(size_t size) override;
|
||||
};
|
||||
} // namespace ascend
|
||||
} // namespace device
|
||||
} // namespace mindspore
|
||||
#endif // MINDSPORE_MINDSPORE_CCSRC_DEVICE_ASCEND_ASCEND_MEMORY_MANAGER_H_
|
@ -0,0 +1,88 @@
|
||||
/**
|
||||
* Copyright 2019 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "device/gpu/gpu_memory_manager.h"
|
||||
#include "device/gpu/gpu_memory_allocator.h"
|
||||
#include "utils/context/ms_context.h"
|
||||
#include "utils/convert_utils.h"
|
||||
namespace mindspore {
|
||||
namespace device {
|
||||
namespace gpu {
|
||||
void *GPUMemoryManager::AllocTensorMemDynamic(size_t size) {
|
||||
return GPUMemoryAllocator::GetInstance().AllocTensorMem(size);
|
||||
}
|
||||
|
||||
// Returns a pool-allocated tensor buffer to the GPU pool singleton.
void GPUMemoryManager::FreeTensorMemDynamic(void *device_ptr) {
  auto &pool = GPUMemoryAllocator::GetInstance();
  pool.FreeTensorMem(device_ptr);
}
|
||||
|
||||
void GPUMemoryManager::MallocDeviceMemory() {
|
||||
auto context_ptr = MsContext::GetInstance();
|
||||
MS_EXCEPTION_IF_NULL(context_ptr);
|
||||
// If use the dynamic memory pool, then alloc the first memory block to init.
|
||||
if (context_ptr->enable_dynamic_mem_pool()) {
|
||||
auto device_addr = AllocTensorMemDynamic(1);
|
||||
if (!device_addr) {
|
||||
MS_LOG(ERROR) << "Dynamic memory pool init error.";
|
||||
}
|
||||
} else {
|
||||
// Need to reserve 20% space for dynamic memory
|
||||
const float init_gpu_mem_ratio = 0.8;
|
||||
size_t mem_size = FloatToSize(GPUMemoryAllocator::GetInstance().free_mem_size() * init_gpu_mem_ratio);
|
||||
auto alloc_size =
|
||||
GPUMemoryAllocator::GetInstance().AllocDeviceMem(mem_size, reinterpret_cast<void **>(&device_mem_base_));
|
||||
device_mem_size_ = alloc_size;
|
||||
static_mem_offset_ = device_mem_size_;
|
||||
}
|
||||
}
|
||||
|
||||
void GPUMemoryManager::FreeDeviceMemory() {
|
||||
if (device_mem_base_ != nullptr) {
|
||||
if (!GPUMemoryAllocator::GetInstance().FreeDeviceMem(device_mem_base_)) {
|
||||
MS_LOG(EXCEPTION) << "Could not free gpu device memory.";
|
||||
}
|
||||
}
|
||||
GPUMemoryAllocator::GetInstance().ReleaseDeviceRes();
|
||||
}
|
||||
|
||||
uint8_t *GPUMemoryManager::MallocStaticMem(size_t size, bool) {
|
||||
auto context_ptr = MsContext::GetInstance();
|
||||
MS_EXCEPTION_IF_NULL(context_ptr);
|
||||
if (context_ptr->enable_dynamic_mem_pool()) {
|
||||
auto device_ptr = AllocTensorMemDynamic(size);
|
||||
MS_EXCEPTION_IF_NULL(device_ptr);
|
||||
return AddressOffset(device_ptr, 0);
|
||||
}
|
||||
|
||||
auto align_size = GetCommonAlignSize(size);
|
||||
if (static_mem_offset_ < align_size) {
|
||||
MS_LOG(EXCEPTION) << "Out of memory!!! total[" << device_mem_size_ << "](dynamic[" << total_dynamic_size_
|
||||
<< "] static[" << total_static_size_ << "])"
|
||||
<< " malloc [" << align_size << "] failed!";
|
||||
}
|
||||
auto offset = static_mem_offset_ - align_size;
|
||||
if (dynamic_mem_offset_ > offset) {
|
||||
MS_LOG(EXCEPTION) << "Out of memory!!! total[" << device_mem_size_ << "](dynamic[" << total_dynamic_size_
|
||||
<< "] static[" << total_static_size_ << "])"
|
||||
<< " malloc [" << align_size << "] failed!";
|
||||
}
|
||||
total_static_size_ += align_size;
|
||||
static_mem_offset_ = offset;
|
||||
return device_mem_base_ + offset;
|
||||
}
|
||||
} // namespace gpu
|
||||
} // namespace device
|
||||
} // namespace mindspore
|
@ -0,0 +1,40 @@
|
||||
/**
|
||||
* Copyright 2019 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_MINDSPORE_CCSRC_DEVICE_GPU_GPU_MEMORY_MANAGER_H_
|
||||
#define MINDSPORE_MINDSPORE_CCSRC_DEVICE_GPU_GPU_MEMORY_MANAGER_H_
|
||||
#include "device/memory_manager.h"
|
||||
namespace mindspore {
|
||||
namespace device {
|
||||
namespace gpu {
|
||||
class GPUMemoryManager : public MemoryManager {
|
||||
public:
|
||||
GPUMemoryManager() = default;
|
||||
virtual ~GPUMemoryManager() = default;
|
||||
|
||||
void MallocDeviceMemory() override;
|
||||
void FreeDeviceMemory() override;
|
||||
|
||||
void *AllocTensorMemDynamic(size_t size) override;
|
||||
void FreeTensorMemDynamic(void *device_ptr) override;
|
||||
|
||||
protected:
|
||||
uint8_t *MallocStaticMem(size_t size, bool communication_mem);
|
||||
};
|
||||
} // namespace gpu
|
||||
} // namespace device
|
||||
} // namespace mindspore
|
||||
#endif // MINDSPORE_MINDSPORE_CCSRC_DEVICE_GPU_GPU_MEMORY_MANAGER_H_
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,170 @@
|
||||
/**
|
||||
* Copyright 2019 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "device/memory_manager.h"
|
||||
#include "session/anf_runtime_algorithm.h"
|
||||
#include "utils/context/ms_context.h"
|
||||
using mindspore::memreuse::BestFitMemReuse;
|
||||
using mindspore::memreuse::MemReuseUtilPtr;
|
||||
namespace mindspore {
|
||||
namespace device {
|
||||
// The raw device regions are released by the backend's FreeDeviceMemory(),
// not here; the destructor only drops the cached pointers and the reuse plan.
MemoryManager::~MemoryManager() {
  mem_reuse_util_ptr_ = nullptr;
  device_mem_pool_base_ = nullptr;
  device_mem_base_ = nullptr;
}
|
||||
|
||||
// Rounds input_size up to a multiple of kMemAlignSize (512 bytes).
// NOTE(review): the usual round-up bias is `kMemAlignSize - 1` (as used in
// GetCommunicationAlignSize); the `+ 31` here adds up to one extra alignment
// unit of slack — presumably intentional padding, but worth confirming.
size_t MemoryManager::GetCommonAlignSize(size_t input_size) const {
  return (input_size + kMemAlignSize + 31) / kMemAlignSize * kMemAlignSize;
}
|
||||
|
||||
// Rounds input_size up to a multiple of kMemAlignSize and reserves two extra
// alignment units. One unit is the leading pad skipped by MallocStaticMem /
// MallocDynamicMem for communication memory; the second is presumably a
// trailing pad for communication ops — confirm against the hccl users.
size_t MemoryManager::GetCommunicationAlignSize(size_t input_size) const {
  return (input_size + kMemAlignSize - 1) / kMemAlignSize * kMemAlignSize + 2 * kMemAlignSize;
}
|
||||
|
||||
// Builds a best-fit memory-reuse plan for the graph and backs the whole plan
// with a single dynamic-region allocation.
void MemoryManager::InitReuseDynamicMemory(session::KernelGraph *graph) {
  MS_EXCEPTION_IF_NULL(graph);
  auto reuse_util = std::make_shared<memreuse::MemReuseUtil>();
  MS_EXCEPTION_IF_NULL(reuse_util);
  // set all infos
  reuse_util->SetAllInfo(graph);
  auto best_fit = std::make_shared<BestFitMemReuse>();
  MS_EXCEPTION_IF_NULL(best_fit);
  best_fit->Reuse(reuse_util.get());
  const size_t total_allocated_size = best_fit->GetAllocatedSize();
  MS_LOG(INFO) << "TotalReuseDynamicSize [" << total_allocated_size << "]";
  mem_reuse_util_ptr_ = reuse_util;
  // One block for the entire plan; node offsets are resolved relative to it.
  auto base_ptr = MallocDynamicMem(total_allocated_size, false);
  mem_reuse_util_ptr_->set_mem_base(base_ptr);
}
|
||||
|
||||
uint8_t *MemoryManager::MallocOutputMem(const AnfNodePtr &node, size_t index, int flag, size_t size) {
|
||||
MS_EXCEPTION_IF_NULL(node);
|
||||
auto context_ptr = MsContext::GetInstance();
|
||||
MS_EXCEPTION_IF_NULL(context_ptr);
|
||||
uint8_t *ptr = nullptr;
|
||||
if (AnfAlgo::IsCommunicationOp(node)) {
|
||||
bool communication_mem = false;
|
||||
if (context_ptr->enable_hccl()) {
|
||||
communication_mem = true;
|
||||
}
|
||||
if (flag == kStaticMem) {
|
||||
ptr = MallocStaticMem(size, communication_mem);
|
||||
} else {
|
||||
ptr = MallocDynamicMem(size, communication_mem);
|
||||
}
|
||||
return ptr;
|
||||
}
|
||||
|
||||
if (flag == kStaticMem) {
|
||||
ptr = MallocStaticMem(size, false);
|
||||
} else if (flag == kDynamicMem) {
|
||||
ptr = MallocDynamicMem(size, false);
|
||||
} else if (flag == kReuseDynamicMem) {
|
||||
ptr = mem_reuse_util_ptr_->GetNodeOutputPtr(node, index);
|
||||
}
|
||||
return ptr;
|
||||
}
|
||||
|
||||
uint8_t *MemoryManager::MallocWorkSpaceMem(const AnfNodePtr &node, size_t index, int flag, size_t size) {
|
||||
if (flag == kReuseDynamicMem) {
|
||||
return mem_reuse_util_ptr_->GetNodeWorkSpacePtr(node, index);
|
||||
}
|
||||
return MallocDynamicMem(size, false);
|
||||
}
|
||||
|
||||
uint8_t *MemoryManager::MallocMem(int flag, size_t size) {
|
||||
uint8_t *ptr = nullptr;
|
||||
if (flag == kStaticMem) {
|
||||
ptr = MallocStaticMem(size, false);
|
||||
} else if (flag == kDynamicMem) {
|
||||
ptr = MallocDynamicMem(size, false);
|
||||
}
|
||||
return ptr;
|
||||
}
|
||||
|
||||
uint8_t *MemoryManager::MallocStaticMem(size_t size, bool communication_mem) {
|
||||
size_t align_size = 0;
|
||||
if (communication_mem) {
|
||||
align_size = GetCommunicationAlignSize(size);
|
||||
} else {
|
||||
align_size = GetCommonAlignSize(size);
|
||||
}
|
||||
if (static_mem_offset_ < align_size) {
|
||||
MS_LOG(EXCEPTION) << "Out of memory!!! total[" << device_mem_size_ << "](dynamic[" << total_dynamic_size_
|
||||
<< "] static[" << total_static_size_ << "])"
|
||||
<< " malloc [" << align_size << "] failed!";
|
||||
}
|
||||
total_static_size_ += align_size;
|
||||
auto offset = static_mem_offset_ - align_size;
|
||||
if (dynamic_mem_offset_ > offset) {
|
||||
MS_LOG(EXCEPTION) << "Out of memory!!! total[" << device_mem_size_ << "](dynamic[" << total_dynamic_size_
|
||||
<< "] static[" << total_static_size_ << "])"
|
||||
<< " malloc [" << align_size << "] failed!";
|
||||
}
|
||||
static_mem_offset_ = offset;
|
||||
if (communication_mem) {
|
||||
return device_mem_base_ + offset + kMemAlignSize;
|
||||
} else {
|
||||
return device_mem_base_ + offset;
|
||||
}
|
||||
}
|
||||
|
||||
uint8_t *MemoryManager::MallocDynamicMem(size_t size, bool communication_mem) {
|
||||
size_t align_size = 0;
|
||||
if (communication_mem) {
|
||||
align_size = GetCommunicationAlignSize(size);
|
||||
} else {
|
||||
align_size = GetCommonAlignSize(size);
|
||||
}
|
||||
uint64_t offset = dynamic_mem_offset_;
|
||||
auto new_offset = dynamic_mem_offset_ + align_size;
|
||||
if (new_offset > static_mem_offset_) {
|
||||
MS_LOG(EXCEPTION) << "Out of memory!!! total[" << device_mem_size_ << "](dynamic[" << total_dynamic_size_
|
||||
<< "] static[" << total_static_size_ << "])"
|
||||
<< " malloc [" << align_size << "] failed!";
|
||||
}
|
||||
total_dynamic_size_ += align_size;
|
||||
dynamic_mem_offset_ = new_offset;
|
||||
|
||||
if (communication_mem) {
|
||||
return device_mem_base_ + offset + kMemAlignSize;
|
||||
} else {
|
||||
return device_mem_base_ + offset;
|
||||
}
|
||||
}
|
||||
|
||||
void MemoryManager::MallocOpMemory(const DeviceAddressPtr address, size_t size) {
|
||||
auto device_ptr = AllocTensorMemDynamic(size);
|
||||
MS_EXCEPTION_IF_NULL(device_ptr);
|
||||
address->ptr_ = device_ptr;
|
||||
address->mem_dynamic_alloc_ = true;
|
||||
}
|
||||
|
||||
// Default pool allocation: the base manager has no dynamic memory pool, so
// this only flags a zero-size request and always reports no memory.
// Backends with a pool (Ascend/GPU) override it.
void *MemoryManager::AllocTensorMemDynamic(size_t size) {
  if (size == 0) {
    MS_LOG(ERROR) << "AllocTensorMemDynamic size is 0.";
  }
  return nullptr;
}
|
||||
|
||||
// Default pool release: no pool in the base manager, so this only flags a
// null pointer. Backends with a pool override it.
void MemoryManager::FreeTensorMemDynamic(void *device_ptr) {
  if (device_ptr == nullptr) {
    MS_LOG(ERROR) << "FreeTensorMemDynamic device_ptr is null.";
  }
}
|
||||
} // namespace device
|
||||
} // namespace mindspore
|
@ -0,0 +1,71 @@
|
||||
/**
|
||||
* Copyright 2019 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_MINDSPORE_CCSRC_DEVICE_MEMORY_MANAGER_H_
|
||||
#define MINDSPORE_MINDSPORE_CCSRC_DEVICE_MEMORY_MANAGER_H_
|
||||
#include <memory>
|
||||
#include "pre_activate/mem_reuse/mem_reuse.h"
|
||||
#include "pre_activate/mem_reuse/mem_reuse_allocator.h"
|
||||
namespace mindspore {
|
||||
namespace device {
|
||||
// Allocation-kind flags passed to MallocMem / MallocOutputMem /
// MallocWorkSpaceMem.
const int kStaticMem = 0;
const int kDynamicMem = 1;
const int kReuseDynamicMem = 2;
// Sentinel index (-1); by its name it selects all outputs of a node —
// confirm at call sites.
const int kGetAllOuts = -1;
// Device memory alignment unit in bytes.
const uint64_t kMemAlignSize = 512;
using MemReuseUtilPtr = mindspore::memreuse::MemReuseUtilPtr;
|
||||
|
||||
// Backend-agnostic device memory manager. Manages one device block split
// into a downward-growing static region and an upward-growing dynamic
// region, plus optional best-fit memory reuse and (in derived backends) a
// dynamic memory pool.
class MemoryManager {
 public:
  MemoryManager() = default;
  virtual ~MemoryManager();

  // Backend-specific acquisition / release of the underlying device block.
  virtual void MallocDeviceMemory() = 0;
  virtual void FreeDeviceMemory() = 0;
  // Rewinds the dynamic region so the next run can reuse it; the static
  // region is untouched.
  void ResetDynamicMemory() {
    total_dynamic_size_ = 0;
    dynamic_mem_offset_ = 0;
  }

  // Builds a best-fit reuse plan for the graph and backs it with one
  // dynamic-region allocation.
  void InitReuseDynamicMemory(session::KernelGraph *graph);
  // Per-node allocation entry points; `flag` is one of the k*Mem constants.
  uint8_t *MallocOutputMem(const AnfNodePtr &node, size_t index, int flag, size_t size);
  uint8_t *MallocWorkSpaceMem(const AnfNodePtr &node, size_t index, int flag, size_t size);
  virtual uint8_t *MallocMem(int flag, size_t size);

  // Alloc memory use the dynamic memory pool.
  virtual void *AllocTensorMemDynamic(size_t size);
  // Free memory use the dynamic memory pool.
  virtual void FreeTensorMemDynamic(void *device_ptr);
  // Backs a device address with pool memory and flags it as pool-owned.
  virtual void MallocOpMemory(const DeviceAddressPtr address, size_t size);
  size_t GetCommonAlignSize(size_t input_size) const;
  size_t GetCommunicationAlignSize(size_t input_size) const;

 protected:
  // Region carving; communication_mem selects the padded communication
  // layout (extra alignment, returned address past the leading pad).
  virtual uint8_t *MallocStaticMem(size_t size, bool communication_mem);
  virtual uint8_t *MallocDynamicMem(size_t size, bool communication_mem);
  uint8_t *device_mem_base_{nullptr};       // start of the main device block
  uint8_t *device_mem_pool_base_{nullptr};  // start of the pool block (Ascend)
  uint64_t device_mem_size_{0};
  uint64_t device_mem_pool_size_{0};
  uint64_t dynamic_mem_offset_{0};  // grows upwards from the block base
  uint64_t static_mem_offset_{0};   // grows downwards from the block top
  size_t total_static_size_ = 0;
  size_t total_dynamic_size_ = 0;
  MemReuseUtilPtr mem_reuse_util_ptr_{nullptr};
};
|
||||
} // namespace device
|
||||
} // namespace mindspore
|
||||
#endif // MINDSPORE_MINDSPORE_CCSRC_DEVICE_MEMORY_MANAGER_H_
|
Loading…
Reference in new issue