|
|
|
@@ -25,14 +25,12 @@ namespace allocation {
|
|
|
|
|
bool CUDAAllocator::IsAllocThreadSafe() const {
  // This allocator may be invoked from multiple threads concurrently.
  return true;
}
|
|
|
|
|
// Releases a device allocation previously produced by AllocateImpl.
// The span in SOURCE contained interleaved old/new diff lines (a dangling
// dynamic_cast<CUDAAllocation*> plus two conflicting PADDLE_ENFORCE_EQ
// openers); this is the coherent Allocation*-based version, matching the
// `new Allocation(...)` construction used by AllocateImpl.
void CUDAAllocator::FreeImpl(Allocation* allocation) {
  // Ensure CUDA calls below target the device this allocator owns.
  platform::CUDADeviceGuard guard(place_.device);
  // The allocation must belong to this allocator's place; freeing memory
  // from another device/allocator would be a caller bug.
  PADDLE_ENFORCE_EQ(boost::get<platform::CUDAPlace>(allocation->place()),
                    place_);
  PADDLE_ENFORCE(cudaFree(allocation->ptr()));
  VLOG(2) << "cudaFree is called";
  delete allocation;
}
|
|
|
|
|
|
|
|
|
|
Allocation* CUDAAllocator::AllocateImpl(size_t size, Allocator::Attr attr) {
|
|
|
|
|
platform::CUDADeviceGuard guard(place_.device);
|
|
|
|
|
void* ptr;
|
|
|
|
@@ -42,8 +40,9 @@ Allocation* CUDAAllocator::AllocateImpl(size_t size, Allocator::Attr attr) {
|
|
|
|
|
"Cannot allocate %d on GPU %d, cuda status %d, %s", size, place_.device,
|
|
|
|
|
status, cudaGetErrorString(status)));
|
|
|
|
|
}
|
|
|
|
|
return new CUDAAllocation(ptr, size, platform::Place(place_));
|
|
|
|
|
return new Allocation(ptr, size, platform::Place(place_));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
} // namespace allocation
|
|
|
|
|
} // namespace memory
|
|
|
|
|
} // namespace paddle
|
|
|
|
|