Feature/auto_growth_allocator (#18561)
* feature/auto_growth_allocator, test=develop
* add unittest of AlignedAllocator, test=develop
* try to turn on auto_growth to test on CI, test=develop
* fix segmentation fault in mixed_vector.h, test=develop
* add unittests, test=develop
parent bb2f5d24a2
commit ae58afc546
@@ -1,48 +0,0 @@
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "gtest/gtest.h"
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/for_range.h"
#include "unsupported/Eigen/CXX11/Tensor"

// NOTE(yy): this unittest is not important. It is just used for debugging.
// It can be removed later.
struct FillZero {
 public:
  float* ptr_;

  __device__ void operator()(size_t i) { ptr_[i] = 0.0f; }
};

namespace paddle {
TEST(Eigen, main) {
  framework::Tensor tensor;
  platform::CUDAPlace gpu(0);
  float* ptr = tensor.mutable_data<float>({10, 10}, gpu);
  auto& dev_ctx = *reinterpret_cast<platform::CUDADeviceContext*>(
      platform::DeviceContextPool::Instance().Get(gpu));
  PADDLE_ENFORCE(cudaMemset(ptr, 0, sizeof(float) * 100));

  platform::ForRange<platform::CUDADeviceContext> for_range(dev_ctx, 100);
  for_range(FillZero{ptr});
  dev_ctx.Wait();

  auto eigen_vec = framework::EigenVector<float>::Flatten(tensor);
  auto& eigen_dev = *dev_ctx.eigen_device();
  eigen_vec.device(eigen_dev) = eigen_vec.constant(0.0f);
}
}  // namespace paddle
File diff suppressed because it is too large
@@ -0,0 +1,124 @@
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/memory/allocation/auto_growth_best_fit_allocator.h"
#include <algorithm>
#include <list>
#include <map>
#include <memory>
#include <mutex>  // NOLINT
#include <unordered_map>
#include "paddle/fluid/memory/allocation/aligned_allocator.h"

namespace paddle {
namespace memory {
namespace allocation {

AutoGrowthBestFitAllocator::AutoGrowthBestFitAllocator(
    const std::shared_ptr<Allocator> &underlying_allocator, size_t alignment,
    size_t chunk_size)
    : underlying_allocator_(
          std::make_shared<AlignedAllocator>(underlying_allocator, alignment)),
      alignment_(alignment),
      chunk_size_(std::max(AlignedSize(chunk_size, alignment), alignment)) {}

Allocation *AutoGrowthBestFitAllocator::AllocateImpl(size_t size) {
  size = AlignedSize(size, alignment_);

  std::lock_guard<std::mutex> guard(mtx_);
  auto iter = free_blocks_.lower_bound(std::make_pair(size, nullptr));
  BlockIt block_it;
  if (iter != free_blocks_.end()) {
    block_it = iter->second;
    free_blocks_.erase(iter);
    auto *chunk = block_it->chunk_;
    size_t remaining_size = block_it->size_ - size;
    if (remaining_size == 0) {
      block_it->is_free_ = false;
    } else {
      auto remaining_free_block = chunk->blocks_.insert(
          block_it, Block(block_it->ptr_, remaining_size, true, chunk));
      free_blocks_.emplace(std::make_pair(remaining_size, block_it->ptr_),
                           remaining_free_block);
      block_it->ptr_ =
          reinterpret_cast<uint8_t *>(block_it->ptr_) + remaining_size;
      block_it->size_ = size;
      block_it->is_free_ = false;
    }
  } else {
    size_t realloc_size = std::max(size, chunk_size_);

    try {
      chunks_.emplace_back(underlying_allocator_->Allocate(realloc_size));
    } catch (BadAlloc &ex) {
      if (size == realloc_size) throw ex;
      realloc_size = size;
      chunks_.emplace_back(underlying_allocator_->Allocate(realloc_size));
    }

    auto *chunk = &(*chunks_.rbegin());
    realloc_size = chunk->allocation_->size();
    uint8_t *p = reinterpret_cast<uint8_t *>(chunk->allocation_->ptr());
    auto &blocks = chunk->blocks_;

    size_t remaining_size = realloc_size - size;
    if (remaining_size > 0) {
      blocks.emplace_back(p, remaining_size, true, chunk);
      free_blocks_.emplace(std::make_pair(remaining_size, p), --(blocks.end()));
    }
    blocks.emplace_back(p + remaining_size, size, false, chunk);
    block_it = --(blocks.end());
    VLOG(2) << "Not found and reallocate " << realloc_size << ", and remaining "
            << remaining_size;
  }
  return new BlockAllocation(block_it);
}

void AutoGrowthBestFitAllocator::FreeImpl(Allocation *allocation) {
  std::lock_guard<std::mutex> guard(mtx_);
  auto block_it = static_cast<BlockAllocation *>(allocation)->block_it_;
  auto &blocks = block_it->chunk_->blocks_;

  block_it->is_free_ = true;

  if (block_it != blocks.begin()) {
    auto prev_it = block_it;
    --prev_it;

    if (prev_it->is_free_) {
      free_blocks_.erase(std::make_pair(prev_it->size_, prev_it->ptr_));
      prev_it->size_ += block_it->size_;
      blocks.erase(block_it);
      block_it = prev_it;
    }
  }

  auto next_it = block_it;
  ++next_it;

  if (next_it != blocks.end() && next_it->is_free_) {
    free_blocks_.erase(std::make_pair(next_it->size_, next_it->ptr_));
    block_it->size_ += next_it->size_;
    blocks.erase(next_it);
  }

  free_blocks_.emplace(std::make_pair(block_it->size_, block_it->ptr_),
                       block_it);

  delete allocation;
}

}  // namespace allocation
}  // namespace memory
}  // namespace paddle
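Editorial note (not part of the diff): free_blocks_ is a std::map keyed by (block size, block pointer), so lower_bound(std::make_pair(size, nullptr)) returns the smallest free block whose size is at least the request, which is the best-fit policy used above. A minimal, self-contained sketch of that lookup, using only the standard library:

// Illustrative sketch only (not PaddlePaddle code): a best-fit free list
// keyed by (size, ptr), mirroring the lower_bound lookup in AllocateImpl.
#include <cstddef>
#include <iostream>
#include <map>
#include <utility>

int main() {
  // In the allocator the mapped value is a block iterator; a label suffices
  // here to show which block the lookup picks.
  std::map<std::pair<size_t, void *>, const char *> free_blocks;
  int a = 0, b = 0, c = 0;
  free_blocks[{256, &a}] = "block A";
  free_blocks[{1024, &b}] = "block B";
  free_blocks[{4096, &c}] = "block C";

  size_t request = 500;
  // Smallest key >= (request, nullptr): the smallest block that still fits.
  auto it = free_blocks.lower_bound({request, nullptr});
  if (it != free_blocks.end()) {
    std::cout << "best fit: " << it->second << " (" << it->first.first
              << " bytes)" << std::endl;  // prints: best fit: block B (1024 bytes)
  }
  return 0;
}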
@@ -0,0 +1,86 @@
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <list>
#include <map>
#include <memory>
#include <mutex>  // NOLINT
#include <utility>
#include "paddle/fluid/memory/allocation/allocator.h"

namespace paddle {
namespace memory {
namespace allocation {

class AutoGrowthBestFitAllocator : public Allocator {
 public:
  explicit AutoGrowthBestFitAllocator(
      const std::shared_ptr<Allocator> &underlying_allocator, size_t alignment,
      size_t chunk_size = 0);

  bool IsAllocThreadSafe() const override { return true; }

 protected:
  Allocation *AllocateImpl(size_t size) override;

  void FreeImpl(Allocation *allocation) override;

 private:
  template <typename T>
  using List = std::list<T>;

  struct Chunk;

  struct Block {
    Block(void *ptr, size_t size, bool is_free, Chunk *chunk)
        : ptr_(ptr), size_(size), is_free_(is_free), chunk_(chunk) {}

    void *ptr_;
    size_t size_;
    bool is_free_;
    Chunk *chunk_;  // which chunk it is from
  };

  struct Chunk {
    explicit Chunk(AllocationPtr allocation)
        : allocation_(std::move(allocation)) {}

    AllocationPtr allocation_;
    List<Block> blocks_;
  };

  struct BlockAllocation : public Allocation {
    explicit BlockAllocation(const List<Block>::iterator &it)
        : Allocation(it->ptr_, it->size_, it->chunk_->allocation_->place()),
          block_it_(it) {}

    List<Block>::iterator block_it_;
  };

  using BlockIt = List<Block>::iterator;

  std::shared_ptr<Allocator> underlying_allocator_;
  std::map<std::pair<size_t, void *>, BlockIt> free_blocks_;
  std::list<Chunk> chunks_;
  size_t alignment_;
  size_t chunk_size_;

  mutable std::mutex mtx_;
};

}  // namespace allocation
}  // namespace memory
}  // namespace paddle
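Hedged usage sketch (not part of this PR): any thread-safe Allocator can serve as the underlying allocator; AutoGrowthBestFitAllocator aligns it via AlignedAllocator and carves requests out of chunk_size-sized chunks. NaiveCpuAllocator below is a hypothetical stand-in modeled on the StubAllocator used in the unit test further down.

// Hypothetical example; NaiveCpuAllocator is not part of the codebase.
#include <memory>
#include "paddle/fluid/memory/allocation/auto_growth_best_fit_allocator.h"

namespace paddle {
namespace memory {
namespace allocation {

// A trivial thread-safe allocator backed by operator new, mirroring the
// StubAllocator defined in the AlignedAllocator unit test.
class NaiveCpuAllocator : public Allocator {
 public:
  bool IsAllocThreadSafe() const override { return true; }

 protected:
  Allocation *AllocateImpl(size_t size) override {
    return new Allocation(new uint8_t[size], size, platform::CPUPlace());
  }
  void FreeImpl(Allocation *allocation) override {
    delete[] static_cast<uint8_t *>(allocation->ptr());
    delete allocation;
  }
};

void Demo() {
  auto underlying = std::make_shared<NaiveCpuAllocator>();
  // 4 KiB alignment, 1 MiB chunks: small requests share one chunk.
  AutoGrowthBestFitAllocator allocator(underlying, 1 << 12, 1 << 20);
  auto a = allocator.Allocate(1000);   // rounded up to 4096 bytes
  auto b = allocator.Allocate(50000);  // carved from the same chunk
  a.reset();  // the freed block returns to free_blocks_ and may be merged
}

}  // namespace allocation
}  // namespace memory
}  // namespace paddle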
@@ -1,77 +0,0 @@
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/memory/allocation/auto_increment_allocator.h"

namespace paddle {
namespace memory {
namespace allocation {
bool AutoIncrementAllocator::IsAllocThreadSafe() const { return true; }

std::shared_ptr<Allocator> AutoIncrementAllocator::CreateNewAllocator() {
  std::lock_guard<std::mutex> guard(mtx_);
  auto old_size = allocator_num_.load();
  PADDLE_ENFORCE_LT(old_size, underlying_allocators_.size(),
                    "Allocator number exceeds capacity %d",
                    underlying_allocators_.size());
  underlying_allocators_[old_size] = creator_();
  prev_success_allocator_ = old_size;
  ++allocator_num_;
  PADDLE_ENFORCE(
      underlying_allocators_[old_size]->IsAllocThreadSafe(),
      "the underlying allocator must be thread safe. This is a program "
      "bug.");
  return underlying_allocators_[old_size];
}

Allocation *AutoIncrementAllocator::AllocateImpl(size_t size) {
  auto cur = prev_success_allocator_.load();
  size_t retry_count = allocator_num_.load();
  size_t allocator_num = retry_count;
  while (retry_count-- > 0) {  // retry until the retry count reaches zero
    try {
      auto res = underlying_allocators_[cur]->Allocate(size);
      prev_success_allocator_ = cur;
      return res.release();
    } catch (BadAlloc &) {
      if (++cur >= allocator_num) {
        cur = 0;
      }
    } catch (...) {
      // if there is another type of exception, just rethrow it.
      throw;
    }
  }

  // This happens when the first allocator is exhausted and
  // there are more than 1 allocation requests.
  // In this situation, the first allocation request would succeed
  // and the second allocation request would fail if we do not use
  // the newly created allocator by the first allocation request.
  for (cur = allocator_num; cur < allocator_num_; ++cur) {
    try {
      auto ret = underlying_allocators_[cur]->Allocate(size);
      prev_success_allocator_ = cur;
      return ret.release();
    } catch (BadAlloc &) {
    } catch (...) {
      throw;
    }
  }
  // No suitable allocator
  return CreateNewAllocator()->Allocate(size).release();
}

}  // namespace allocation
}  // namespace memory
}  // namespace paddle
@@ -1,80 +0,0 @@
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <atomic>  // NOLINT
#include <functional>
#include <memory>
#include <mutex>   // NOLINT
#include <thread>  // NOLINT
#include <utility>
#include <vector>
#include "paddle/fluid/memory/allocation/allocator.h"

namespace paddle {
namespace memory {
namespace allocation {

// The AutoIncrementAllocator manages many underlying allocators. If none of
// them can allocate the requested memory, a new allocator is created and its
// `allocate` method is invoked.
//
// NOTE(yy): The AutoIncrementAllocator prefers to allocate memory from
// the latest successful allocator.
//
// NOTE(yy): We may need to release an underlying allocator if it allocates
// nothing. However, that is generally not useful, since it would make
// performance non-deterministic.
//
// NOTE(yy): This allocator is only locked when creating a new underlying
// allocator. The allocation requests from many threads may be dispatched
// to the same underlying allocator. So the underlying allocator must be
// thread safe.
//
// NOTE(zjl): Add a capacity parameter to the constructor. A high-performance
// thread-safe std::vector with varying size is hard to implement.
// Fortunately, we can get the total GPU memory and each chunk size.
// Therefore, we can get a suitable capacity for AutoIncrementAllocator.
class AutoIncrementAllocator : public Allocator {
 public:
  // Creator is the method to create ManagedAllocator
  using AllocatorCreator = std::function<std::shared_ptr<Allocator>()>;

  explicit AutoIncrementAllocator(AllocatorCreator&& creator, size_t capacity)
      : creator_(std::move(creator)), underlying_allocators_(capacity) {}

  bool IsAllocThreadSafe() const override;

 private:
  std::shared_ptr<Allocator> CreateNewAllocator();

 protected:
  Allocation* AllocateImpl(size_t size) override;

 private:
  AllocatorCreator creator_;

  std::vector<AllocatorCreator::result_type> underlying_allocators_;
  std::atomic<size_t> allocator_num_{0};

  // Use std::atomic rather than std::mutex, since std::atomic is usually
  // lock-free
  std::atomic<size_t> prev_success_allocator_{0};

  std::mutex mtx_;
};
}  // namespace allocation
}  // namespace memory
}  // namespace paddle
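For context only (this class is removed by the PR): AllocateImpl retries existing allocators starting from the last successful index, wraps around, and only creates a new allocator when all of them throw BadAlloc. A small standalone sketch of that wrap-around retry order, expressed over plain integers:

// Illustrative only: the retry order of the removed AutoIncrementAllocator,
// assuming every existing allocator fails for the current request.
#include <cstddef>
#include <iostream>

int main() {
  size_t allocator_num = 4;  // allocators created so far
  size_t prev_success = 2;   // index of the last allocator that succeeded
  size_t cur = prev_success;
  size_t retry = allocator_num;
  while (retry-- > 0) {
    std::cout << "try allocator " << cur << "\n";  // prints 2, 3, 0, 1
    if (++cur >= allocator_num) cur = 0;
  }
  return 0;
}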
@@ -1,47 +0,0 @@
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/memory/allocation/conditional_allocator.h"
#include <memory>

namespace paddle {
namespace memory {
namespace allocation {

ConditionalAllocator& ConditionalAllocator::AddAllocator(
    std::function<bool(size_t)> func, std::shared_ptr<Allocator> allocator) {
  underlying_allocators_.emplace_back(std::move(func), std::move(allocator));
  return *this;
}

bool ConditionalAllocator::IsAllocThreadSafe() const {
  return std::all_of(underlying_allocators_.begin(),
                     underlying_allocators_.end(),
                     [](const AllocatorWithCond& allocatorWithCond) {
                       return allocatorWithCond.second->IsAllocThreadSafe();
                     });
}

Allocation* ConditionalAllocator::AllocateImpl(size_t size) {
  for (auto& pair : underlying_allocators_) {
    if (pair.first(size)) {
      return pair.second->Allocate(size).release();
    }
  }
  throw BadAlloc("No suitable allocator");
}

}  // namespace allocation
}  // namespace memory
}  // namespace paddle
@@ -1,59 +0,0 @@
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <functional>
#include <memory>
#include <utility>
#include <vector>
#include "paddle/fluid/memory/allocation/allocator.h"

namespace paddle {
namespace memory {
namespace allocation {

// A composite allocator that dispatches each allocation request according to
// the registered conditions.
//
// For example:
//
//   auto* cond_allocator = new ConditionalAllocator();
//   cond_allocator->AddAllocator([](size_t size){
//     // if size > 10
//     return size > 10;
//   }, allocator_b).AddAllocator([](size_t size){
//     // else
//     return true;
//   }, allocator_c);
class ConditionalAllocator : public Allocator {
 public:
  ConditionalAllocator() = default;

  ConditionalAllocator& AddAllocator(std::function<bool(size_t)> func,
                                     std::shared_ptr<Allocator> allocator);

  bool IsAllocThreadSafe() const override;

 protected:
  Allocation* AllocateImpl(size_t size) override;

 private:
  using AllocatorWithCond =
      std::pair<std::function<bool(size_t)>, std::shared_ptr<Allocator>>;
  std::vector<AllocatorWithCond> underlying_allocators_;
};

}  // namespace allocation
}  // namespace memory
}  // namespace paddle
@@ -0,0 +1,81 @@
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "gtest/gtest.h"
#include "paddle/fluid/memory/allocation/aligned_allocator.h"

namespace paddle {
namespace memory {
namespace allocation {

TEST(aligned, aligned_size) {
  ASSERT_EQ(AlignedSize(1024, 1024), 1024);
  ASSERT_EQ(AlignedSize(1023, 1024), 1024);
  ASSERT_EQ(AlignedSize(1025, 1024), 2048);
}

struct StubAllocator : public Allocator {
 public:
  StubAllocator() = default;

  size_t AllocNum() const { return alloc_num_; }

 protected:
  Allocation *AllocateImpl(size_t size) override {
    ++alloc_num_;
    return new Allocation(new uint8_t[size], size, platform::CPUPlace());
  }

  void FreeImpl(Allocation *allocation) override {
    delete[] static_cast<uint8_t *>(allocation->ptr());
    delete allocation;
    --alloc_num_;
  }

 private:
  size_t alloc_num_{0};
};

bool IsAligned(const AllocationPtr &alloc, size_t alignment) {
  return reinterpret_cast<uintptr_t>(alloc->ptr()) % alignment == 0;
}

TEST(aligned_allocator, aligned_allocator) {
  size_t alignment = 1024;
  auto allocator = std::make_shared<StubAllocator>();
  auto aligned_allocator =
      std::make_shared<AlignedAllocator>(allocator, alignment);

  auto alloc1 = aligned_allocator->Allocate(1345);
  ASSERT_EQ(allocator->AllocNum(), 1);
  ASSERT_TRUE(IsAligned(alloc1, alignment));
  alloc1.reset();
  ASSERT_EQ(allocator->AllocNum(), 0);

  {
    auto alloc2 = aligned_allocator->Allocate(200);
    ASSERT_TRUE(IsAligned(alloc2, alignment));
    ASSERT_EQ(allocator->AllocNum(), 1);

    auto alloc3 = aligned_allocator->Allocate(3021);
    ASSERT_TRUE(IsAligned(alloc3, alignment));
    ASSERT_EQ(allocator->AllocNum(), 2);
  }

  ASSERT_EQ(allocator->AllocNum(), 0);
}

}  // namespace allocation
}  // namespace memory
}  // namespace paddle
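The implementation of AlignedSize is not shown in this diff; the three assertions in TEST(aligned, aligned_size) are consistent with the usual round-up-to-a-multiple formula, sketched below under that assumption.

// Assumed behaviour only; the real AlignedSize lives in aligned_allocator.h
// and may differ in detail. This just reproduces the values the test expects.
#include <cassert>
#include <cstddef>

static size_t RoundUpToMultiple(size_t size, size_t alignment) {
  return ((size + alignment - 1) / alignment) * alignment;
}

int main() {
  assert(RoundUpToMultiple(1024, 1024) == 1024);
  assert(RoundUpToMultiple(1023, 1024) == 1024);
  assert(RoundUpToMultiple(1025, 1024) == 2048);
  return 0;
}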
Some files were not shown because too many files have changed in this diff.