Remove attribute in Allocator::Allocate (#17878)

* remove attribute in Allocator::Allocate, test=develop

* fix travis ci error, test=develop
Zeng Jinle committed 6 years ago (via GitHub)
parent 33d1e56506
commit 3ece61f71e

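In short: this change drops the `Allocator::Attr` argument from `Allocator::Allocate` and from every API that forwarded it (`AllocShared`, `Alloc`, `Tensor::mutable_data`, the `AllocateImpl` overrides). The removed NOTE in allocator.h already conceded that allocators might ignore the attribute. A hedged before/after sketch of a typical call site (`allocator` stands for any `paddle::memory::allocation::Allocator*`):

    // Before this PR: an attribute accompanied every allocation request.
    // auto allocation = allocator->Allocate(size, Allocator::kDefault);

    // After this PR: the requested size is the only parameter.
    auto allocation = allocator->Allocate(size);  // returns an AllocationPtr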
@@ -12,8 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-#ifndef PADDLE_FLUID_FRAMEWORK_IR_LOCK_FREE_OPTIMIZE_PASS_H_
-#define PADDLE_FLUID_FRAMEWORK_IR_LOCK_FREE_OPTIMIZE_PASS_H_
+#pragma once
 
 #include <string>
 #include <vector>
@@ -126,5 +125,3 @@ class LockFreeOptimizePass : public Pass {
 }  // namespace ir
 }  // namespace framework
 }  // namespace paddle
-
-#endif  // PADDLE_FLUID_FRAMEWORK_IR_LOCK_FREE_OPTIMIZE_PASS_H_
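Unrelated cleanup in this header: the macro include guard becomes `#pragma once`. The two idioms guard equivalently; `#pragma once` is technically non-standard but supported by all mainstream compilers, and it cannot fall out of sync with the file name:

    // Removed form: classic macro guard, repeated at top and bottom.
    // #ifndef PADDLE_FLUID_FRAMEWORK_IR_LOCK_FREE_OPTIMIZE_PASS_H_
    // #define PADDLE_FLUID_FRAMEWORK_IR_LOCK_FREE_OPTIMIZE_PASS_H_
    //   ... header body ...
    // #endif  // PADDLE_FLUID_FRAMEWORK_IR_LOCK_FREE_OPTIMIZE_PASS_H_

    // Added form: a single line at the top of the file.
    #pragma once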

@@ -81,8 +81,7 @@ void InitTensorHolder(Scope* scope, const paddle::platform::Place& place,
                       const char* var_name) {
   auto x = scope->Var(var_name);
   auto tensor = x->GetMutable<LoDTensor>();
-  tensor->mutable_data(place, proto::VarType::FP32,
-                       ::paddle::memory::Allocator::kDefault, 1);
+  tensor->mutable_data(place, proto::VarType::FP32, 1);
 }
 
 void MainTest(bool convWithExistingBias) {

@@ -110,8 +110,7 @@ void InitTensorHolder(Scope* scope, const paddle::platform::Place& place,
                       const char* var_name) {
   auto x = scope->Var(var_name);
   auto tensor = x->GetMutable<LoDTensor>();
-  tensor->mutable_data(place, proto::VarType::FP32,
-                       ::paddle::memory::Allocator::kDefault, 1);
+  tensor->mutable_data(place, proto::VarType::FP32, 1);
 }
 
 void MainTest(const ProgramDesc& prog, int conv_count, int pool_count,

@@ -102,8 +102,7 @@ void InitTensorHolder(Scope* scope, const paddle::platform::Place& place,
                       const char* var_name) {
   auto x = scope->Var(var_name);
   auto tensor = x->GetMutable<LoDTensor>();
-  tensor->mutable_data(place, proto::VarType::FP32,
-                       ::paddle::memory::Allocator::kDefault, 1);
+  tensor->mutable_data(place, proto::VarType::FP32, 1);
 }
 
 void MainTest(const ProgramDesc& prog, int removed_nodes_num) {

@@ -377,12 +377,12 @@ class ExecutionContext {
   }
 
   template <typename T>
-  T& GetKernelConfig(int idx) const {
+  T& GetKernelConfig(size_t idx) const {
     PADDLE_ENFORCE(
         kernel_configs_ && kernel_configs_->size() > static_cast<size_t>(idx),
-        "%s selected kernel doesn't have kernel config %lu <= %d",
+        "%s selected kernel doesn't have kernel config %lu <= %lu",
         op_.Type().c_str(), kernel_configs_->size(), idx);
-    return *boost::get<std::shared_ptr<T>>(kernel_configs_->at(idx));
+    return *boost::get<std::shared_ptr<T>>((*kernel_configs_)[idx]);
   }
 
  private:
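Two incidental fixes ride along in this hunk: `idx` becomes `size_t`, and the error-message format now matches its arguments (`kernel_configs_->size()` and `idx` are both unsigned after the change). A standalone illustration of why the `%d` → `%lu` change matters (not Paddle code):

    #include <cstdio>

    int main() {
      size_t n = 42;
      // std::printf("%d\n", n);  // specifier/argument mismatch: undefined behavior
      std::printf("%lu\n", static_cast<unsigned long>(n));  // portable; prints 42
      return 0;
    }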

@@ -103,7 +103,7 @@ TAlgorithm AlgorithmsCache<TAlgorithm>::GetAlgorithm(
       ++search_times_;
       return algo;
     }
-    TAlgorithm algo;
+    TAlgorithm algo{};
     int64_t min = static_cast<uint64_t>(INT_MAX);
     for (const auto& m : hash_) {
       if (m.first < min) {
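`TAlgorithm algo{};` value-initializes the local, so it no longer holds an indeterminate value if the loop below never assigns to it. A self-contained illustration of the difference:

    struct Algo { int id; };  // stand-in for a POD TAlgorithm

    int main() {
      Algo a;    // default-initialized: a.id is indeterminate (reading it is UB)
      Algo b{};  // value-initialized: b.id is guaranteed to be zero
      return b.id;  // always returns 0
    }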

@@ -35,7 +35,6 @@ size_t Tensor::memory_size() const {
 }
 
 void* Tensor::mutable_data(platform::Place place, proto::VarType::Type type,
-                           memory::Allocator::Attr attr,
                            size_t requested_size) {
   type_ = type;
   PADDLE_ENFORCE_GE(numel(), 0,
@@ -50,18 +49,17 @@ void* Tensor::mutable_data(platform::Place place, proto::VarType::Type type,
   /* some versions of boost::variant don't have operator!= */
   if (holder_ == nullptr || !(holder_->place() == place) ||
       holder_->size() < size + offset_) {
-    holder_ = memory::AllocShared(place, size, attr);
+    holder_ = memory::AllocShared(place, size);
     offset_ = 0;
   }
   return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
                                  offset_);
 }
 
-void* Tensor::mutable_data(platform::Place place, memory::Allocator::Attr attr,
-                           size_t requested_size) {
+void* Tensor::mutable_data(platform::Place place, size_t requested_size) {
   PADDLE_ENFORCE(this->holder_ != nullptr,
                  "Cannot invoke mutable data if current hold nothing.");
-  return mutable_data(place, type_, attr, requested_size);
+  return mutable_data(place, type_, requested_size);
 }
 
 Tensor& Tensor::ShareDataWith(const Tensor& src) {

@@ -87,17 +87,12 @@ class Tensor {
    * @note If not exist, then allocation.
    */
   template <typename T>
-  T* mutable_data(platform::Place place,
-                  memory::Allocator::Attr attr = memory::Allocator::kDefault,
-                  size_t requested_size = 0);
+  T* mutable_data(platform::Place place, size_t requested_size = 0);
 
   void* mutable_data(platform::Place place, proto::VarType::Type type,
-                     memory::Allocator::Attr attr = memory::Allocator::kDefault,
                      size_t requested_size = 0);
 
-  void* mutable_data(platform::Place place,
-                     memory::Allocator::Attr attr = memory::Allocator::kDefault,
-                     size_t requested_size = 0);
+  void* mutable_data(platform::Place place, size_t requested_size = 0);
 
   /**
    * @brief Return a pointer to mutable memory block.
@@ -109,9 +104,7 @@ class Tensor {
    * @note If not exist, then allocation.
    */
   template <typename T>
-  T* mutable_data(DDim dims, platform::Place place,
-                  memory::Allocator::Attr attr = memory::Allocator::kDefault,
-                  size_t requested_size = 0);
+  T* mutable_data(DDim dims, platform::Place place, size_t requested_size = 0);
 
   /*! Return the dimensions of the memory block. */
   const DDim& dims() const;

@@ -49,20 +49,17 @@ inline T* Tensor::data() {
 
 template <typename T>
 inline T* Tensor::mutable_data(DDim dims, platform::Place place,
-                               memory::Allocator::Attr attr,
                                size_t requested_size) {
   static_assert(std::is_pod<T>::value, "T must be POD");
   Resize(dims);
-  return mutable_data<T>(place, attr, requested_size);
+  return mutable_data<T>(place, requested_size);
 }
 
 template <typename T>
-inline T* Tensor::mutable_data(platform::Place place,
-                               memory::Allocator::Attr attr,
-                               size_t requested_size) {
+inline T* Tensor::mutable_data(platform::Place place, size_t requested_size) {
   static_assert(std::is_pod<T>::value, "T must be POD");
   return reinterpret_cast<T*>(
-      mutable_data(place, DataTypeTrait<T>::DataType, attr, requested_size));
+      mutable_data(place, DataTypeTrait<T>::DataType, requested_size));
 }
 
 inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) {
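With `Attr` gone, `Tensor::mutable_data` shrinks to place/type/size overloads. A hedged usage sketch against the new signatures (assumes the usual Paddle headers; `make_ddim` and `CPUPlace` are existing Paddle utilities):

    #include "paddle/fluid/framework/ddim.h"
    #include "paddle/fluid/framework/tensor.h"
    #include "paddle/fluid/platform/place.h"

    void Example() {
      paddle::framework::Tensor t;
      // Allocate (or reuse) a 2x3 float buffer on the CPU; requested_size
      // defaults to 0, i.e. the size is derived from numel() * sizeof(float).
      float* data = t.mutable_data<float>(paddle::framework::make_ddim({2, 3}),
                                          paddle::platform::CPUPlace());
      (void)data;
    }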

@@ -89,9 +89,8 @@ class AlignedAllocator : public ThinAlignedAllocator {
   using ThinAlignedAllocator::ThinAlignedAllocator;
 
  protected:
-  Allocation* AllocateImpl(size_t size, Allocator::Attr attr) override {
-    auto raw_allocation =
-        underlying_allocator_->Allocate(size + kAlignment, attr);
+  Allocation* AllocateImpl(size_t size) override {
+    auto raw_allocation = underlying_allocator_->Allocate(size + kAlignment);
     return new AlignedAllocation<kAlignment>(std::move(raw_allocation), size);
   }
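`AlignedAllocator` still over-allocates by `kAlignment` bytes so the handed-out pointer can be rounded up to an aligned address. The same trick in a standalone form (illustration only, not the Paddle implementation):

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    // alignment must be a power of two.
    void* AlignedAlloc(std::size_t size, std::size_t alignment) {
      void* raw = std::malloc(size + alignment);  // pad so rounding up stays in bounds
      if (raw == nullptr) return nullptr;
      auto addr = reinterpret_cast<std::uintptr_t>(raw);
      auto aligned = (addr + alignment) & ~(alignment - 1);  // next aligned address
      // NOTE: std::free needs `raw`, not `aligned`; AlignedAllocation keeps the
      // underlying allocation alive for exactly this reason.
      return reinterpret_cast<void*>(aligned);
    }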

@@ -14,8 +14,6 @@
 
 #include "paddle/fluid/memory/allocation/allocator.h"
-
-#include <functional>
 
 namespace paddle {
 namespace memory {
 namespace allocation {

@@ -146,42 +146,8 @@ class Allocation {
 };
 
 // Base interface class of memory Allocator.
-// To allocate a memory, allocator needs two parameters:
-//    1. size of bytes.
-//    2. Attribute of memory.
-// NOTE: the attribute of memory might be ignored if the allocator does not
-// care it.
 class Allocator {
  public:
-  enum Attr {
-    kDefault = 0,  // Default attribute. Uses the fast or stablest allocation
-                   // algorithm.
-
-    kFixedHuge = 1,  // The allocation may not be freed until the program
-                     // ends. e.g., `Parameters` and `Momentum`.
-
-    kFluxHuge = 2,  // The allocation may create and freed frequently and the
-                    // allocation is considerable huge. Like `activations`
-                    // and gradients.
-
-    kScratchpad =
-        3,  // The `Scratchpad` memory is allocated and freed very soon,
-            // usually within an operator or aux memory.
-            // Like CUDNN workspace, AUX memory in batch norm, etc.
-            //
-            // https://en.wikipedia.org/wiki/Scratchpad_memory
-
-    kCrossDevice =
-        4,  // The memory used cross-device memory copy/communication.
-            // For example:
-            //    1. it can use an `pinned` memory for CPU-GPU
-            //       communication.
-            //    2. it can use an `registered` memory for RDMA
-            //       communication.
-
-    NumOfAttrs = 5  // The number of all attributes. It is used internally.
-  };
-
   virtual ~Allocator() {}
 
   class AllocationDeleter {
@@ -195,8 +161,8 @@ class Allocator {
   using AllocationPtr = std::unique_ptr<Allocation, AllocationDeleter>;
 
   // Allocate an allocation.
-  inline AllocationPtr Allocate(size_t size, Allocator::Attr attr = kDefault) {
-    auto ptr = AllocateImpl(size, attr);
+  inline AllocationPtr Allocate(size_t size) {
+    auto ptr = AllocateImpl(size);
     ptr->RegisterDecoratedAllocator(this);
     return AllocationPtr(ptr);
   }
@@ -211,7 +177,7 @@ class Allocator {
   virtual bool IsAllocThreadSafe() const;
 
  protected:
-  virtual Allocation* AllocateImpl(size_t size, Allocator::Attr attr) = 0;
+  virtual Allocation* AllocateImpl(size_t size) = 0;
   virtual void FreeImpl(Allocation* allocation);
 };
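The class keeps its shape after the change: a public non-virtual `Allocate` wraps a protected virtual `AllocateImpl`, now both taking only a size. A minimal self-contained sketch of that non-virtual-interface pattern (not Paddle code):

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    class MiniAllocator {
     public:
      virtual ~MiniAllocator() = default;
      void* Allocate(std::size_t size) {   // fixed entry point for all callers
        void* p = AllocateImpl(size);      // the one customizable step
        // Shared bookkeeping goes here; Paddle registers the decorated
        // allocator on the allocation at this point.
        return p;
      }
     protected:
      virtual void* AllocateImpl(std::size_t size) = 0;
    };

    class MallocAllocator : public MiniAllocator {
     protected:
      void* AllocateImpl(std::size_t size) override {
        void* p = std::malloc(size);
        if (p == nullptr) throw std::bad_alloc{};
        return p;
      }
    };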

@@ -67,8 +67,8 @@ class CPUManagedAllocator : public Allocator {
   bool IsAllocThreadSafe() const override { return true; }
 
  protected:
-  Allocation* AllocateImpl(size_t size, Allocator::Attr attr) override {
-    return normal_allocator_->Allocate(size, attr).release();
+  Allocation* AllocateImpl(size_t size) override {
+    return normal_allocator_->Allocate(size).release();
   }
 
  private:
@@ -101,11 +101,10 @@ class ChunkedAllocator : public Allocator {
     auto* cond_allocator = new ConditionalAllocator();
     cond_allocator
-        ->AddAllocator(
-            [this](size_t size, Attr attr) { return size < max_chunk_size_; },
-            default_allocator_)
+        ->AddAllocator([this](size_t size) { return size < max_chunk_size_; },
+                       default_allocator_)
         .AddAllocator(
-            [](size_t size, Attr attr) {
+            [](size_t size) {
               return true;  // default case
             },
             raw_allocator_);
@@ -133,8 +132,8 @@ class ChunkedAllocator : public Allocator {
   bool IsAllocThreadSafe() const override { return true; }
 
  protected:
-  Allocation* AllocateImpl(size_t size, Allocator::Attr attr) override {
-    return default_allocator_->Allocate(size, attr).release();
+  Allocation* AllocateImpl(size_t size) override {
+    return default_allocator_->Allocate(size).release();
   }
 
  protected:
@@ -263,7 +262,7 @@ class AllocatorFacadePrivate {
   explicit ZeroSizeAllocator(platform::Place place) : place_(place) {}
 
  protected:
-  Allocation* AllocateImpl(size_t size, Allocator::Attr attr) override {
+  Allocation* AllocateImpl(size_t size) override {
     return new Allocation(nullptr, 0, place_);
   }
@@ -304,13 +303,13 @@ AllocatorFacade& AllocatorFacade::Instance() {
 }
 
 std::shared_ptr<Allocation> AllocatorFacade::AllocShared(
-    const platform::Place& place, size_t size, Allocator::Attr attr) {
-  return std::shared_ptr<Allocation>(Alloc(place, size, attr));
+    const platform::Place& place, size_t size) {
+  return std::shared_ptr<Allocation>(Alloc(place, size));
 }
 
-AllocationPtr AllocatorFacade::Alloc(const platform::Place& place, size_t size,
-                                     Allocator::Attr attr) {
-  return m_->GetAllocator(place, size)->Allocate(size, attr);
+AllocationPtr AllocatorFacade::Alloc(const platform::Place& place,
+                                     size_t size) {
+  return m_->GetAllocator(place, size)->Allocate(size);
 }
 
 }  // namespace allocation

@@ -38,13 +38,11 @@ class AllocatorFacade {
   static AllocatorFacade& Instance();
 
   // Allocate a shared allocation.
-  std::shared_ptr<Allocation> AllocShared(
-      const platform::Place& place, size_t size,
-      Allocator::Attr attr = Allocator::kDefault);
+  std::shared_ptr<Allocation> AllocShared(const platform::Place& place,
+                                          size_t size);
 
   // Allocate a unique allocation.
-  AllocationPtr Alloc(const platform::Place& place, size_t size,
-                      Allocator::Attr attr = Allocator::kDefault);
+  AllocationPtr Alloc(const platform::Place& place, size_t size);
 
   // TODO(yy): Allocate a Copy-On-Write allocation?
 
 private:

@@ -34,14 +34,13 @@ std::shared_ptr<Allocator> AutoIncrementAllocator::CreateNewAllocator() {
                     "bug.");
   return underlying_allocators_[old_size];
 }
 
-Allocation *AutoIncrementAllocator::AllocateImpl(size_t size,
-                                                 Allocator::Attr attr) {
+Allocation *AutoIncrementAllocator::AllocateImpl(size_t size) {
   auto cur = prev_success_allocator_.load();
   size_t retry_count = allocator_num_.load();
   size_t allocator_num = retry_count;
   while (retry_count-- > 0) {  // until there retry count is zero
     try {
-      auto res = underlying_allocators_[cur]->Allocate(size, attr);
+      auto res = underlying_allocators_[cur]->Allocate(size);
       prev_success_allocator_ = cur;
       return res.release();
     } catch (BadAlloc &) {
@@ -61,7 +60,7 @@ Allocation *AutoIncrementAllocator::AllocateImpl(size_t size,
   // the newly created allocator by the first allocation request.
   for (cur = allocator_num; cur < allocator_num_; ++cur) {
     try {
-      auto ret = underlying_allocators_[cur]->Allocate(size, attr);
+      auto ret = underlying_allocators_[cur]->Allocate(size);
       prev_success_allocator_ = cur;
       return ret.release();
     } catch (BadAlloc &) {
@@ -70,7 +69,7 @@ Allocation *AutoIncrementAllocator::AllocateImpl(size_t size,
     }
   }
   // No suitable allocator
-  return CreateNewAllocator()->Allocate(size, attr).release();
+  return CreateNewAllocator()->Allocate(size).release();
 }
 
 }  // namespace allocation
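Condensed, the allocation strategy above is: retry every existing allocator starting near the last success, then scan allocators other threads may have created meanwhile, and only then create a new one. A hypothetical single-threaded sketch of that shape (reusing `MiniAllocator` from the earlier sketch; `MakeAllocator` is an assumed factory standing in for `creator_`, and `std::bad_alloc` stands in for Paddle's `BadAlloc`; the real class also coordinates through atomics):

    #include <cstddef>
    #include <memory>
    #include <new>
    #include <vector>

    std::unique_ptr<MiniAllocator> MakeAllocator();  // assumed factory

    void* AllocateOrGrow(std::vector<std::unique_ptr<MiniAllocator>>& pool,
                         std::size_t size) {
      for (auto& a : pool) {
        try {
          return a->Allocate(size);   // first allocator with room wins
        } catch (const std::bad_alloc&) {
          // this one is exhausted; try the next
        }
      }
      pool.push_back(MakeAllocator());     // everything was full: grow the pool
      return pool.back()->Allocate(size);  // may still throw; caller decides
    }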

@@ -19,6 +19,7 @@
 #include <memory>
 #include <mutex>   // NOLINT
 #include <thread>  // NOLINT
+#include <utility>
 #include <vector>
 
 #include "paddle/fluid/memory/allocation/allocator.h"
@@ -60,7 +61,7 @@ class AutoIncrementAllocator : public Allocator {
   std::shared_ptr<Allocator> CreateNewAllocator();
 
  protected:
-  Allocation* AllocateImpl(size_t size, Allocator::Attr attr) override;
+  Allocation* AllocateImpl(size_t size) override;
 
  private:
   AllocatorCreator creator_;

@@ -140,7 +140,7 @@ void BestFitAllocator::FreeImpl(Allocation* allocation) {
   InsertFreeNode(chunk_it);
   delete allocation;
 }
-Allocation* BestFitAllocator::AllocateImpl(size_t size, Allocator::Attr attr) {
+Allocation* BestFitAllocator::AllocateImpl(size_t size) {
   auto highest_set_bit = static_cast<size_t>(HighestBitPos(size));
   MapIt map_it;
   for (; highest_set_bit < free_chunks_.size(); ++highest_set_bit) {
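For context on the loop above: free chunks are kept in buckets indexed by the highest set bit of their size, so the search can start at the first bucket that could possibly satisfy `size`. The bucket index is essentially floor(log2(size)); one minimal way to compute it (an illustration, not Paddle's `HighestBitPos` implementation):

    #include <cstddef>

    std::size_t HighestBitPosSketch(std::size_t size) {
      std::size_t pos = 0;
      while (size >>= 1) ++pos;  // shift right until the top bit falls off
      return pos;                // e.g. 80 (0b1010000) -> 6
    }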

@@ -120,7 +120,7 @@ class BestFitAllocator : public Allocator {
 
  protected:
   void FreeImpl(Allocation* allocation) override;
-  Allocation* AllocateImpl(size_t size, Allocator::Attr attr) override;
+  Allocation* AllocateImpl(size_t size) override;
 
  private:
   Allocation* allocation_;  // not owned

@@ -13,8 +13,10 @@
 // limitations under the License.
 
 #include "paddle/fluid/memory/allocation/best_fit_allocator.h"
+#include <memory>
 #include <random>
 #include <thread>  // NOLINT
+#include <utility>
 #include <vector>
 #include "gtest/gtest.h"
 #include "paddle/fluid/memory/allocation/cpu_allocator.h"
@@ -33,10 +35,10 @@ class StubAllocation : public Allocation {
 TEST(BestFitAllocator, test_allocation) {
   StubAllocation stub(4UL * 1024 * 1024 * 1024);
   BestFitAllocator allocator(&stub);
-  { auto allocation = allocator.Allocate(64, allocator.kDefault); }
+  { auto allocation = allocator.Allocate(64); }
 
   {
-    auto allocation = allocator.Allocate(80, allocator.kDefault);
+    auto allocation = allocator.Allocate(80);
 
     {
       auto best_fit_allocation =
@@ -48,10 +50,10 @@ TEST(BestFitAllocator, test_allocation) {
       ASSERT_EQ(allocation->ptr(), nullptr);
     }
 
-    auto allocation2 = allocator.Allocate(60, allocator.kDefault);
-    auto allocation3 = allocator.Allocate(90, allocator.kDefault);
+    auto allocation2 = allocator.Allocate(60);
+    auto allocation3 = allocator.Allocate(90);
     allocation2.reset();
-    allocation2 = allocator.Allocate(30, allocator.kDefault);
+    allocation2 = allocator.Allocate(30);
 
     {
       auto best_fit_allocation =
@@ -59,7 +61,7 @@ TEST(BestFitAllocator, test_allocation) {
       ASSERT_EQ(best_fit_allocation->ChunkIterator()->offset_, 80);
     }
     allocation2.reset();
-    allocation2 = allocator.Allocate(60, allocator.kDefault);
+    allocation2 = allocator.Allocate(60);
 
     {
       auto best_fit_allocation =
@@ -70,7 +72,7 @@ TEST(BestFitAllocator, test_allocation) {
 
     allocation.reset();
     allocation2.reset();
-    allocation = allocator.Allocate(80 + 60, allocator.kDefault);
+    allocation = allocator.Allocate(80 + 60);
     {
       auto best_fit_allocation =
           dynamic_cast<BestFitAllocation*>(allocation.get());
@@ -79,8 +81,8 @@ TEST(BestFitAllocator, test_allocation) {
 
     allocation.reset();
-    allocation = allocator.Allocate(80, allocator.kDefault);
-    allocation2 = allocator.Allocate(60, allocator.kDefault);
+    allocation = allocator.Allocate(80);
+    allocation2 = allocator.Allocate(60);
     allocation = nullptr;
     allocation2 = nullptr;
     allocation3 = nullptr;
@@ -91,8 +93,7 @@ TEST(BestFitAllocator, test_allocation) {
 TEST(BestFitAllocator, test_concurrent_cpu_allocation) {
   CPUAllocator allocator;
-  auto global_allocation =
-      allocator.Allocate(256UL * 1024 * 1024, allocator.kDefault);
+  auto global_allocation = allocator.Allocate(256UL * 1024 * 1024);
 
   std::unique_ptr<Allocator> best_fit_allocator(
       new BestFitAllocator(global_allocation.get()));
@@ -106,8 +107,8 @@ TEST(BestFitAllocator, test_concurrent_cpu_allocation) {
     for (size_t i = 0; i < 128; ++i) {
       size_t allocate_size = dist(engine);
-      auto allocation = locked_allocator.Allocate(
-          sizeof(size_t) * allocate_size, locked_allocator.kDefault);
+      auto allocation =
+          locked_allocator.Allocate(sizeof(size_t) * allocate_size);
 
       size_t* data = reinterpret_cast<size_t*>(allocation->ptr());

@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+#include <memory>
 #include <random>
 #include <thread>  // NOLINT
 #include <vector>
@@ -36,8 +37,7 @@ struct ForEachFill {
 TEST(BestFitAllocator, concurrent_cuda) {
   CUDAAllocator allocator(platform::CUDAPlace(0));
   // 256 MB
-  auto cuda_allocation =
-      allocator.Allocate(256U * 1024 * 1024, allocator.kDefault);
+  auto cuda_allocation = allocator.Allocate(256U * 1024 * 1024);
 
   LockedAllocator concurrent_allocator(
       std::unique_ptr<Allocator>(new BestFitAllocator(cuda_allocation.get())));
@@ -50,8 +50,8 @@ TEST(BestFitAllocator, concurrent_cuda) {
     for (size_t i = 0; i < 128; ++i) {
       size_t allocate_size = dist(engine);
-      auto allocation = concurrent_allocator.Allocate(
-          sizeof(size_t) * allocate_size, concurrent_allocator.kDefault);
+      auto allocation =
+          concurrent_allocator.Allocate(sizeof(size_t) * allocate_size);
 
       size_t* data = reinterpret_cast<size_t*>(allocation->ptr());

@@ -53,7 +53,7 @@ void BufferedAllocator::FreeImpl(Allocation *allocation) {
   allocations_.emplace(allocation->size(), AllocationPtr(allocation));
 }
 
-Allocation *BufferedAllocator::AllocateImpl(size_t size, Allocator::Attr attr) {
+Allocation *BufferedAllocator::AllocateImpl(size_t size) {
   {
     platform::LockGuardPtr<std::mutex> guard(mtx_);
     auto it = allocations_.lower_bound(size);
@@ -65,10 +65,10 @@ Allocation *BufferedAllocator::AllocateImpl(size_t size, Allocator::Attr attr) {
   }
 
   try {
-    return underlying_allocator_->Allocate(size, attr).release();
+    return underlying_allocator_->Allocate(size).release();
   } catch (BadAlloc &) {
     FreeCache(size);
-    return underlying_allocator_->Allocate(size, attr).release();
+    return underlying_allocator_->Allocate(size).release();
   }
 }
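The allocation path above reads: serve the request from the size-keyed cache when a large-enough block exists, otherwise go to the underlying allocator, and on `BadAlloc` flush cached blocks once and retry. The same shape as a hypothetical condensed sketch (the `cache` interface here is invented for illustration; `std::bad_alloc` stands in for Paddle's `BadAlloc`):

    #include <cstddef>
    #include <new>

    template <typename Underlying, typename Cache>
    void* AllocateBuffered(Underlying& underlying, Cache& cache, std::size_t size) {
      if (void* hit = cache.PopAtLeast(size)) return hit;  // fast path: reuse
      try {
        return underlying.Allocate(size);
      } catch (const std::bad_alloc&) {
        cache.ReleaseAtLeast(size);        // hand cached memory back to the source
        return underlying.Allocate(size);  // retry once; a second failure escapes
      }
    }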

@@ -45,7 +45,7 @@ class BufferedAllocator : public Allocator {
 
  protected:
   void FreeImpl(Allocation *allocation) override;
-  Allocation *AllocateImpl(size_t size, Allocator::Attr attr) override;
+  Allocation *AllocateImpl(size_t size) override;
 
  private:
   std::shared_ptr<Allocator> underlying_allocator_;

@@ -36,7 +36,7 @@ inline std::unique_ptr<BufferedAllocator> GetBufferedAllocator(
 
 TEST(buffered_allocator, thread_safety) {
   std::unique_ptr<CPUAllocator> allocator(new CPUAllocator());
-  auto chunk = allocator->Allocate(1 << 20, allocator->kDefault);
+  auto chunk = allocator->Allocate(1 << 20);
   {
     auto buf_allocator = GetBufferedAllocator(chunk.get(), true);
     ASSERT_EQ(buf_allocator->IsAllocThreadSafe(), true);
@@ -72,7 +72,7 @@ class StubAllocator : public Allocator {
     ++destruct_count_;
     delete allocation;
   }
-  Allocation *AllocateImpl(size_t size, Allocator::Attr attr) override {
+  Allocation *AllocateImpl(size_t size) override {
     ++construct_count_;
     if (size == 0) {
       return new StubAllocation(nullptr, 0, platform::CPUPlace());
@@ -98,7 +98,7 @@ TEST(buffered_allocator, lazy_free) {
 
   {
     underlying_allocator->ResetCounter();
-    auto x = allocator->Allocate(1025, allocator->kDefault);
+    auto x = allocator->Allocate(1025);
     ASSERT_EQ(underlying_allocator->GetAllocCount(), kOne);
     ASSERT_EQ(underlying_allocator->GetFreeCount(), kZero);
     x = nullptr;
@@ -107,10 +107,10 @@ TEST(buffered_allocator, lazy_free) {
 
   {
     underlying_allocator->ResetCounter();
-    auto x = allocator->Allocate(900, allocator->kDefault);
+    auto x = allocator->Allocate(900);
     ASSERT_EQ(underlying_allocator->GetAllocCount(), kZero);
     ASSERT_EQ(underlying_allocator->GetFreeCount(), kZero);
-    auto y = allocator->Allocate(2048, allocator->kDefault);
+    auto y = allocator->Allocate(2048);
     ASSERT_EQ(underlying_allocator->GetAllocCount(), kOne);
     ASSERT_EQ(underlying_allocator->GetFreeCount(), kZero);
     x = nullptr;
@@ -129,13 +129,13 @@ TEST(buffered_allocator, lazy_free) {
 
 TEST(buffered_allocator, garbage_collection) {
   std::unique_ptr<CPUAllocator> cpu_allocator(new CPUAllocator());
-  auto chunk = cpu_allocator->Allocate(2048, cpu_allocator->kDefault);
+  auto chunk = cpu_allocator->Allocate(2048);
   auto allocator = GetBufferedAllocator(chunk.get(), false);
-  auto x1 = allocator->Allocate(1600, allocator->kDefault);
-  auto x2 = allocator->Allocate(400, allocator->kDefault);
+  auto x1 = allocator->Allocate(1600);
+  auto x2 = allocator->Allocate(400);
   x1 = nullptr;
   x2 = nullptr;
-  auto x3 = allocator->Allocate(1600, allocator->kDefault);
+  auto x3 = allocator->Allocate(1600);
   ASSERT_NE(x3, nullptr);
   ASSERT_NE(x3->ptr(), nullptr);
 }

@@ -13,14 +13,14 @@
 // limitations under the License.
 
 #include "paddle/fluid/memory/allocation/conditional_allocator.h"
+#include <memory>
 
 namespace paddle {
 namespace memory {
 namespace allocation {
 
 ConditionalAllocator& ConditionalAllocator::AddAllocator(
-    std::function<bool(size_t, Allocator::Attr)> func,
-    std::shared_ptr<Allocator> allocator) {
+    std::function<bool(size_t)> func, std::shared_ptr<Allocator> allocator) {
   underlying_allocators_.emplace_back(std::move(func), std::move(allocator));
   return *this;
 }
@@ -33,11 +33,10 @@ bool ConditionalAllocator::IsAllocThreadSafe() const {
       });
 }
 
-Allocation* ConditionalAllocator::AllocateImpl(size_t size,
-                                               Allocator::Attr attr) {
+Allocation* ConditionalAllocator::AllocateImpl(size_t size) {
   for (auto& pair : underlying_allocators_) {
-    if (pair.first(size, attr)) {
-      return pair.second->Allocate(size, attr).release();
+    if (pair.first(size)) {
+      return pair.second->Allocate(size).release();
     }
   }
   throw BadAlloc("No suitable allocator");

@@ -14,6 +14,7 @@
 #pragma once
 #include <functional>
 #include <memory>
+#include <utility>
 #include <vector>
 #include "paddle/fluid/memory/allocation/allocator.h"
@@ -28,13 +29,10 @@ namespace allocation {
 // For example:
 //
 //   auto* cond_allocator = new ConditionalAllocator();
-//   cond_allocator->AddAllocator([](size_t size, Attr attr){
+//   cond_allocator->AddAllocator([](size_t size){
 //     // if size > 10
 //     return size > 10;
-//   }, allocator_a).AddAllocator([](size_t size, Attr attr){
-//     // elif attr is kDefault
-//     return attr == kDefault;
-//   }, allocator_b).AddAllocator([](size_t size, Attr attr){
+//   }, allocator_b).AddAllocator([](size_t size){
 //     // else
 //     return true;
 //   }, allocator_c);
@@ -42,17 +40,17 @@ class ConditionalAllocator : public Allocator {
  public:
   ConditionalAllocator() = default;
 
-  ConditionalAllocator& AddAllocator(std::function<bool(size_t, Attr)> func,
+  ConditionalAllocator& AddAllocator(std::function<bool(size_t)> func,
                                      std::shared_ptr<Allocator> allocator);
 
   bool IsAllocThreadSafe() const override;
 
  protected:
-  Allocation* AllocateImpl(size_t size, Allocator::Attr attr) override;
+  Allocation* AllocateImpl(size_t size) override;
 
  private:
   using AllocatorWithCond =
-      std::pair<std::function<bool(size_t, Attr)>, std::shared_ptr<Allocator>>;
+      std::pair<std::function<bool(size_t)>, std::shared_ptr<Allocator>>;
   std::vector<AllocatorWithCond> underlying_allocators_;
 };
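With the predicate reduced to a single `size_t`, wiring up a ConditionalAllocator follows the updated doc comment above. A hedged sketch (assumes `small_allocator` and `fallback_allocator` are `std::shared_ptr<Allocator>` instances built elsewhere):

    auto* cond = new ConditionalAllocator();
    cond->AddAllocator(
            [](size_t size) { return size < (1 << 20); },  // requests under 1 MB
            small_allocator)
        .AddAllocator([](size_t) { return true; },         // everything else
                      fallback_allocator);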

Some files were not shown because too many files have changed in this diff.