Fix allocator bug (#16712)
* Revert "Revert "Fix allocator bug"" This reverts commitfix_ema174d0d0b90
. * Revert "fix travis ci" This reverts commit5656fa9f7c
. test=develop * add inlined_vector.h, test=develop * add inlined_vector_test,test=develop
parent 035771512d
commit c6189637cd
paddle/fluid/framework/inlined_vector.h
@@ -0,0 +1,69 @@
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <cstdint>
#include <vector>
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace framework {

template <typename T, size_t N>
class InlinedVector {
  static_assert(N > 0, "N must be larger than 0");

 public:
  inline InlinedVector() { len_ = 0; }

  inline size_t size() const { return len_; }

  inline T& operator[](size_t i) { return i < N ? head_[i] : tail_[i - N]; }

  inline const T& operator[](size_t i) const {
    return i < N ? head_[i] : tail_[i - N];
  }

  inline void emplace_back(const T& item) {
    if (LIKELY(len_ < N)) {
      head_[len_++] = item;
    } else {
      tail_.emplace_back(item);
      ++len_;
    }
  }

  inline void pop_back() {
    if (UNLIKELY(len_ > N)) {
      tail_.pop_back();
    }
    --len_;
  }

  inline T& back() {
    if (LIKELY(len_ <= N)) {
      return head_[len_ - 1];
    } else {
      return tail_.back();
    }
  }

 private:
  T head_[N];
  size_t len_;
  std::vector<T> tail_;
};

}  // namespace framework
}  // namespace paddle
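As an aside on what this header provides: `InlinedVector<T, N>` keeps its first N elements in a fixed-size member array (`head_`) and only spills later elements into a heap-backed `std::vector` (`tail_`), so vectors that never grow past N avoid heap allocation entirely. The following is a minimal standalone sketch of the same inline-then-spill idea; `SmallBuffer` and the `main` driver are illustrative names, not part of the Paddle API.

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Sketch of the inline-then-spill storage scheme used by InlinedVector.
template <typename T, std::size_t N>
class SmallBuffer {
 public:
  void push_back(const T& v) {
    if (len_ < N) {
      head_[len_] = v;     // fast path: inline storage, no heap allocation
    } else {
      tail_.push_back(v);  // slow path: element N and beyond go to the heap
    }
    ++len_;
  }
  T& operator[](std::size_t i) { return i < N ? head_[i] : tail_[i - N]; }
  std::size_t size() const { return len_; }

 private:
  T head_[N];            // inline storage for the first N elements
  std::size_t len_{0};   // total number of elements (inline + spilled)
  std::vector<T> tail_;  // overflow storage for elements N, N+1, ...
};

int main() {
  SmallBuffer<int, 4> buf;
  for (int i = 0; i < 6; ++i) buf.push_back(i);  // elements 4 and 5 spill
  std::cout << buf[3] << " " << buf[5] << "\n";  // prints "3 5"
  return 0;
}
```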
paddle/fluid/framework/inlined_vector_test.cc
@@ -0,0 +1,82 @@
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/inlined_vector.h"
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <vector>
#include "gtest/gtest.h"

namespace paddle {
namespace framework {

template <typename T, size_t N>
static std::vector<T> ToStdVector(const framework::InlinedVector<T, N> &vec) {
  std::vector<T> std_vec;
  std_vec.reserve(vec.size());
  for (size_t i = 0; i < vec.size(); ++i) {
    std_vec.emplace_back(vec[i]);
  }
  return std_vec;
}

template <size_t N>
void InlinedVectorCheck(size_t n) {
  std::srand(std::time(nullptr));

  std::vector<int> std_vec;
  framework::InlinedVector<int, N> vec;

  for (size_t i = 0; i < n; ++i) {
    int value = rand();  // NOLINT

    std_vec.emplace_back(value);
    vec.emplace_back(value);

    CHECK_EQ(std_vec.size(), vec.size());
    CHECK_EQ(std_vec.back(), vec.back());

    CHECK_EQ(vec.back(), value);
  }

  bool is_equal = (std_vec == ToStdVector(vec));

  CHECK_EQ(is_equal, true);

  for (size_t i = 0; i < n; ++i) {
    CHECK_EQ(std_vec.size(), vec.size());
    CHECK_EQ(std_vec.back(), vec.back());
    std_vec.pop_back();
    vec.pop_back();
    CHECK_EQ(std_vec.size(), vec.size());
  }

  CHECK_EQ(std_vec.size(), static_cast<size_t>(0));
  CHECK_EQ(vec.size(), static_cast<size_t>(0));
}

TEST(inlined_vector, inlined_vector) {
  for (size_t i = 0; i < 20; ++i) {
    InlinedVectorCheck<1>(i);
    InlinedVectorCheck<10>(i);
    InlinedVectorCheck<15>(i);
    InlinedVectorCheck<20>(i);
    InlinedVectorCheck<21>(i);
    InlinedVectorCheck<25>(i);
  }
}

}  // namespace framework
}  // namespace paddle
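The test above is a differential check: every `emplace_back`/`pop_back` on `InlinedVector` is mirrored on a plain `std::vector`, and sizes, `back()` values, and full contents must stay equal. The chosen N values (1, 10, 15, 20, 21, 25) straddle the loop bound of 20, so both the all-inline and the spilled regimes are exercised. A smaller hand-written boundary check in the same spirit might look like this sketch (it assumes the `inlined_vector.h` header from this diff is on the include path and is not part of the commit):

```cpp
// Focused check of the exact point where elements spill from head_ to tail_.
#include <cassert>

#include "paddle/fluid/framework/inlined_vector.h"

int main() {
  paddle::framework::InlinedVector<int, 4> vec;
  for (int i = 0; i < 6; ++i) {
    vec.emplace_back(i);  // i = 4, 5 land in the heap-backed tail_
  }
  assert(vec.size() == 6);
  assert(vec[3] == 3);      // last inline element
  assert(vec[4] == 4);      // first spilled element (tail_[0])
  assert(vec.back() == 5);  // back() reads from tail_ while len_ > N
  vec.pop_back();
  vec.pop_back();
  assert(vec.back() == 3);  // back at the inline boundary again
  return 0;
}
```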
@@ -0,0 +1,91 @@
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <gflags/gflags.h>
#include <gtest/gtest.h>
#include "paddle/fluid/memory/allocation/allocator_facade.h"

#ifdef PADDLE_WITH_CUDA
DECLARE_double(fraction_of_gpu_memory_to_use);
DECLARE_double(fraction_of_cuda_pinned_memory_to_use);
DECLARE_int64(gpu_allocator_retry_time);
#endif

DECLARE_string(allocator_strategy);

namespace paddle {
namespace memory {
namespace allocation {

TEST(allocator, allocator) {
#ifdef PADDLE_WITH_CUDA
  FLAGS_fraction_of_gpu_memory_to_use = 0.01;
  FLAGS_gpu_allocator_retry_time = 500;
  FLAGS_fraction_of_cuda_pinned_memory_to_use = 0.5;
#endif

  FLAGS_allocator_strategy = "naive_best_fit";

  auto &instance = AllocatorFacade::Instance();
  platform::Place place;
  size_t size = 1024;

  {
    place = platform::CPUPlace();
    size = 1024;
    auto cpu_allocation = instance.Alloc(place, size);
    ASSERT_NE(cpu_allocation, nullptr);
    ASSERT_NE(cpu_allocation->ptr(), nullptr);
    ASSERT_EQ(cpu_allocation->place(), place);
    ASSERT_EQ(cpu_allocation->size(), size);
  }

#ifdef PADDLE_WITH_CUDA
  {
    place = platform::CUDAPlace(0);
    size = 1024;
    auto gpu_allocation = instance.Alloc(place, size);
    ASSERT_NE(gpu_allocation, nullptr);
    ASSERT_NE(gpu_allocation->ptr(), nullptr);
    ASSERT_EQ(gpu_allocation->place(), place);
    ASSERT_GE(gpu_allocation->size(), size);
  }

  {
    // Allocate 2GB gpu memory
    place = platform::CUDAPlace(0);
    size = 2 * static_cast<size_t>(1 << 30);
    auto gpu_allocation = instance.Alloc(place, size);
    ASSERT_NE(gpu_allocation, nullptr);
    ASSERT_NE(gpu_allocation->ptr(), nullptr);
    ASSERT_EQ(gpu_allocation->place(), place);
    ASSERT_GE(gpu_allocation->size(), size);
  }

  {
    place = platform::CUDAPinnedPlace();
    size = (1 << 20);
    auto cuda_pinned_allocation =
        instance.Alloc(platform::CUDAPinnedPlace(), 1 << 20);
    ASSERT_NE(cuda_pinned_allocation, nullptr);
    ASSERT_NE(cuda_pinned_allocation->ptr(), nullptr);
    ASSERT_EQ(cuda_pinned_allocation->place(), place);
    ASSERT_GE(cuda_pinned_allocation->size(), size);
  }
#endif
}

}  // namespace allocation
}  // namespace memory
}  // namespace paddle
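Everything in this test goes through the `AllocatorFacade` singleton: pick a strategy via the gflag, request memory for a `Place`, then inspect the returned allocation's `ptr()`, `place()`, and `size()`. A pared-down, CPU-only sketch of that flow follows; the test name is made up here, and the comment about automatic release is an assumption about the handle type rather than something stated in this diff.

```cpp
#include <gflags/gflags.h>
#include <gtest/gtest.h>

#include "paddle/fluid/memory/allocation/allocator_facade.h"

DECLARE_string(allocator_strategy);

namespace paddle {
namespace memory {
namespace allocation {

// Hypothetical test name; mirrors only the CPU branch of the test above.
TEST(allocator_facade_sketch, cpu_alloc) {
  FLAGS_allocator_strategy = "naive_best_fit";   // same strategy as above
  auto &instance = AllocatorFacade::Instance();  // process-wide facade
  platform::Place place = platform::CPUPlace();
  auto allocation = instance.Alloc(place, 4096);
  ASSERT_NE(allocation, nullptr);
  ASSERT_NE(allocation->ptr(), nullptr);
  ASSERT_EQ(allocation->place(), place);
  ASSERT_EQ(allocation->size(), static_cast<size_t>(4096));
  // Assumption: the allocation handle frees the memory when it goes out of scope.
}

}  // namespace allocation
}  // namespace memory
}  // namespace paddle
```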
Some files were not shown because too many files have changed in this diff.