From 84d1c734ca2fe7a17e000467823d49891507cf0b Mon Sep 17 00:00:00 2001
From: Yi Wang
Date: Sun, 25 Jun 2017 15:40:45 -0700
Subject: [PATCH 01/43] add paddle/memory/detail/cpu_allocator*

---
 paddle/CMakeLists.txt                      |  1 +
 paddle/memory/CMakeLists.txt               |  1 +
 paddle/memory/README.md                    | 14 ++---
 paddle/memory/detail/CMakeLists.txt        |  1 +
 paddle/memory/detail/cpu_allocator.h       | 63 ++++++++++++++++++++++
 paddle/memory/detail/cpu_allocator_test.cc | 32 +++++++++++
 paddle/memory/memory.cc                    | 51 ++++++++++++++++++
 paddle/memory/memory.h                     | 27 ++++++++++
 paddle/platform/place.cc                   | 12 ++---
 paddle/platform/place.h                    | 45 ++++++++++------
 paddle/platform/place_test.cc              | 14 ++---
 11 files changed, 224 insertions(+), 37 deletions(-)
 create mode 100644 paddle/memory/CMakeLists.txt
 create mode 100644 paddle/memory/detail/CMakeLists.txt
 create mode 100644 paddle/memory/detail/cpu_allocator.h
 create mode 100644 paddle/memory/detail/cpu_allocator_test.cc
 create mode 100644 paddle/memory/memory.cc
 create mode 100644 paddle/memory/memory.h

diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt
index 573bd937a3..0cddb95244 100644
--- a/paddle/CMakeLists.txt
+++ b/paddle/CMakeLists.txt
@@ -10,6 +10,7 @@ add_subdirectory(trainer)
 add_subdirectory(scripts)
 add_subdirectory(optimizer)
 add_subdirectory(strings)
+add_subdirectory(memory)
 
 # Do not build go directory until go cmake is working smoothly.
 # if(CMAKE_Go_COMPILER)
diff --git a/paddle/memory/CMakeLists.txt b/paddle/memory/CMakeLists.txt
new file mode 100644
index 0000000000..3943c3cfad
--- /dev/null
+++ b/paddle/memory/CMakeLists.txt
@@ -0,0 +1 @@
+add_subdirectory(detail)
diff --git a/paddle/memory/README.md b/paddle/memory/README.md
index fd32d07ef4..e5f7880e4c 100644
--- a/paddle/memory/README.md
+++ b/paddle/memory/README.md
@@ -31,7 +31,7 @@ In `paddle/memory/memory.h` we have:
 namespace memory {
 template <typename Place> void* Alloc(Place, size_t);
 template <typename Place> void Free(Place, void*);
-template <typename Place> void Used(Place);
+template <typename Place> size_t Used(Place);
 } // namespace memory
 ```
 
 These function templates have specializations on either `platform::CPUPlace` or
 
 ```cpp
 template<>
-void Alloc(CPUPlace p, size_t size) {
+void* Alloc(CPUPlace p, size_t size) {
   return GetCPUBuddyAllocator()->Alloc(size);
 }
 ```
@@ -102,15 +102,11 @@ class BuddyAllocator {
 };
 ```
 
-#### System Allocators
-
-The `GPUAllocator` and `CPUAllocator` are calls *system allocators*. They work as the fallback allocators of `BuddyAllocator`. A system allocator holds information about a device, including the amount of memory has been allocated, so we can call
+Because `BuddyAllocator` keeps the metadata of each block, it can track used memory, recording the amount returned by `Alloc` and freed in `Free`. In contrast, `CPUAllocator` and `GPUAllocator` do not know the size of a freed memory block and cannot do such tracking.
-- `GPUAllocator::Used()` and
-- `CPUAllocator::Used()`
-
-to get the amount of memory that has been allocated so far.
+#### System Allocators
+The `GPUAllocator` and `CPUAllocator` are called *system allocators*. They work as the fallback allocators of `BuddyAllocator`.
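To make the dispatch described above concrete, here is a minimal, self-contained sketch (not the patch's actual code) of how an `Alloc` specialization forwards to a lazily created per-device allocator. The `BuddyAllocator` below is a malloc-backed stand-in, and the simplified `CPUPlace` replaces `platform::CPUPlace`; only the overall shape follows this README.

```cpp
#include <stddef.h>  // for size_t
#include <stdlib.h>  // for malloc and free

namespace paddle {
namespace memory {

// Malloc-backed stand-in for the real BuddyAllocator sketched above.
class BuddyAllocator {
 public:
  void* Alloc(size_t size) {
    used_ += size;  // the real allocator tracks this via per-block metadata
    return malloc(size);
  }
  void Free(void* p) { free(p); }  // the real version also updates used_
  size_t Used() const { return used_; }

 private:
  size_t used_ = 0;
};

// One lazily created allocator for the CPU, as the README suggests.
BuddyAllocator* GetCPUBuddyAllocator() {
  static BuddyAllocator* a = new BuddyAllocator();
  return a;
}

struct CPUPlace {};  // simplified stand-in for platform::CPUPlace

template <typename Place>
void* Alloc(Place, size_t);

// Specialization that dispatches CPU allocations to the singleton.
template <>
void* Alloc<CPUPlace>(CPUPlace, size_t size) {
  return GetCPUBuddyAllocator()->Alloc(size);
}

}  // namespace memory
}  // namespace paddle
```

A caller would then write `void* p = paddle::memory::Alloc(CPUPlace(), 1024);` and later query `Used` on the same place to see how many bytes have been handed out.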
## Justification diff --git a/paddle/memory/detail/CMakeLists.txt b/paddle/memory/detail/CMakeLists.txt new file mode 100644 index 0000000000..fb8a11062d --- /dev/null +++ b/paddle/memory/detail/CMakeLists.txt @@ -0,0 +1 @@ +cc_test(cpu_allocator_test SRCS cpu_allocator_test.cc) diff --git a/paddle/memory/detail/cpu_allocator.h b/paddle/memory/detail/cpu_allocator.h new file mode 100644 index 0000000000..8a872d3800 --- /dev/null +++ b/paddle/memory/detail/cpu_allocator.h @@ -0,0 +1,63 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include // for malloc and free +#include // for size_t + +namespace paddle { +namespace memory { +namespace detail { + +// CPUAllocator calls cudaMallocHost, which returns +// pinned and mlocked memory as staging areas for data exchange +// between host and device. Allocates too much would reduce the +// amount of memory available to the system for paging. So, by +// default, we should use CPUAllocator. +template +class CPUAllocator { +public: + void* Alloc(size_t size); + void Free(void* p); +}; + +template <> +class CPUAllocator { +public: + void* Alloc(size_t size) { return malloc(size); } + void Free(void* p) { free(p); } +}; + +// If CMake macro WITH_GPU is OFF, C++ compiler won't generate the +// following specialization that depends on the CUDA library. +#ifdef WITH_GPU +template <> +class CPUAllocator { +public: + void* Alloc(size_t size) { + void* p; + if (cudaMallocHost(&p, size) != cudaSuccess) { + return NULL; + } + return *p; + } + + void Free(void* p) { cudaFreeHost(p); } +}; +#endif // WITH_GPU + +} // namespace detail +} // namespace memory +} // namespace paddle diff --git a/paddle/memory/detail/cpu_allocator_test.cc b/paddle/memory/detail/cpu_allocator_test.cc new file mode 100644 index 0000000000..0aa33a22fd --- /dev/null +++ b/paddle/memory/detail/cpu_allocator_test.cc @@ -0,0 +1,32 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/memory/detail/cpu_allocator.h" +#include "gtest/gtest.h" + +TEST(CPUAllocator, NonStaging) { + paddle::memory::detail::CPUAllocator a; + void* p = a.Alloc(4096); + EXPECT_NE(p, nullptr); + a.Free(p); +} + +#ifdef WITH_GPU +TEST(CPUAllocator, Staging) { + paddle::memory::detail::CPUAllocator a; + void* p = a.Alloc(4096); + EXPECT_NE(p, nullptr); + a.Free(p); +} +#endif // WITH_GPU diff --git a/paddle/memory/memory.cc b/paddle/memory/memory.cc new file mode 100644 index 0000000000..5f1253ede6 --- /dev/null +++ b/paddle/memory/memory.cc @@ -0,0 +1,51 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/memory/memory.h" + +namespace paddle { +namespace memory { + +template <> +void* Alloc(CPUPlace, size_t size) { + return GetCPUBuddyAllocator()->Alloc(size); +} + +template <> +void* Alloc(GPUPlace pl, size_t size) { + return GetGPUBuddyAllocator(pl.device)->Alloc(size); +} + +template <> +void Free(CPUPlace, void* p) { + return GetCPUBuddyAllocator()->Free(p); +} + +template <> +void* Alloc(GPUPlace pl, void* p) { + return GetGPUBuddyAllocator(pl.device)->Free(p); +} + +template <> +size_t Used(CPUPlace) { + return GetCPUBuddyAllocator()->Used(); +} + +template <> +size_t Alloc(GPUPlace pl) { + return GetGPUBuddyAllocator(pl.device)->Used(); +} + +} // namespace memory +} // namespace paddle diff --git a/paddle/memory/memory.h b/paddle/memory/memory.h new file mode 100644 index 0000000000..ae8ac6ca52 --- /dev/null +++ b/paddle/memory/memory.h @@ -0,0 +1,27 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include "paddle/frameowork/place.h" + +namespace paddle { +namespace memory { + +typename void* Alloc(Place, size_t); +typename void Free(Place, void*); +typename size_t Used(Place); + +} // namespace memory +} // namespace paddle diff --git a/paddle/platform/place.cc b/paddle/platform/place.cc index 1afd03c011..0704820aa0 100644 --- a/paddle/platform/place.cc +++ b/paddle/platform/place.cc @@ -8,8 +8,8 @@ namespace detail { class PlacePrinter : public boost::static_visitor<> { public: PlacePrinter(std::ostream &os) : os_(os) {} - void operator()(const CpuPlace &) { os_ << "CpuPlace"; } - void operator()(const GpuPlace &p) { os_ << "GpuPlace(" << p.device << ")"; } + void operator()(const CPUPlace &) { os_ << "CPUPlace"; } + void operator()(const GPUPlace &p) { os_ << "GPUPlace(" << p.device << ")"; } private: std::ostream &os_; @@ -22,14 +22,14 @@ static Place the_default_place; void set_place(const Place &place) { the_default_place = place; } const Place &get_place() { return the_default_place; } -const GpuPlace default_gpu() { return GpuPlace(0); } -const CpuPlace default_cpu() { return CpuPlace(); } +const GPUPlace default_gpu() { return GPUPlace(0); } +const CPUPlace default_cpu() { return CPUPlace(); } bool is_gpu_place(const Place &p) { - return boost::apply_visitor(IsGpuPlace(), p); + return boost::apply_visitor(IsGPUPlace(), p); } bool is_cpu_place(const Place &p) { - return !boost::apply_visitor(IsGpuPlace(), p); + return !boost::apply_visitor(IsGPUPlace(), p); } bool places_are_same_class(const Place &p1, const Place &p2) { diff --git a/paddle/platform/place.h b/paddle/platform/place.h index 489572c526..7cead18388 100644 --- a/paddle/platform/place.h +++ b/paddle/platform/place.h @@ -1,43 +1,58 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
 */
+
 #pragma once
+
 #include <iostream>
 #include <boost/variant.hpp>
 
 namespace paddle {
 namespace platform {
 
-struct CpuPlace {
+struct CPUPlace {
   // WORKAROUND: for some reason, omitting this constructor
   // causes errors with boost 1.59 and OSX
-  CpuPlace() {}
+  CPUPlace() {}
 
   // needed for variant equality comparison
-  inline bool operator==(const CpuPlace &) const { return true; }
-  inline bool operator!=(const CpuPlace &) const { return false; }
+  inline bool operator==(const CPUPlace &) const { return true; }
+  inline bool operator!=(const CPUPlace &) const { return false; }
 };
 
-struct GpuPlace {
-  GpuPlace() : GpuPlace(0) {}
-  GpuPlace(int d) : device(d) {}
+struct GPUPlace {
+  GPUPlace() : GPUPlace(0) {}
+  GPUPlace(int d) : device(d) {}
 
   // needed for variant equality comparison
-  inline bool operator==(const GpuPlace &o) const { return device == o.device; }
-  inline bool operator!=(const GpuPlace &o) const { return !(*this == o); }
+  inline bool operator==(const GPUPlace &o) const { return device == o.device; }
+  inline bool operator!=(const GPUPlace &o) const { return !(*this == o); }
 
   int device;
 };
 
-struct IsGpuPlace : public boost::static_visitor<bool> {
-  bool operator()(const CpuPlace &) const { return false; }
-  bool operator()(const GpuPlace &gpu) const { return true; }
+struct IsGPUPlace : public boost::static_visitor<bool> {
+  bool operator()(const CPUPlace &) const { return false; }
+  bool operator()(const GPUPlace &gpu) const { return true; }
 };
 
-typedef boost::variant<CpuPlace, GpuPlace> Place;
+typedef boost::variant<CPUPlace, GPUPlace> Place;
 
 void set_place(const Place &);
 const Place &get_place();
 
-const GpuPlace default_gpu();
-const CpuPlace default_cpu();
+const GPUPlace default_gpu();
+const CPUPlace default_cpu();
 
 bool is_gpu_place(const Place &);
 bool is_cpu_place(const Place &);
diff --git a/paddle/platform/place_test.cc b/paddle/platform/place_test.cc
index 73fccceedf..33e2e5a439 100644
--- a/paddle/platform/place_test.cc
+++ b/paddle/platform/place_test.cc
@@ -3,8 +3,8 @@
 #include "gtest/gtest.h"
 
 TEST(Place, Equality) {
-  paddle::platform::CpuPlace cpu;
-  paddle::platform::GpuPlace g0(0), g1(1), gg0(0);
+  paddle::platform::CPUPlace cpu;
+  paddle::platform::GPUPlace g0(0), g1(1), gg0(0);
 
   EXPECT_EQ(cpu, cpu);
 
   EXPECT_EQ(g0, g0);
@@ -22,19 +22,19 @@ TEST(Place, Default) {
   EXPECT_TRUE(paddle::platform::is_gpu_place(paddle::platform::default_gpu()));
   EXPECT_TRUE(paddle::platform::is_cpu_place(paddle::platform::default_cpu()));
 
-  paddle::platform::set_place(paddle::platform::CpuPlace());
+  paddle::platform::set_place(paddle::platform::CPUPlace());
   EXPECT_TRUE(paddle::platform::is_cpu_place(paddle::platform::get_place()));
 }
 
 TEST(Place, Print) {
   {
     std::stringstream ss;
-    ss << paddle::platform::GpuPlace(1);
-    EXPECT_EQ("GpuPlace(1)", ss.str());
+    ss << paddle::platform::GPUPlace(1);
+    EXPECT_EQ("GPUPlace(1)", ss.str());
   }
   {
     std::stringstream ss;
-    ss << paddle::platform::CpuPlace();
-    EXPECT_EQ("CpuPlace", ss.str());
+    ss << paddle::platform::CPUPlace();
+    EXPECT_EQ("CPUPlace", ss.str());
   }
 }

From db128c4586c3c925a6c53a9ae770cb07cdbea1bf Mon Sep 17 00:00:00 2001
From: Yi Wang
Date: Sun, 25 Jun 2017 17:54:06 -0700
Subject: [PATCH 02/43] Pass cpu_allocator_test

---
 CMakeLists.txt                             |  2 +-
 cmake/generic.cmake                        |  4 ++++
 paddle/memory/detail/CMakeLists.txt        |  6 +++++-
 paddle/memory/detail/cpu_allocator.h       | 13 +++++++++----
 paddle/memory/detail/cpu_allocator_test.cc | 16 +++++++++++-----
 paddle/memory/memory.cc                    | 14 ++++++++++++--
 paddle/memory/memory.h                     | 16 +++++++++++++---
 7 files changed, 55 insertions(+), 16 deletions(-)

diff --git a/CMakeLists.txt
b/CMakeLists.txt index c5d7f2c7ec..3c719d35ec 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -71,7 +71,7 @@ if(ANDROID) "Disable RDMA when cross-compiling for Android" FORCE) endif(ANDROID) -set(THIRD_PARTY_PATH "${PROJ_ROOT}/third_party" CACHE STRING +set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING "A path setting third party libraries download & build directories.") if (WITH_C_API AND WITH_PYTHON) diff --git a/cmake/generic.cmake b/cmake/generic.cmake index 69e8164a00..840155750e 100644 --- a/cmake/generic.cmake +++ b/cmake/generic.cmake @@ -78,6 +78,10 @@ # # cc_test(example_test SRCS example_test.cc DEPS example glog gflags) +if(WITH_GPU) + add_definitions(-DPADDLE_WITH_GPU) +endif() + if(NOT APPLE) find_package(Threads REQUIRED) link_libraries(${CMAKE_THREAD_LIBS_INIT}) diff --git a/paddle/memory/detail/CMakeLists.txt b/paddle/memory/detail/CMakeLists.txt index fb8a11062d..c425e9f947 100644 --- a/paddle/memory/detail/CMakeLists.txt +++ b/paddle/memory/detail/CMakeLists.txt @@ -1 +1,5 @@ -cc_test(cpu_allocator_test SRCS cpu_allocator_test.cc) +if(${WITH_GPU}) + nv_test(cpu_allocator_test SRCS cpu_allocator_test.cc) # nv_test links CUDA, but +else(${WITH_GPU}) + cc_test(cpu_allocator_test SRCS cpu_allocator_test.cc) # cc_test doesn't. +endif(${WITH_GPU}) diff --git a/paddle/memory/detail/cpu_allocator.h b/paddle/memory/detail/cpu_allocator.h index 8a872d3800..0d8ea3f52b 100644 --- a/paddle/memory/detail/cpu_allocator.h +++ b/paddle/memory/detail/cpu_allocator.h @@ -17,6 +17,11 @@ limitations under the License. */ #include // for malloc and free #include // for size_t +#ifdef PADDLE_WITH_GPU +#include +#include +#endif // PADDLE_WITH_GPU + namespace paddle { namespace memory { namespace detail { @@ -40,9 +45,9 @@ public: void Free(void* p) { free(p); } }; -// If CMake macro WITH_GPU is OFF, C++ compiler won't generate the +// If CMake macro PADDLE_WITH_GPU is OFF, C++ compiler won't generate the // following specialization that depends on the CUDA library. 
-#ifdef WITH_GPU +#ifdef PADDLE_WITH_GPU template <> class CPUAllocator { public: @@ -51,12 +56,12 @@ public: if (cudaMallocHost(&p, size) != cudaSuccess) { return NULL; } - return *p; + return p; } void Free(void* p) { cudaFreeHost(p); } }; -#endif // WITH_GPU +#endif // PADDLE_WITH_GPU } // namespace detail } // namespace memory diff --git a/paddle/memory/detail/cpu_allocator_test.cc b/paddle/memory/detail/cpu_allocator_test.cc index 0aa33a22fd..464bc84e5c 100644 --- a/paddle/memory/detail/cpu_allocator_test.cc +++ b/paddle/memory/detail/cpu_allocator_test.cc @@ -22,11 +22,17 @@ TEST(CPUAllocator, NonStaging) { a.Free(p); } -#ifdef WITH_GPU +#ifdef PADDLE_WITH_GPU TEST(CPUAllocator, Staging) { paddle::memory::detail::CPUAllocator a; - void* p = a.Alloc(4096); - EXPECT_NE(p, nullptr); - a.Free(p); + + int devices; + if (cudaGetDeviceCount(&devices) == cudaSuccess && devices > 0) { + void* p = a.Alloc(4096); + EXPECT_NE(p, nullptr); + a.Free(p); + } else { + EXPECT_EQ(a.Alloc(4096), nullptr); + } } -#endif // WITH_GPU +#endif // PADDLE_WITH_GPU diff --git a/paddle/memory/memory.cc b/paddle/memory/memory.cc index 5f1253ede6..b617923731 100644 --- a/paddle/memory/memory.cc +++ b/paddle/memory/memory.cc @@ -19,7 +19,11 @@ namespace memory { template <> void* Alloc(CPUPlace, size_t size) { - return GetCPUBuddyAllocator()->Alloc(size); + return GetCPUBuddyAllocator(false /*non-staging*/)->Alloc(size); +} + +void* AllocStaging(CPUPlace, size_t size) { + return GetCPUBuddyAllocator(true /*staging*/)->Alloc(size); } template <> @@ -29,9 +33,14 @@ void* Alloc(GPUPlace pl, size_t size) { template <> void Free(CPUPlace, void* p) { - return GetCPUBuddyAllocator()->Free(p); + return GetCPUBuddyAllocator(false /*non-staging*/)->Free(p); +} + +void FreeStaging(CPUPlace, void* p) { + return GetCPUBuddyAllocator(false /*non-staging*/)->Free(p); } +#ifdef PADDLE_WITH_GPU template <> void* Alloc(GPUPlace pl, void* p) { return GetGPUBuddyAllocator(pl.device)->Free(p); @@ -46,6 +55,7 @@ template <> size_t Alloc(GPUPlace pl) { return GetGPUBuddyAllocator(pl.device)->Used(); } +#endif // PADDLE_WITH_GPU } // namespace memory } // namespace paddle diff --git a/paddle/memory/memory.h b/paddle/memory/memory.h index ae8ac6ca52..8c15a133bb 100644 --- a/paddle/memory/memory.h +++ b/paddle/memory/memory.h @@ -19,9 +19,19 @@ limitations under the License. */ namespace paddle { namespace memory { -typename void* Alloc(Place, size_t); -typename void Free(Place, void*); -typename size_t Used(Place); +template +void* Alloc(Place, size_t); +template +void Free(Place, void*); +template +size_t Used(Place); + +// Staging memory means "pinned" host memory that can be mapped into +// the CUDA memory space and accessed by the device rapidly. Don't +// allocate too much staging memory; otherwise system performance will +// degrade because the OS cannot find enough swap memory space. 
+void* AllocStaging(CPUPlace, size_t); +void* FreeStaging(CPUPlace, size_t); } // namespace memory } // namespace paddle From ce938ae5f9baea2b2d136154ee9a696b394929e1 Mon Sep 17 00:00:00 2001 From: liaogang Date: Mon, 26 Jun 2017 23:32:46 +0800 Subject: [PATCH 03/43] FIX: Pinned memory --- paddle/memory/README.md | 1 + paddle/memory/detail/CMakeLists.txt | 6 +--- paddle/memory/detail/cpu_allocator.h | 39 ++++++++++++---------- paddle/memory/detail/cpu_allocator_test.cc | 16 +++------ 4 files changed, 27 insertions(+), 35 deletions(-) diff --git a/paddle/memory/README.md b/paddle/memory/README.md index e5f7880e4c..96a331a486 100644 --- a/paddle/memory/README.md +++ b/paddle/memory/README.md @@ -97,6 +97,7 @@ class BuddyAllocator { struct Block { size_t size; Block* left, right; + size_t index; // allocator id }; ... }; diff --git a/paddle/memory/detail/CMakeLists.txt b/paddle/memory/detail/CMakeLists.txt index c425e9f947..fb8a11062d 100644 --- a/paddle/memory/detail/CMakeLists.txt +++ b/paddle/memory/detail/CMakeLists.txt @@ -1,5 +1 @@ -if(${WITH_GPU}) - nv_test(cpu_allocator_test SRCS cpu_allocator_test.cc) # nv_test links CUDA, but -else(${WITH_GPU}) - cc_test(cpu_allocator_test SRCS cpu_allocator_test.cc) # cc_test doesn't. -endif(${WITH_GPU}) +cc_test(cpu_allocator_test SRCS cpu_allocator_test.cc) diff --git a/paddle/memory/detail/cpu_allocator.h b/paddle/memory/detail/cpu_allocator.h index 0d8ea3f52b..a487fecef4 100644 --- a/paddle/memory/detail/cpu_allocator.h +++ b/paddle/memory/detail/cpu_allocator.h @@ -14,20 +14,19 @@ limitations under the License. */ #pragma once -#include // for malloc and free #include // for size_t +#include // for malloc and free -#ifdef PADDLE_WITH_GPU -#include -#include -#endif // PADDLE_WITH_GPU +#ifndef _WIN32 +#include // for mlock and munlock +#endif namespace paddle { namespace memory { namespace detail { -// CPUAllocator calls cudaMallocHost, which returns -// pinned and mlocked memory as staging areas for data exchange +// CPUAllocator calls mlock, which returns +// pinned and locked memory as staging areas for data exchange // between host and device. Allocates too much would reduce the // amount of memory available to the system for paging. So, by // default, we should use CPUAllocator. @@ -35,33 +34,37 @@ template class CPUAllocator { public: void* Alloc(size_t size); - void Free(void* p); + void Free(void* p, size_t size); }; template <> class CPUAllocator { public: - void* Alloc(size_t size) { return malloc(size); } - void Free(void* p) { free(p); } + void* Alloc(size_t size) { return std::malloc(size); } + void Free(void* p, size_t size) { std::free(p); } }; -// If CMake macro PADDLE_WITH_GPU is OFF, C++ compiler won't generate the -// following specialization that depends on the CUDA library. 
-#ifdef PADDLE_WITH_GPU template <> class CPUAllocator { public: void* Alloc(size_t size) { - void* p; - if (cudaMallocHost(&p, size) != cudaSuccess) { - return NULL; + void* p = std::malloc(size); + if (p == nullptr) { + return p; } +#ifndef _WIN32 + mlock(p, size); +#endif return p; } - void Free(void* p) { cudaFreeHost(p); } + void Free(void* p, size_t size) { +#ifndef _WIN32 + munlock(p, size); +#endif + std::free(p); + } }; -#endif // PADDLE_WITH_GPU } // namespace detail } // namespace memory diff --git a/paddle/memory/detail/cpu_allocator_test.cc b/paddle/memory/detail/cpu_allocator_test.cc index 464bc84e5c..4e45266cd8 100644 --- a/paddle/memory/detail/cpu_allocator_test.cc +++ b/paddle/memory/detail/cpu_allocator_test.cc @@ -19,20 +19,12 @@ TEST(CPUAllocator, NonStaging) { paddle::memory::detail::CPUAllocator a; void* p = a.Alloc(4096); EXPECT_NE(p, nullptr); - a.Free(p); + a.Free(p, 4096); } -#ifdef PADDLE_WITH_GPU TEST(CPUAllocator, Staging) { paddle::memory::detail::CPUAllocator a; - - int devices; - if (cudaGetDeviceCount(&devices) == cudaSuccess && devices > 0) { - void* p = a.Alloc(4096); - EXPECT_NE(p, nullptr); - a.Free(p); - } else { - EXPECT_EQ(a.Alloc(4096), nullptr); - } + void* p = a.Alloc(4096); + EXPECT_NE(p, nullptr); + a.Free(p, 4096); } -#endif // PADDLE_WITH_GPU From ce70df86b1e8c892cdde5312caa0c2699f368f7d Mon Sep 17 00:00:00 2001 From: liaogang Date: Tue, 27 Jun 2017 00:15:36 +0800 Subject: [PATCH 04/43] Add gpu_allocator --- paddle/memory/.clang-format | 5 ++ paddle/memory/detail/CMakeLists.txt | 1 + paddle/memory/detail/cpu_allocator.h | 6 +- paddle/memory/detail/gpu_allocator.h | 92 ++++++++++++++++++++++ paddle/memory/detail/gpu_allocator_test.cc | 30 +++++++ 5 files changed, 131 insertions(+), 3 deletions(-) create mode 100644 paddle/memory/.clang-format create mode 100644 paddle/memory/detail/gpu_allocator.h create mode 100644 paddle/memory/detail/gpu_allocator_test.cc diff --git a/paddle/memory/.clang-format b/paddle/memory/.clang-format new file mode 100644 index 0000000000..29282dc87e --- /dev/null +++ b/paddle/memory/.clang-format @@ -0,0 +1,5 @@ +--- +Language: Cpp +BasedOnStyle: Google +Standard: Cpp11 +... diff --git a/paddle/memory/detail/CMakeLists.txt b/paddle/memory/detail/CMakeLists.txt index fb8a11062d..81ca8a0bbf 100644 --- a/paddle/memory/detail/CMakeLists.txt +++ b/paddle/memory/detail/CMakeLists.txt @@ -1 +1,2 @@ cc_test(cpu_allocator_test SRCS cpu_allocator_test.cc) +nv_test(gpu_allocator_test SRCS gpu_allocator_test.cc) diff --git a/paddle/memory/detail/cpu_allocator.h b/paddle/memory/detail/cpu_allocator.h index a487fecef4..17753ccef7 100644 --- a/paddle/memory/detail/cpu_allocator.h +++ b/paddle/memory/detail/cpu_allocator.h @@ -32,21 +32,21 @@ namespace detail { // default, we should use CPUAllocator. template class CPUAllocator { -public: + public: void* Alloc(size_t size); void Free(void* p, size_t size); }; template <> class CPUAllocator { -public: + public: void* Alloc(size_t size) { return std::malloc(size); } void Free(void* p, size_t size) { std::free(p); } }; template <> class CPUAllocator { -public: + public: void* Alloc(size_t size) { void* p = std::malloc(size); if (p == nullptr) { diff --git a/paddle/memory/detail/gpu_allocator.h b/paddle/memory/detail/gpu_allocator.h new file mode 100644 index 0000000000..9452c41fb8 --- /dev/null +++ b/paddle/memory/detail/gpu_allocator.h @@ -0,0 +1,92 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include // for size_t + +#include +#include + +namespace paddle { +namespace memory { +namespace detail { + +inline void throw_on_error(cudaError_t e, const char* message) { + if (e) { + throw thrust::system_error(e, thrust::cuda_category(), message); + } +} + +// GPUAllocator calls cudaHostMalloc, which returns +// pinned and locked memory as staging areas for data exchange +// between host and device. Allocates too much would reduce the +// amount of memory available to the system for paging. So, by +// default, we should use GPUAllocator. +template +class GPUAllocator { +public: + void* Alloc(size_t size); + void Free(void* p, size_t size); +}; + +template <> +class GPUAllocator { +public: + void* Alloc(size_t size) { + void* p = 0; + cudaError_t result = cudaMalloc(&p, size); + if (result == cudaSuccess) { + return p; + } + // clear last error + cudaGetLastError(); + return nullptr; + } + + void Free(void* p, size_t size) { + // Purposefully allow cudaErrorCudartUnloading, because + // that is returned if you ever call cudaFree after the + // driver has already shutdown. This happens only if the + // process is terminating, in which case we don't care if + // cudaFree succeeds. + auto err = cudaFree(p); + if (err != cudaErrorCudartUnloading) { + throw_on_error(err, "cudaFree failed"); + } + } +}; + +template <> +class GPUAllocator { +public: + void* Alloc(size_t size) { + void* p = 0; + cudaError_t result = cudaMallocHost(&p, size); + if (result == cudaSuccess) { + return p; + } + // clear last error + cudaGetLastError(); + return nullptr; + } + + void Free(void* p, size_t size) { + throw_on_error(cudaFreeHost(p), "cudaFreeHost failed"); + } +}; + +} // namespace detail +} // namespace memory +} // namespace paddle diff --git a/paddle/memory/detail/gpu_allocator_test.cc b/paddle/memory/detail/gpu_allocator_test.cc new file mode 100644 index 0000000000..18c1c9ab43 --- /dev/null +++ b/paddle/memory/detail/gpu_allocator_test.cc @@ -0,0 +1,30 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/memory/detail/gpu_allocator.h" +#include "gtest/gtest.h" + +TEST(GPUAllocator, NonStaging) { + paddle::memory::detail::GPUAllocator a; + void* p = a.Alloc(4096); + EXPECT_NE(p, nullptr); + a.Free(p, 4096); +} + +TEST(GPUAllocator, Staging) { + paddle::memory::detail::GPUAllocator a; + void* p = a.Alloc(4096); + EXPECT_NE(p, nullptr); + a.Free(p, 4096); +} From e02859c0f53dfe4616976b015d4fefd8aaa6eb39 Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Mon, 26 Jun 2017 15:27:01 -0700 Subject: [PATCH 05/43] Replace {cpu,gpu}_allocator.h and {cpu,gpu}_allocator_test.cc by system_allocator{.h,_test.cc} --- paddle/memory/CMakeLists.txt | 6 ++ paddle/memory/detail/CMakeLists.txt | 3 +- paddle/memory/detail/cpu_allocator.h | 71 ----------------- paddle/memory/detail/cpu_allocator_test.cc | 30 ------- .../{gpu_allocator.h => system_allocator.h} | 79 +++++++++++-------- ...cator_test.cc => system_allocator_test.cc} | 20 ++++- paddle/memory/memory.cc | 67 +++++++--------- paddle/memory/memory.h | 16 +--- 8 files changed, 106 insertions(+), 186 deletions(-) delete mode 100644 paddle/memory/detail/cpu_allocator.h delete mode 100644 paddle/memory/detail/cpu_allocator_test.cc rename paddle/memory/detail/{gpu_allocator.h => system_allocator.h} (58%) rename paddle/memory/detail/{gpu_allocator_test.cc => system_allocator_test.cc} (69%) diff --git a/paddle/memory/CMakeLists.txt b/paddle/memory/CMakeLists.txt index 3943c3cfad..8662512496 100644 --- a/paddle/memory/CMakeLists.txt +++ b/paddle/memory/CMakeLists.txt @@ -1 +1,7 @@ add_subdirectory(detail) + +if(${WITH_GPU}) + nv_library(memory SRCS memory.cc) +else(${WITH_GPU}) + cc_library(memory SRCS memroy.cc) +endif(${WITH_GPU}) diff --git a/paddle/memory/detail/CMakeLists.txt b/paddle/memory/detail/CMakeLists.txt index 81ca8a0bbf..3b5bbd7a12 100644 --- a/paddle/memory/detail/CMakeLists.txt +++ b/paddle/memory/detail/CMakeLists.txt @@ -1,2 +1 @@ -cc_test(cpu_allocator_test SRCS cpu_allocator_test.cc) -nv_test(gpu_allocator_test SRCS gpu_allocator_test.cc) +cc_test(system_allocator_test SRCS system_allocator_test.cc) diff --git a/paddle/memory/detail/cpu_allocator.h b/paddle/memory/detail/cpu_allocator.h deleted file mode 100644 index 17753ccef7..0000000000 --- a/paddle/memory/detail/cpu_allocator.h +++ /dev/null @@ -1,71 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include // for size_t -#include // for malloc and free - -#ifndef _WIN32 -#include // for mlock and munlock -#endif - -namespace paddle { -namespace memory { -namespace detail { - -// CPUAllocator calls mlock, which returns -// pinned and locked memory as staging areas for data exchange -// between host and device. Allocates too much would reduce the -// amount of memory available to the system for paging. So, by -// default, we should use CPUAllocator. 
-template -class CPUAllocator { - public: - void* Alloc(size_t size); - void Free(void* p, size_t size); -}; - -template <> -class CPUAllocator { - public: - void* Alloc(size_t size) { return std::malloc(size); } - void Free(void* p, size_t size) { std::free(p); } -}; - -template <> -class CPUAllocator { - public: - void* Alloc(size_t size) { - void* p = std::malloc(size); - if (p == nullptr) { - return p; - } -#ifndef _WIN32 - mlock(p, size); -#endif - return p; - } - - void Free(void* p, size_t size) { -#ifndef _WIN32 - munlock(p, size); -#endif - std::free(p); - } -}; - -} // namespace detail -} // namespace memory -} // namespace paddle diff --git a/paddle/memory/detail/cpu_allocator_test.cc b/paddle/memory/detail/cpu_allocator_test.cc deleted file mode 100644 index 4e45266cd8..0000000000 --- a/paddle/memory/detail/cpu_allocator_test.cc +++ /dev/null @@ -1,30 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/memory/detail/cpu_allocator.h" -#include "gtest/gtest.h" - -TEST(CPUAllocator, NonStaging) { - paddle::memory::detail::CPUAllocator a; - void* p = a.Alloc(4096); - EXPECT_NE(p, nullptr); - a.Free(p, 4096); -} - -TEST(CPUAllocator, Staging) { - paddle::memory::detail::CPUAllocator a; - void* p = a.Alloc(4096); - EXPECT_NE(p, nullptr); - a.Free(p, 4096); -} diff --git a/paddle/memory/detail/gpu_allocator.h b/paddle/memory/detail/system_allocator.h similarity index 58% rename from paddle/memory/detail/gpu_allocator.h rename to paddle/memory/detail/system_allocator.h index 9452c41fb8..0a64553188 100644 --- a/paddle/memory/detail/gpu_allocator.h +++ b/paddle/memory/detail/system_allocator.h @@ -14,20 +14,58 @@ limitations under the License. */ #pragma once -#include // for size_t +#include // for size_t +#include // for mlock and munlock +#include // for malloc and free -#include +#ifndef PADDLE_ONLY_CPU #include +#include +#endif // PADDLE_ONLY_CPU namespace paddle { namespace memory { namespace detail { +class SystemAllocator { + public: + virtual void* Alloc(size_t size) = 0; + virtual void* Free(void* p) = 0; +}; + +// CPUAllocator calls mlock, which returns pinned +// and locked memory as staging areas for data exchange between host +// and device. Allocates too much would reduce the amount of memory +// available to the system for paging. So, by default, we should use +// CPUAllocator. +template +class CPUAllocator : public SystemAllocator { + public: + virtual void* Alloc(size_t size) { + void* p = std::malloc(size); + if (p != nullptr && lock_memory) { + mlock(p, size); + } + return p; + } + + virtual void Free(void* p, size_t size) { + if (p != nullptr && lock_memory) { + munlock(p, size); + } + std::free(p); + } +}; + +#ifndef PADDLE_ONLY_CPU // The following code are for CUDA. 
+ +namespace { inline void throw_on_error(cudaError_t e, const char* message) { if (e) { throw thrust::system_error(e, thrust::cuda_category(), message); } } +} // namespace // GPUAllocator calls cudaHostMalloc, which returns // pinned and locked memory as staging areas for data exchange @@ -36,17 +74,11 @@ inline void throw_on_error(cudaError_t e, const char* message) { // default, we should use GPUAllocator. template class GPUAllocator { -public: - void* Alloc(size_t size); - void Free(void* p, size_t size); -}; - -template <> -class GPUAllocator { -public: + public: void* Alloc(size_t size) { void* p = 0; - cudaError_t result = cudaMalloc(&p, size); + cudaError_t result = + staging ? cudaMallocHost(&p, size) : cudaMalloc(&p, size); if (result == cudaSuccess) { return p; } @@ -60,32 +92,15 @@ public: // that is returned if you ever call cudaFree after the // driver has already shutdown. This happens only if the // process is terminating, in which case we don't care if - // cudaFree succeeds. - auto err = cudaFree(p); + // cudaFree succeeds. + auto err = staging ? cudaFreeHost(p) : cudaFree(p); if (err != cudaErrorCudartUnloading) { - throw_on_error(err, "cudaFree failed"); + throw_on_error(err, "cudaFree failed"); } } }; -template <> -class GPUAllocator { -public: - void* Alloc(size_t size) { - void* p = 0; - cudaError_t result = cudaMallocHost(&p, size); - if (result == cudaSuccess) { - return p; - } - // clear last error - cudaGetLastError(); - return nullptr; - } - - void Free(void* p, size_t size) { - throw_on_error(cudaFreeHost(p), "cudaFreeHost failed"); - } -}; +#endif // PADDLE_ONLY_CPU } // namespace detail } // namespace memory diff --git a/paddle/memory/detail/gpu_allocator_test.cc b/paddle/memory/detail/system_allocator_test.cc similarity index 69% rename from paddle/memory/detail/gpu_allocator_test.cc rename to paddle/memory/detail/system_allocator_test.cc index 18c1c9ab43..4e7b8018b6 100644 --- a/paddle/memory/detail/gpu_allocator_test.cc +++ b/paddle/memory/detail/system_allocator_test.cc @@ -12,9 +12,25 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/memory/detail/gpu_allocator.h" +#include "paddle/memory/detail/system_allocator.h" #include "gtest/gtest.h" +TEST(CPUAllocator, NoLockMem) { + paddle::memory::detail::CPUAllocator a; + void* p = a.Alloc(4096); + EXPECT_NE(p, nullptr); + a.Free(p, 4096); +} + +TEST(CPUAllocator, LockMem) { + paddle::memory::detail::CPUAllocator a; + void* p = a.Alloc(4096); + EXPECT_NE(p, nullptr); + a.Free(p, 4096); +} + +#ifndef PADDLE_ONLY_CPU + TEST(GPUAllocator, NonStaging) { paddle::memory::detail::GPUAllocator a; void* p = a.Alloc(4096); @@ -28,3 +44,5 @@ TEST(GPUAllocator, Staging) { EXPECT_NE(p, nullptr); a.Free(p, 4096); } + +#endif // PADDLE_ONLY_CPU diff --git a/paddle/memory/memory.cc b/paddle/memory/memory.cc index b617923731..ca3c01ebdb 100644 --- a/paddle/memory/memory.cc +++ b/paddle/memory/memory.cc @@ -14,48 +14,41 @@ limitations under the License. 
*/ #include "paddle/memory/memory.h" +#include "paddle/memory/detail/cpu_allocator.h" +#include "paddle/memory/detail/gpu_allocator.h" + namespace paddle { namespace memory { -template <> -void* Alloc(CPUPlace, size_t size) { - return GetCPUBuddyAllocator(false /*non-staging*/)->Alloc(size); -} - -void* AllocStaging(CPUPlace, size_t size) { - return GetCPUBuddyAllocator(true /*staging*/)->Alloc(size); -} - -template <> -void* Alloc(GPUPlace pl, size_t size) { - return GetGPUBuddyAllocator(pl.device)->Alloc(size); -} - -template <> -void Free(CPUPlace, void* p) { - return GetCPUBuddyAllocator(false /*non-staging*/)->Free(p); -} - -void FreeStaging(CPUPlace, void* p) { - return GetCPUBuddyAllocator(false /*non-staging*/)->Free(p); -} - -#ifdef PADDLE_WITH_GPU -template <> -void* Alloc(GPUPlace pl, void* p) { - return GetGPUBuddyAllocator(pl.device)->Free(p); -} - -template <> -size_t Used(CPUPlace) { +void Alloc(paddle::platform::Place pl, size_t size) { +#ifndef PADDLE_ONLY_CPU + if (paddle::platform::is_gpu_place(pl)) { + return GetGPUBuddyAllocator(pl.device)->Alloc(size); + } +#endif // PADDLE_ONLY_CPU + PADDLE_ASSERT(paddle::platform::is_cpu_place(pl)); + return GetCPUBuddyAllocator()->Alloc(size); +} + +void Free(paddle::platform::Place pl, void* p) { +#ifndef PADDLE_ONLY_CPU + if (paddle::platform::is_gpu_place(pl)) { + GetGPUBuddyAllocator(pl.device)->Free(p); + } +#endif // PADDLE_ONLY_CPU + PADDLE_ASSERT(paddle::platform::is_cpu_place(pl)); + GetCPUBuddyAllocator()->Free(p); +} + +size_t Used(paddle::platform::Place pl) { +#ifndef PADDLE_ONLY_CPU + if (paddle::platform::is_gpu_place(pl)) { + return GetGPUBuddyAllocator(pl.device)->Used(); + } +#endif // PADDLE_ONLY_CPU + PADDLE_ASSERT(paddle::platform::is_cpu_place(pl)); return GetCPUBuddyAllocator()->Used(); } -template <> -size_t Alloc(GPUPlace pl) { - return GetGPUBuddyAllocator(pl.device)->Used(); -} -#endif // PADDLE_WITH_GPU - } // namespace memory } // namespace paddle diff --git a/paddle/memory/memory.h b/paddle/memory/memory.h index 8c15a133bb..0bc609205e 100644 --- a/paddle/memory/memory.h +++ b/paddle/memory/memory.h @@ -19,19 +19,9 @@ limitations under the License. */ namespace paddle { namespace memory { -template -void* Alloc(Place, size_t); -template -void Free(Place, void*); -template -size_t Used(Place); - -// Staging memory means "pinned" host memory that can be mapped into -// the CUDA memory space and accessed by the device rapidly. Don't -// allocate too much staging memory; otherwise system performance will -// degrade because the OS cannot find enough swap memory space. -void* AllocStaging(CPUPlace, size_t); -void* FreeStaging(CPUPlace, size_t); +void* Alloc(paddle::framework::Place, size_t); +void Free(paddle::framework::Place, void*); +size_t Used(paddle::framework::Place); } // namespace memory } // namespace paddle From 6250d108bfd39afb3b2beba438ecb22eca8991bc Mon Sep 17 00:00:00 2001 From: liaogang Date: Tue, 27 Jun 2017 09:51:55 +0800 Subject: [PATCH 06/43] FIX: clang-format --- paddle/memory/detail/gpu_allocator.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/paddle/memory/detail/gpu_allocator.h b/paddle/memory/detail/gpu_allocator.h index 9452c41fb8..682afdf7d3 100644 --- a/paddle/memory/detail/gpu_allocator.h +++ b/paddle/memory/detail/gpu_allocator.h @@ -16,8 +16,8 @@ limitations under the License. 
*/ #include // for size_t -#include #include +#include namespace paddle { namespace memory { @@ -36,14 +36,14 @@ inline void throw_on_error(cudaError_t e, const char* message) { // default, we should use GPUAllocator. template class GPUAllocator { -public: + public: void* Alloc(size_t size); void Free(void* p, size_t size); }; template <> class GPUAllocator { -public: + public: void* Alloc(size_t size) { void* p = 0; cudaError_t result = cudaMalloc(&p, size); @@ -60,22 +60,22 @@ public: // that is returned if you ever call cudaFree after the // driver has already shutdown. This happens only if the // process is terminating, in which case we don't care if - // cudaFree succeeds. + // cudaFree succeeds. auto err = cudaFree(p); if (err != cudaErrorCudartUnloading) { - throw_on_error(err, "cudaFree failed"); + throw_on_error(err, "cudaFree failed"); } } }; template <> class GPUAllocator { -public: + public: void* Alloc(size_t size) { void* p = 0; cudaError_t result = cudaMallocHost(&p, size); if (result == cudaSuccess) { - return p; + return p; } // clear last error cudaGetLastError(); From f149d183f7d78fdaa171f2afabaf8a138596c8ff Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Mon, 26 Jun 2017 20:41:33 -0700 Subject: [PATCH 07/43] Add system_allocator --- paddle/memory/detail/CMakeLists.txt | 6 +- paddle/memory/detail/system_allocator.h | 84 ++++++++++++------- paddle/memory/detail/system_allocator_test.cc | 44 +++++----- 3 files changed, 81 insertions(+), 53 deletions(-) diff --git a/paddle/memory/detail/CMakeLists.txt b/paddle/memory/detail/CMakeLists.txt index 3b5bbd7a12..c16dfadeb2 100644 --- a/paddle/memory/detail/CMakeLists.txt +++ b/paddle/memory/detail/CMakeLists.txt @@ -1 +1,5 @@ -cc_test(system_allocator_test SRCS system_allocator_test.cc) +if(${WITH_GPU}) + nv_test(system_allocator_test SRCS system_allocator_test.cc) +else(${WITH_GPU}) + cc_test(system_allocator_test SRCS system_allocator_test.cc) +endif(${WITH_GPU}) diff --git a/paddle/memory/detail/system_allocator.h b/paddle/memory/detail/system_allocator.h index 0a64553188..1768f9a0da 100644 --- a/paddle/memory/detail/system_allocator.h +++ b/paddle/memory/detail/system_allocator.h @@ -23,14 +23,31 @@ limitations under the License. */ #include #endif // PADDLE_ONLY_CPU +#include "paddle/platform/assert.h" + namespace paddle { namespace memory { namespace detail { -class SystemAllocator { +class CPUDeleter { public: - virtual void* Alloc(size_t size) = 0; - virtual void* Free(void* p) = 0; + CPUDeleter(void* ptr, size_t size, bool locked) + : ptr_(ptr), size_(size), locked_(locked) {} + + void* Ptr() { return ptr_; } + + void operator()(void* ptr) { + PADDLE_ASSERT(ptr == ptr_); + if (ptr_ != nullptr && locked_) { + munlock(ptr_, size_); + } + std::free(ptr_); + } + + private: + void* ptr_; + size_t size_; + bool locked_; }; // CPUAllocator calls mlock, which returns pinned @@ -39,21 +56,14 @@ class SystemAllocator { // available to the system for paging. So, by default, we should use // CPUAllocator. 
template -class CPUAllocator : public SystemAllocator { +class CPUAllocator { public: - virtual void* Alloc(size_t size) { + static CPUDeleter Alloc(size_t size) { void* p = std::malloc(size); if (p != nullptr && lock_memory) { mlock(p, size); } - return p; - } - - virtual void Free(void* p, size_t size) { - if (p != nullptr && lock_memory) { - munlock(p, size); - } - std::free(p); + return CPUDeleter(p, size, lock_memory); } }; @@ -67,6 +77,32 @@ inline void throw_on_error(cudaError_t e, const char* message) { } } // namespace +class GPUDeleter { + public: + GPUDeleter(void* ptr, size_t size, bool staging) + : ptr_(ptr), size_(size), staging_(staging) {} + + void* Ptr() { return ptr_; } + + void operator()(void* ptr) { + PADDLE_ASSERT(ptr == ptr_); + // Purposefully allow cudaErrorCudartUnloading, because + // that is returned if you ever call cudaFree after the + // driver has already shutdown. This happens only if the + // process is terminating, in which case we don't care if + // cudaFree succeeds. + cudaError_t err = staging_ ? cudaFreeHost(ptr) : cudaFree(ptr); + if (err != cudaErrorCudartUnloading) { + throw_on_error(err, "cudaFree{Host} failed"); + } + } + + private: + void* ptr_; + size_t size_; + bool staging_; +}; + // GPUAllocator calls cudaHostMalloc, which returns // pinned and locked memory as staging areas for data exchange // between host and device. Allocates too much would reduce the @@ -75,28 +111,14 @@ inline void throw_on_error(cudaError_t e, const char* message) { template class GPUAllocator { public: - void* Alloc(size_t size) { + static GPUDeleter Alloc(size_t size) { void* p = 0; cudaError_t result = staging ? cudaMallocHost(&p, size) : cudaMalloc(&p, size); - if (result == cudaSuccess) { - return p; - } - // clear last error - cudaGetLastError(); - return nullptr; - } - - void Free(void* p, size_t size) { - // Purposefully allow cudaErrorCudartUnloading, because - // that is returned if you ever call cudaFree after the - // driver has already shutdown. This happens only if the - // process is terminating, in which case we don't care if - // cudaFree succeeds. - auto err = staging ? cudaFreeHost(p) : cudaFree(p); - if (err != cudaErrorCudartUnloading) { - throw_on_error(err, "cudaFree failed"); + if (result != cudaSuccess) { + cudaGetLastError(); // clear error if there is any. } + return GPUDeleter(result == cudaSuccess ? p : nullptr, size, staging); } }; diff --git a/paddle/memory/detail/system_allocator_test.cc b/paddle/memory/detail/system_allocator_test.cc index 4e7b8018b6..fec70a65b7 100644 --- a/paddle/memory/detail/system_allocator_test.cc +++ b/paddle/memory/detail/system_allocator_test.cc @@ -13,36 +13,38 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/memory/detail/system_allocator.h" + +#include +#include + #include "gtest/gtest.h" -TEST(CPUAllocator, NoLockMem) { - paddle::memory::detail::CPUAllocator a; - void* p = a.Alloc(4096); - EXPECT_NE(p, nullptr); - a.Free(p, 4096); +template +void TestAllocator() { + { + auto d = Allocator::Alloc(sizeof(int)); + EXPECT_NE(d.Ptr(), nullptr); + std::unique_ptr p(static_cast(d.Ptr()), d); + } + { + auto d = Allocator::Alloc(0); + EXPECT_EQ(d.Ptr(), nullptr); + std::unique_ptr p(static_cast(d.Ptr()), d); + } } +TEST(CPUAllocator, NoLockMem) { + TestAllocator>(); +} TEST(CPUAllocator, LockMem) { - paddle::memory::detail::CPUAllocator a; - void* p = a.Alloc(4096); - EXPECT_NE(p, nullptr); - a.Free(p, 4096); + TestAllocator>(); } #ifndef PADDLE_ONLY_CPU - -TEST(GPUAllocator, NonStaging) { - paddle::memory::detail::GPUAllocator a; - void* p = a.Alloc(4096); - EXPECT_NE(p, nullptr); - a.Free(p, 4096); +TEST(GPUAllocator, NoStaging) { + TestAllocator>(); } - TEST(GPUAllocator, Staging) { - paddle::memory::detail::GPUAllocator a; - void* p = a.Alloc(4096); - EXPECT_NE(p, nullptr); - a.Free(p, 4096); + TestAllocator>(); } - #endif // PADDLE_ONLY_CPU From ab91232cf6bad3c9ff5595c6d655eb538a651f24 Mon Sep 17 00:00:00 2001 From: qijun Date: Tue, 27 Jun 2017 20:16:52 +0800 Subject: [PATCH 08/43] add cmake external project for eigen --- CMakeLists.txt | 1 + cmake/external/eigen.cmake | 20 ++++++++++++++++ paddle/framework/ddim_test.cc | 44 +++++++++++++++++++++++++++++++++++ 3 files changed, 65 insertions(+) create mode 100644 cmake/external/eigen.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index 3c719d35ec..9be75f4a7d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -92,6 +92,7 @@ include(external/openblas) # download, build, install openblas include(external/swig) # download, build, install swig include(external/warpctc) # download, build, install warpctc include(external/any) # download libn::any +include(external/eigen) # download eigen3 include(generic) # simplify cmake module include(package) # set paddle packages diff --git a/cmake/external/eigen.cmake b/cmake/external/eigen.cmake new file mode 100644 index 0000000000..543504a274 --- /dev/null +++ b/cmake/external/eigen.cmake @@ -0,0 +1,20 @@ +INCLUDE(ExternalProject) + +SET(EIGEN_SOURCE_DIR ${THIRD_PARTY_PATH}/eigen3) + +INCLUDE_DIRECTORIES(${EIGEN_SOURCE_DIR}/src/) + +ExternalProject_Add( + eigen3 + ${EXTERNAL_PROJECT_LOG_ARGS} + URL "https://bitbucket.org/eigen/eigen/get/f3a22f35b044.tar.gz" + URL_MD5 "4645c66075982da6fa0bcf6b20f3e8f7" + PREFIX ${EIGEN_SOURCE_DIR} + UPDATE_COMMAND "" + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" +) + +LIST(APPEND external_project_dependencies eigen3) \ No newline at end of file diff --git a/paddle/framework/ddim_test.cc b/paddle/framework/ddim_test.cc index 36eef02370..3e4ad840f3 100644 --- a/paddle/framework/ddim_test.cc +++ b/paddle/framework/ddim_test.cc @@ -1,6 +1,9 @@ #include #include +#include "eigen3/Eigen/Core" +#include "eigen3/Eigen/Dense" +#include "eigen3/unsupported/Eigen/CXX11/Tensor" #include "gtest/gtest.h" #include "paddle/framework/ddim.h" @@ -61,3 +64,44 @@ TEST(DDim, Print) { ss << ddim; EXPECT_EQ("2, 3, 4", ss.str()); } + +template +using Vec = + Eigen::TensorMap, + Eigen::Aligned>; + +template +using Matrix = + Eigen::TensorMap, + Eigen::Aligned>; + +template +void print(T* input, int size) { + for (int i = 0; i < size; i++) { + std::cout << input[i] << " "; + } + std::cout << std::endl; +} + +TEST(Eigen, start) { + int size = 4; 
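+  // The three raw float buffers below are wrapped by Eigen::TensorMap, so
+  // Eigen reads and writes them in place without copying; c = a + b is then
+  // evaluated eagerly on Eigen::DefaultDevice (the CPU).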
+ + float* t_a = (float*)malloc(size * sizeof(float)); + float* t_b = (float*)malloc(size * sizeof(float)); + float* t_c = (float*)malloc(size * sizeof(float)); + for (int i = 0; i < size; i++) { + t_a[i] = i; + t_b[i] = i; + } + Vec a(t_a, size); + Vec b(t_b, size); + Vec c(t_c, size); + + Eigen::DefaultDevice dd; + c.device(dd) = a + b; + print(t_c, size); + + free(t_a); + free(t_b); + free(t_c); +} From c5d9ca8617869e4d8bc12b9302c5594279ab93ad Mon Sep 17 00:00:00 2001 From: qijun Date: Tue, 27 Jun 2017 15:06:00 +0000 Subject: [PATCH 09/43] fix eigen include path --- cmake/external/eigen.cmake | 8 +++---- paddle/framework/ddim_test.cc | 44 ----------------------------------- 2 files changed, 4 insertions(+), 48 deletions(-) diff --git a/cmake/external/eigen.cmake b/cmake/external/eigen.cmake index 543504a274..253d436bcc 100644 --- a/cmake/external/eigen.cmake +++ b/cmake/external/eigen.cmake @@ -2,13 +2,13 @@ INCLUDE(ExternalProject) SET(EIGEN_SOURCE_DIR ${THIRD_PARTY_PATH}/eigen3) -INCLUDE_DIRECTORIES(${EIGEN_SOURCE_DIR}/src/) +INCLUDE_DIRECTORIES(${EIGEN_SOURCE_DIR}/src/eigen3) ExternalProject_Add( eigen3 ${EXTERNAL_PROJECT_LOG_ARGS} - URL "https://bitbucket.org/eigen/eigen/get/f3a22f35b044.tar.gz" - URL_MD5 "4645c66075982da6fa0bcf6b20f3e8f7" + URL "https://bitbucket.org/eigen/eigen/get/3.3.4.tar.gz" + URL_MD5 "1a47e78efe365a97de0c022d127607c3" PREFIX ${EIGEN_SOURCE_DIR} UPDATE_COMMAND "" CONFIGURE_COMMAND "" @@ -17,4 +17,4 @@ ExternalProject_Add( TEST_COMMAND "" ) -LIST(APPEND external_project_dependencies eigen3) \ No newline at end of file +LIST(APPEND external_project_dependencies eigen3) diff --git a/paddle/framework/ddim_test.cc b/paddle/framework/ddim_test.cc index 3e4ad840f3..36eef02370 100644 --- a/paddle/framework/ddim_test.cc +++ b/paddle/framework/ddim_test.cc @@ -1,9 +1,6 @@ #include #include -#include "eigen3/Eigen/Core" -#include "eigen3/Eigen/Dense" -#include "eigen3/unsupported/Eigen/CXX11/Tensor" #include "gtest/gtest.h" #include "paddle/framework/ddim.h" @@ -64,44 +61,3 @@ TEST(DDim, Print) { ss << ddim; EXPECT_EQ("2, 3, 4", ss.str()); } - -template -using Vec = - Eigen::TensorMap, - Eigen::Aligned>; - -template -using Matrix = - Eigen::TensorMap, - Eigen::Aligned>; - -template -void print(T* input, int size) { - for (int i = 0; i < size; i++) { - std::cout << input[i] << " "; - } - std::cout << std::endl; -} - -TEST(Eigen, start) { - int size = 4; - - float* t_a = (float*)malloc(size * sizeof(float)); - float* t_b = (float*)malloc(size * sizeof(float)); - float* t_c = (float*)malloc(size * sizeof(float)); - for (int i = 0; i < size; i++) { - t_a[i] = i; - t_b[i] = i; - } - Vec a(t_a, size); - Vec b(t_b, size); - Vec c(t_c, size); - - Eigen::DefaultDevice dd; - c.device(dd) = a + b; - print(t_c, size); - - free(t_a); - free(t_b); - free(t_c); -} From dd08d337c0138c9def5f7ce95f88bae5599e5f92 Mon Sep 17 00:00:00 2001 From: liaogang Date: Wed, 28 Jun 2017 01:30:57 +0800 Subject: [PATCH 10/43] FIX: fix cmake type error --- CMakeLists.txt | 2 ++ paddle/CMakeLists.txt | 10 +--------- paddle/memory/CMakeLists.txt | 2 +- 3 files changed, 4 insertions(+), 10 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 3c719d35ec..b779caefb9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -27,6 +27,7 @@ if(NOT CMAKE_CROSSCOMPILING) endif(NOT CMAKE_CROSSCOMPILING) find_package(Git REQUIRED) find_package(Threads REQUIRED) +find_package(Boost QUIET) include(simd) @@ -109,6 +110,7 @@ include_directories("${PROJ_ROOT}") include_directories("${PROJ_ROOT}/paddle/cuda/include") 
include_directories("${CMAKE_CURRENT_BINARY_DIR}/proto") include_directories("${CMAKE_CURRENT_BINARY_DIR}/go/pserver/cclient") +include_directories(${Boost_INCLUDE_DIRS}) set(EXTERNAL_LIBS ${GFLAGS_LIBRARIES} diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt index 0cddb95244..979b68e827 100644 --- a/paddle/CMakeLists.txt +++ b/paddle/CMakeLists.txt @@ -10,17 +10,9 @@ add_subdirectory(trainer) add_subdirectory(scripts) add_subdirectory(optimizer) add_subdirectory(strings) -add_subdirectory(memory) - -# Do not build go directory until go cmake is working smoothly. -# if(CMAKE_Go_COMPILER) -# add_subdirectory(go) -# endif() - -find_package(Boost QUIET) if(Boost_FOUND) - include_directories(${Boost_INCLUDE_DIRS}) + add_subdirectory(memory) add_subdirectory(platform) add_subdirectory(framework) endif() diff --git a/paddle/memory/CMakeLists.txt b/paddle/memory/CMakeLists.txt index 8662512496..e74ce75c93 100644 --- a/paddle/memory/CMakeLists.txt +++ b/paddle/memory/CMakeLists.txt @@ -3,5 +3,5 @@ add_subdirectory(detail) if(${WITH_GPU}) nv_library(memory SRCS memory.cc) else(${WITH_GPU}) - cc_library(memory SRCS memroy.cc) + cc_library(memory SRCS memory.cc) endif(${WITH_GPU}) From dde0da9e0ffee7a49510061a139ab2abc7ab55b9 Mon Sep 17 00:00:00 2001 From: liaogang Date: Wed, 28 Jun 2017 01:31:24 +0800 Subject: [PATCH 11/43] ENH: Add cuda.h in platform --- paddle/platform/cuda.h | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 paddle/platform/cuda.h diff --git a/paddle/platform/cuda.h b/paddle/platform/cuda.h new file mode 100644 index 0000000000..864a5d3340 --- /dev/null +++ b/paddle/platform/cuda.h @@ -0,0 +1,41 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#ifndef PADDLE_ONLY_CPU + +#include +#include + +namespace paddle { +namespace platform { + +inline void throw_on_error(cudaError_t e, const char* message) { + if (e) { + throw thrust::system_error(e, thrust::cuda_category(), message); + } +} + +int GetDeviceCount(void) { + int count; + throw_on_error(cudaGetDeviceCount(&count), + "cudaGetDeviceCount failed"); + return count; +} + +} // namespace platform +} // namespace paddle + +#endif // PADDLE_ONLY_CPU From 29c7512b3ce13ca7b89d3ff3f4aea2c7d7f27478 Mon Sep 17 00:00:00 2001 From: liaogang Date: Wed, 28 Jun 2017 01:31:46 +0800 Subject: [PATCH 12/43] FIX: fix memory.h/cc --- paddle/memory/memory.cc | 23 ++++++++++++++--------- paddle/memory/memory.h | 8 ++++---- 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/paddle/memory/memory.cc b/paddle/memory/memory.cc index ca3c01ebdb..0d123d99e2 100644 --- a/paddle/memory/memory.cc +++ b/paddle/memory/memory.cc @@ -13,41 +13,46 @@ See the License for the specific language governing permissions and limitations under the License. 
 */

 #include "paddle/memory/memory.h"
+#include "paddle/memory/detail/buddy_allocator.h"
+#include "paddle/memory/detail/system_allocator.h"
+#include "paddle/platform/assert.h"

-#include "paddle/memory/detail/cpu_allocator.h"
-#include "paddle/memory/detail/gpu_allocator.h"
+#include <boost/variant.hpp>

 namespace paddle {
 namespace memory {

-void Alloc(paddle::platform::Place pl, size_t size) {
+void* Alloc(platform::Place pl, size_t size) {
 #ifndef PADDLE_ONLY_CPU
   if (paddle::platform::is_gpu_place(pl)) {
-    return GetGPUBuddyAllocator(pl.device)->Alloc(size);
+    size_t gpu_id = boost::get<platform::GPUPlace>(pl).device;
+    return detail::GetGPUBuddyAllocator(gpu_id)->Alloc(size);
   }
 #endif  // PADDLE_ONLY_CPU
   PADDLE_ASSERT(paddle::platform::is_cpu_place(pl));
-  return GetCPUBuddyAllocator()->Alloc(size);
+  return detail::GetCPUBuddyAllocator()->Alloc(size);
 }

 void Free(paddle::platform::Place pl, void* p) {
 #ifndef PADDLE_ONLY_CPU
   if (paddle::platform::is_gpu_place(pl)) {
-    GetGPUBuddyAllocator(pl.device)->Free(p);
+    size_t gpu_id = boost::get<platform::GPUPlace>(pl).device;
+    detail::GetGPUBuddyAllocator(gpu_id)->Free(p);
   }
 #endif  // PADDLE_ONLY_CPU
   PADDLE_ASSERT(paddle::platform::is_cpu_place(pl));
-  GetCPUBuddyAllocator()->Free(p);
+  detail::GetCPUBuddyAllocator()->Free(p);
 }

 size_t Used(paddle::platform::Place pl) {
 #ifndef PADDLE_ONLY_CPU
   if (paddle::platform::is_gpu_place(pl)) {
-    return GetGPUBuddyAllocator(pl.device)->Used();
+    size_t gpu_id = boost::get<platform::GPUPlace>(pl).device;
+    return detail::GetGPUBuddyAllocator(gpu_id)->Used();
   }
 #endif  // PADDLE_ONLY_CPU
   PADDLE_ASSERT(paddle::platform::is_cpu_place(pl));
-  return GetCPUBuddyAllocator()->Used();
+  return detail::GetCPUBuddyAllocator()->Used();
 }

 }  // namespace memory
diff --git a/paddle/memory/memory.h b/paddle/memory/memory.h
index 0bc609205e..a33092bade 100644
--- a/paddle/memory/memory.h
+++ b/paddle/memory/memory.h
@@ -14,14 +14,14 @@ limitations under the License. */

 #pragma once

-#include "paddle/frameowork/place.h"
+#include "paddle/platform/place.h"

 namespace paddle {
 namespace memory {

-void* Alloc(paddle::framework::Place, size_t);
-void Free(paddle::framework::Place, void*);
-size_t Used(paddle::framework::Place);
+void* Alloc(paddle::platform::Place, size_t);
+void Free(paddle::platform::Place, void*);
+size_t Used(paddle::platform::Place);

 }  // namespace memory
 }  // namespace paddle

From b22dd12854150c31b9cb9e3e550bdee4b5df5977 Mon Sep 17 00:00:00 2001
From: liaogang
Date: Wed, 28 Jun 2017 01:32:06 +0800
Subject: [PATCH 13/43] ENH: Add buddy allocator draft

---
 paddle/memory/detail/CMakeLists.txt    |  4 +-
 paddle/memory/detail/buddy_allocator.h | 79 ++++++++++++++++++++++++++
 2 files changed, 81 insertions(+), 2 deletions(-)
 create mode 100644 paddle/memory/detail/buddy_allocator.h

diff --git a/paddle/memory/detail/CMakeLists.txt b/paddle/memory/detail/CMakeLists.txt
index c16dfadeb2..cd5622203f 100644
--- a/paddle/memory/detail/CMakeLists.txt
+++ b/paddle/memory/detail/CMakeLists.txt
@@ -1,5 +1,5 @@
 if(${WITH_GPU})
-  nv_test(system_allocator_test SRCS system_allocator_test.cc)
+  nv_test(system_allocator_test SRCS system_allocator_test.cc DEPS gflags glog)
 else(${WITH_GPU})
-  cc_test(system_allocator_test SRCS system_allocator_test.cc)
+  cc_test(system_allocator_test SRCS system_allocator_test.cc DEPS gflags glog)
 endif(${WITH_GPU})
diff --git a/paddle/memory/detail/buddy_allocator.h b/paddle/memory/detail/buddy_allocator.h
new file mode 100644
index 0000000000..35e96fd507
--- /dev/null
+++ b/paddle/memory/detail/buddy_allocator.h
@@ -0,0 +1,79 @@
+/* Copyright (c) 2016 PaddlePaddle Authors.
All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/memory/detail/system_allocator.h" + +namespace paddle { +namespace memory { +namespace detail { + +template +class BuddyAllocator { + public: + // TODO(gangliao): This is a draft, add Buddy Allocator Algorithm soon + BuddyAllocator() {} + ~BuddyAllocator() {} + + public: + void* Alloc(size_t size) { + return Allocator::Alloc(size); + } + void Free(void*) { + // Because all info like size are stored in meta data, + // thus it's duplicate if add the parameter `size` in + // `Free(void*)` interface. + } + size_t Used(); + + public: + BuddyAllocator(const BuddyAllocator&) = delete; + BuddyAllocator& operator=(const BuddyAllocator&) = delete; + + private: + size_t min_alloc_size_; + size_t max_alloc_size_; + + private: + std::mutex mutex_; +}; + +BuddyAllocator* GetCPUBuddyAllocator() { + static BuddyAllocator* a = nullptr; + if (a == nullptr) { + a = new BuddyAllocator(); + } + return a; +} + +#ifndef PADDLE_ONLY_CPU // The following code are for CUDA. + +BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) { + static BuddyAllocator** as = NULL; + if (as == NULL) { + int gpu_num = platform::GetDeviceCount(); + as = new BuddyAllocator*[gpu_num]; + for (int gpu = 0; gpu < gpu_num; gpu++) { + as[gpu] = new BuddyAllocator(); + } + } + return as[gpu_id]; +} + +#endif // PADDLE_ONLY_CPU + +} // namespace detail +} // namespace memory +} // namespace paddle From 79373dabc8d2e4edc87fbef40efdfa1f54b35a9f Mon Sep 17 00:00:00 2001 From: liaogang Date: Wed, 28 Jun 2017 01:33:06 +0800 Subject: [PATCH 14/43] TEST: Add test for system allocator and deleter --- paddle/memory/detail/system_allocator.h | 108 ++++++------------ paddle/memory/detail/system_allocator_test.cc | 40 ++++--- 2 files changed, 60 insertions(+), 88 deletions(-) diff --git a/paddle/memory/detail/system_allocator.h b/paddle/memory/detail/system_allocator.h index 1768f9a0da..f411019854 100644 --- a/paddle/memory/detail/system_allocator.h +++ b/paddle/memory/detail/system_allocator.h @@ -18,107 +18,69 @@ limitations under the License. */ #include // for mlock and munlock #include // for malloc and free -#ifndef PADDLE_ONLY_CPU -#include -#include -#endif // PADDLE_ONLY_CPU - +#include #include "paddle/platform/assert.h" +#include "paddle/platform/cuda.h" + +DEFINE_bool(uses_pinned_memory, false, + "If set, allocate cpu/gpu pinned memory."); namespace paddle { namespace memory { namespace detail { -class CPUDeleter { - public: - CPUDeleter(void* ptr, size_t size, bool locked) - : ptr_(ptr), size_(size), locked_(locked) {} - - void* Ptr() { return ptr_; } - - void operator()(void* ptr) { - PADDLE_ASSERT(ptr == ptr_); - if (ptr_ != nullptr && locked_) { - munlock(ptr_, size_); - } - std::free(ptr_); - } - - private: - void* ptr_; - size_t size_; - bool locked_; -}; - -// CPUAllocator calls mlock, which returns pinned -// and locked memory as staging areas for data exchange between host -// and device. 
Allocates too much would reduce the amount of memory -// available to the system for paging. So, by default, we should use -// CPUAllocator. -template +// If uses_pinned_memory is true, CPUAllocator calls mlock, which +// returns pinned and locked memory as staging areas for data exchange +// between host and device. Allocates too much would reduce the amount +// of memory available to the system for paging. So, by default, we +// should set false to uses_pinned_memory. class CPUAllocator { public: - static CPUDeleter Alloc(size_t size) { + static void* Alloc(size_t size) { void* p = std::malloc(size); - if (p != nullptr && lock_memory) { + if (p != nullptr && FLAGS_uses_pinned_memory) { mlock(p, size); } - return CPUDeleter(p, size, lock_memory); + return p; } -}; - -#ifndef PADDLE_ONLY_CPU // The following code are for CUDA. - -namespace { -inline void throw_on_error(cudaError_t e, const char* message) { - if (e) { - throw thrust::system_error(e, thrust::cuda_category(), message); - } -} -} // namespace - -class GPUDeleter { - public: - GPUDeleter(void* ptr, size_t size, bool staging) - : ptr_(ptr), size_(size), staging_(staging) {} - - void* Ptr() { return ptr_; } - void operator()(void* ptr) { - PADDLE_ASSERT(ptr == ptr_); - // Purposefully allow cudaErrorCudartUnloading, because - // that is returned if you ever call cudaFree after the - // driver has already shutdown. This happens only if the - // process is terminating, in which case we don't care if - // cudaFree succeeds. - cudaError_t err = staging_ ? cudaFreeHost(ptr) : cudaFree(ptr); - if (err != cudaErrorCudartUnloading) { - throw_on_error(err, "cudaFree{Host} failed"); + static void Free(void* p, size_t size) { + if (p != nullptr && FLAGS_uses_pinned_memory) { + munlock(p, size); } + std::free(p); } - - private: - void* ptr_; - size_t size_; - bool staging_; }; +#ifndef PADDLE_ONLY_CPU // The following code are for CUDA. + // GPUAllocator calls cudaHostMalloc, which returns // pinned and locked memory as staging areas for data exchange // between host and device. Allocates too much would reduce the // amount of memory available to the system for paging. So, by // default, we should use GPUAllocator. -template class GPUAllocator { public: - static GPUDeleter Alloc(size_t size) { + static void* Alloc(size_t size) { void* p = 0; - cudaError_t result = - staging ? cudaMallocHost(&p, size) : cudaMalloc(&p, size); + cudaError_t result = FLAGS_uses_pinned_memory ? cudaMallocHost(&p, size) + : cudaMalloc(&p, size); if (result != cudaSuccess) { cudaGetLastError(); // clear error if there is any. } - return GPUDeleter(result == cudaSuccess ? p : nullptr, size, staging); + return result == cudaSuccess ? p : nullptr; + } + + static void Free(void* p, size_t size) { + // Purposefully allow cudaErrorCudartUnloading, because + // that is returned if you ever call cudaFree after the + // driver has already shutdown. This happens only if the + // process is terminating, in which case we don't care if + // cudaFree succeeds. + cudaError_t err = FLAGS_uses_pinned_memory ? cudaFreeHost(p) : cudaFree(p); + if (err != cudaErrorCudartUnloading) { + platform::throw_on_error(err, "cudaFree{Host} failed"); + } } }; diff --git a/paddle/memory/detail/system_allocator_test.cc b/paddle/memory/detail/system_allocator_test.cc index fec70a65b7..829d3558ba 100644 --- a/paddle/memory/detail/system_allocator_test.cc +++ b/paddle/memory/detail/system_allocator_test.cc @@ -17,34 +17,44 @@ limitations under the License. 
*/ #include #include +#include "glog/logging.h" #include "gtest/gtest.h" template -void TestAllocator() { - { - auto d = Allocator::Alloc(sizeof(int)); - EXPECT_NE(d.Ptr(), nullptr); - std::unique_ptr p(static_cast(d.Ptr()), d); - } - { - auto d = Allocator::Alloc(0); - EXPECT_EQ(d.Ptr(), nullptr); - std::unique_ptr p(static_cast(d.Ptr()), d); - } +void TestAllocator(void* p) { + p = Allocator::Alloc(1024); + + int* i = static_cast(p); + std::shared_ptr ptr(i, [](int* p) { Allocator::Free(p, 1024); }); + + EXPECT_NE(p, nullptr); } TEST(CPUAllocator, NoLockMem) { - TestAllocator>(); + void* p = nullptr; + FLAGS_uses_pinned_memory = false; + TestAllocator(p); + EXPECT_EQ(p, nullptr); } + TEST(CPUAllocator, LockMem) { - TestAllocator>(); + void* p = nullptr; + FLAGS_uses_pinned_memory = true; + TestAllocator(p); + EXPECT_EQ(p, nullptr); } #ifndef PADDLE_ONLY_CPU TEST(GPUAllocator, NoStaging) { - TestAllocator>(); + void* p = nullptr; + FLAGS_uses_pinned_memory = false; + TestAllocator(p); + EXPECT_EQ(p, nullptr); } TEST(GPUAllocator, Staging) { - TestAllocator>(); + void* p = nullptr; + FLAGS_uses_pinned_memory = true; + TestAllocator(p); + EXPECT_EQ(p, nullptr); } #endif // PADDLE_ONLY_CPU From b8f5922d88e5f7949eb9a469f761ad49981d677a Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Tue, 27 Jun 2017 16:32:24 -0700 Subject: [PATCH 15/43] Make CPUAllocator and GPUAllocator subclasses of SystemAllocator --- paddle/memory/detail/CMakeLists.txt | 6 +- paddle/memory/detail/system_allocator.h | 80 +++++-------------- paddle/memory/detail/system_allocator_test.cc | 57 +++++++------ 3 files changed, 59 insertions(+), 84 deletions(-) diff --git a/paddle/memory/detail/CMakeLists.txt b/paddle/memory/detail/CMakeLists.txt index cd5622203f..72d3749ad7 100644 --- a/paddle/memory/detail/CMakeLists.txt +++ b/paddle/memory/detail/CMakeLists.txt @@ -1,5 +1,7 @@ if(${WITH_GPU}) - nv_test(system_allocator_test SRCS system_allocator_test.cc DEPS gflags glog) + nv_library(system_allocator SRCS system_allocator.cc DEPS gflags) + nv_test(system_allocator_test SRCS system_allocator_test.cc DEPS system_allocator gflags) else(${WITH_GPU}) - cc_test(system_allocator_test SRCS system_allocator_test.cc DEPS gflags glog) + cc_library(system_allocator SRCS system_allocator.cc DEPS gflags) + cc_test(system_allocator_test SRCS system_allocator_test.cc DEPS system_allocator gflags) endif(${WITH_GPU}) diff --git a/paddle/memory/detail/system_allocator.h b/paddle/memory/detail/system_allocator.h index f411019854..184b383f7f 100644 --- a/paddle/memory/detail/system_allocator.h +++ b/paddle/memory/detail/system_allocator.h @@ -14,76 +14,38 @@ limitations under the License. */ #pragma once -#include // for size_t -#include // for mlock and munlock -#include // for malloc and free - -#include -#include "paddle/platform/assert.h" -#include "paddle/platform/cuda.h" - -DEFINE_bool(uses_pinned_memory, false, - "If set, allocate cpu/gpu pinned memory."); +#include // for size_t namespace paddle { namespace memory { namespace detail { -// If uses_pinned_memory is true, CPUAllocator calls mlock, which -// returns pinned and locked memory as staging areas for data exchange -// between host and device. Allocates too much would reduce the amount -// of memory available to the system for paging. So, by default, we -// should set false to uses_pinned_memory. -class CPUAllocator { +// SystemAllocator is the parent class of CPUAllocator and +// GPUAllocator. 
A BuddyAllocator object uses a SystemAllocator* +// pointing to the underlying system allocator. An alternative to +// this class hierarchy is to pass a system allocator class to +// BuddyAllocator as a template parameter. This approach makes +// BuddyAllocator a class template, and it's very complicated +// algorithm would make the buddy_allocator.h messy. +class SystemAllocator { public: - static void* Alloc(size_t size) { - void* p = std::malloc(size); - if (p != nullptr && FLAGS_uses_pinned_memory) { - mlock(p, size); - } - return p; - } - - static void Free(void* p, size_t size) { - if (p != nullptr && FLAGS_uses_pinned_memory) { - munlock(p, size); - } - std::free(p); - } + virtual ~SystemAllocator() {} + virtual void* Alloc(size_t size) = 0; + virtual void Free(void* p, size_t size) = 0; }; -#ifndef PADDLE_ONLY_CPU // The following code are for CUDA. - -// GPUAllocator calls cudaHostMalloc, which returns -// pinned and locked memory as staging areas for data exchange -// between host and device. Allocates too much would reduce the -// amount of memory available to the system for paging. So, by -// default, we should use GPUAllocator. -class GPUAllocator { +class CPUAllocator : public SystemAllocator { public: - static void* Alloc(size_t size) { - void* p = 0; - cudaError_t result = FLAGS_uses_pinned_memory ? cudaMallocHost(&p, size) - : cudaMalloc(&p, size); - if (result != cudaSuccess) { - cudaGetLastError(); // clear error if there is any. - } - return result == cudaSuccess ? p : nullptr; - } - - static void Free(void* p, size_t size) { - // Purposefully allow cudaErrorCudartUnloading, because - // that is returned if you ever call cudaFree after the - // driver has already shutdown. This happens only if the - // process is terminating, in which case we don't care if - // cudaFree succeeds. - cudaError_t err = FLAGS_uses_pinned_memory ? cudaFreeHost(p) : cudaFree(p); - if (err != cudaErrorCudartUnloading) { - platform::throw_on_error(err, "cudaFree{Host} failed"); - } - } + virtual void* Alloc(size_t size); + virtual void Free(void* p, size_t size); }; +#ifndef PADDLE_ONLY_CPU +class GPUAllocator : public SystemAllocator { + public: + virtual void* Alloc(size_t size); + virtual void Free(void* p, size_t size); +}; #endif // PADDLE_ONLY_CPU } // namespace detail diff --git a/paddle/memory/detail/system_allocator_test.cc b/paddle/memory/detail/system_allocator_test.cc index 829d3558ba..c461d8ac62 100644 --- a/paddle/memory/detail/system_allocator_test.cc +++ b/paddle/memory/detail/system_allocator_test.cc @@ -17,44 +17,55 @@ limitations under the License. 
*/ #include #include -#include "glog/logging.h" +#include "gflags/gflags.h" #include "gtest/gtest.h" -template -void TestAllocator(void* p) { - p = Allocator::Alloc(1024); +DECLARE_bool(use_pinned_memory); - int* i = static_cast(p); - std::shared_ptr ptr(i, [](int* p) { Allocator::Free(p, 1024); }); +void TestAllocator(paddle::memory::detail::SystemAllocator* a, size_t size) { + bool freed = false; + { + void* p = a->Alloc(size); + if (size > 0) { + EXPECT_NE(p, nullptr); + } else { + EXPECT_EQ(p, nullptr); + } - EXPECT_NE(p, nullptr); + int* i = static_cast(p); + std::shared_ptr ptr(i, [&freed, a, size](void* p) { + freed = true; + a->Free(p, size); + }); + } + EXPECT_TRUE(freed); } TEST(CPUAllocator, NoLockMem) { - void* p = nullptr; - FLAGS_uses_pinned_memory = false; - TestAllocator(p); - EXPECT_EQ(p, nullptr); + FLAGS_use_pinned_memory = false; + paddle::memory::detail::CPUAllocator a; + TestAllocator(&a, 2048); + TestAllocator(&a, 0); } TEST(CPUAllocator, LockMem) { - void* p = nullptr; - FLAGS_uses_pinned_memory = true; - TestAllocator(p); - EXPECT_EQ(p, nullptr); + FLAGS_use_pinned_memory = true; + paddle::memory::detail::CPUAllocator a; + TestAllocator(&a, 2048); + TestAllocator(&a, 0); } #ifndef PADDLE_ONLY_CPU TEST(GPUAllocator, NoStaging) { - void* p = nullptr; - FLAGS_uses_pinned_memory = false; - TestAllocator(p); - EXPECT_EQ(p, nullptr); + FLAGS_use_pinned_memory = false; + paddle::memory::detail::GPUAllocator a; + TestAllocator(&a, 2048); + TestAllocator(&a, 0); } TEST(GPUAllocator, Staging) { - void* p = nullptr; - FLAGS_uses_pinned_memory = true; - TestAllocator(p); - EXPECT_EQ(p, nullptr); + FLAGS_use_pinned_memory = true; + paddle::memory::detail::GPUAllocator a; + TestAllocator(&a, 2048); + TestAllocator(&a, 0); } #endif // PADDLE_ONLY_CPU From 2f52cb7909c5e8f372015454e3af33166713bfa7 Mon Sep 17 00:00:00 2001 From: Helin Wang Date: Tue, 27 Jun 2017 23:46:54 +0000 Subject: [PATCH 16/43] fix pserver test --- go/pserver/client_test.go | 2 +- go/pserver/service_test.go | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/go/pserver/client_test.go b/go/pserver/client_test.go index 6ecf1fa08a..4a62ae88a4 100644 --- a/go/pserver/client_test.go +++ b/go/pserver/client_test.go @@ -31,7 +31,7 @@ func init() { port[i] = p go func(l net.Listener) { - s, err := pserver.NewService("", time.Second*5) + s, err := pserver.NewService("", 1, time.Second*5) if err != nil { panic(err) } diff --git a/go/pserver/service_test.go b/go/pserver/service_test.go index f317535592..1d84f15d78 100644 --- a/go/pserver/service_test.go +++ b/go/pserver/service_test.go @@ -10,7 +10,7 @@ import ( ) func TestFull(t *testing.T) { - s, err := pserver.NewService("", time.Second*5) + s, err := pserver.NewService("", 1, time.Second*5) if err != nil { t.Error(err) } @@ -75,7 +75,7 @@ func TestFull(t *testing.T) { } func TestMultipleInit(t *testing.T) { - s, err := pserver.NewService("", time.Second*5) + s, err := pserver.NewService("", 1, time.Second*5) if err != nil { t.Error(err) } @@ -91,7 +91,7 @@ func TestMultipleInit(t *testing.T) { } func TestUninitialized(t *testing.T) { - s, err := pserver.NewService("", time.Second*5) + s, err := pserver.NewService("", 1, time.Second*5) err = s.SendGrad(pserver.Gradient{}, nil) if err.Error() != pserver.Uninitialized { t.FailNow() @@ -99,7 +99,7 @@ func TestUninitialized(t *testing.T) { } func TestBlockUntilInitialized(t *testing.T) { - s, err := pserver.NewService("", time.Second*5) + s, err := pserver.NewService("", 1, time.Second*5) if err != 
nil { t.Error(err) } From 3e087f763e9c6c15a4f1d542fb3bdc327f7441c7 Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Tue, 27 Jun 2017 16:48:25 -0700 Subject: [PATCH 17/43] Add buddy_allocator.cc and system_allocator.cc --- paddle/memory/detail/buddy_allocator.cc | 35 ++++++++ paddle/memory/detail/buddy_allocator.h | 76 ++++++++-------- paddle/memory/detail/system_allocator.cc | 90 +++++++++++++++++++ paddle/memory/detail/system_allocator_test.cc | 24 ++--- 4 files changed, 177 insertions(+), 48 deletions(-) create mode 100644 paddle/memory/detail/buddy_allocator.cc create mode 100644 paddle/memory/detail/system_allocator.cc diff --git a/paddle/memory/detail/buddy_allocator.cc b/paddle/memory/detail/buddy_allocator.cc new file mode 100644 index 0000000000..895bf319d7 --- /dev/null +++ b/paddle/memory/detail/buddy_allocator.cc @@ -0,0 +1,35 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once + +#include "paddle/memory/detail/system_allocator.h" + +namespace paddle { +namespace memory { +namespace detail { + +BuddyAllocator::BuddyAllocator(size_t pool_size, size_t max_pools, + SystemAllocator* system_allocator) + : pool_size_(pool_size), + max_pools_(max_pools), + system_allocator_(system_allocator) { + PADDLE_ASSERT(pool_size > 0); + PADDLE_ASSERT(max_pools > 0); + PADDLE_ASSERT(system_allocator != nullptr); +} + +} // namespace detail +} // namespace memory +} // namespace paddle diff --git a/paddle/memory/detail/buddy_allocator.h b/paddle/memory/detail/buddy_allocator.h index 35e96fd507..129b137ed7 100644 --- a/paddle/memory/detail/buddy_allocator.h +++ b/paddle/memory/detail/buddy_allocator.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ #pragma once @@ -20,34 +20,38 @@ namespace paddle { namespace memory { namespace detail { -template class BuddyAllocator { - public: - // TODO(gangliao): This is a draft, add Buddy Allocator Algorithm soon - BuddyAllocator() {} - ~BuddyAllocator() {} - - public: - void* Alloc(size_t size) { - return Allocator::Alloc(size); - } - void Free(void*) { - // Because all info like size are stored in meta data, - // thus it's duplicate if add the parameter `size` in - // `Free(void*)` interface. - } - size_t Used(); + public: + BuddyAllocator(size_t pool_size, size_t max_pools, + SystemAllocator* system_allocator); + ~BuddyAllocator(); + + void* Alloc(size_t size); + void Free(void*); + size_t Used(); + + private: + struct Block { + size_t size_; + Block* left_; // left buddy + Block* right_; // right buddy + }; + + // Initially, there is only one pool. If a Alloc founds not enough + // memory from that pool, and there has not been max_num_pools_, + // create a new pool by calling system_allocator_.Alloc(pool_size_). + std::vector pools_; + + size_t pool_size_; // the size of each pool; + size_t max_num_pools_; // the size of all pools; - public: - BuddyAllocator(const BuddyAllocator&) = delete; - BuddyAllocator& operator=(const BuddyAllocator&) = delete; + SystemAllocator* system_allocator_; - private: - size_t min_alloc_size_; - size_t max_alloc_size_; + std::mutex mutex_; - private: - std::mutex mutex_; + // Disable copy and assignment. + BuddyAllocator(const BuddyAllocator&) = delete; + BuddyAllocator& operator=(const BuddyAllocator&) = delete; }; BuddyAllocator* GetCPUBuddyAllocator() { @@ -63,16 +67,16 @@ BuddyAllocator* GetCPUBuddyAllocator() { BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) { static BuddyAllocator** as = NULL; if (as == NULL) { - int gpu_num = platform::GetDeviceCount(); + int gpu_num = platform::GetDeviceCount(); as = new BuddyAllocator*[gpu_num]; for (int gpu = 0; gpu < gpu_num; gpu++) { - as[gpu] = new BuddyAllocator(); + as[gpu] = new BuddyAllocator(); } } return as[gpu_id]; } -#endif // PADDLE_ONLY_CPU +#endif // PADDLE_ONLY_CPU } // namespace detail } // namespace memory diff --git a/paddle/memory/detail/system_allocator.cc b/paddle/memory/detail/system_allocator.cc new file mode 100644 index 0000000000..50bec926f8 --- /dev/null +++ b/paddle/memory/detail/system_allocator.cc @@ -0,0 +1,90 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/memory/detail/system_allocator.h" + +#include // for malloc and free +#include // for mlock and munlock + +#include "gflags/gflags.h" +#include "paddle/platform/assert.h" +#include "paddle/platform/cuda.h" + +// If use_pinned_memory is true, CPUAllocator calls mlock, which +// returns pinned and locked memory as staging areas for data exchange +// between host and device. Allocates too much would reduce the amount +// of memory available to the system for paging. So, by default, we +// should set false to use_pinned_memory. 
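+//
+// Editor's sketch (not part of the original patch): how a caller can
+// toggle the flag; this mirrors what system_allocator_test.cc below does.
+//
+//   FLAGS_use_pinned_memory = true;  // ask for mlock()ed pages
+//   paddle::memory::detail::CPUAllocator a;
+//   void* p = a.Alloc(2048);
+//   a.Free(p, 2048);  // munlock()s before free() when the flag is set
+//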
+DEFINE_bool(use_pinned_memory, false, + "If set, allocate cpu/gpu pinned memory."); + +namespace paddle { +namespace memory { +namespace detail { + +void* CPUAllocator::Alloc(size_t size) { + // According to http://www.cplusplus.com/reference/cstdlib/malloc/, + // malloc might not return nullptr if size is zero, but the returned + // pointer shall not be dereferenced -- so we make it nullptr. + if (size <= 0) return nullptr; + + void* p = malloc(size); + if (p != nullptr && FLAGS_use_pinned_memory) { + mlock(p, size); + } + return p; +} + +void CPUAllocator::Free(void* p, size_t size) { + if (p != nullptr && FLAGS_use_pinned_memory) { + munlock(p, size); + } + free(p); +} + +#ifndef PADDLE_ONLY_CPU + +void* GPUAllocator::Alloc(size_t size) { + // CUDA documentation doesn't explain if cudaMalloc returns nullptr + // if size is 0. We just make sure it does. + if (size <= 0) { + return nullptr; + } + + void* p = 0; + cudaError_t result = + FLAGS_use_pinned_memory ? cudaMallocHost(&p, size) : cudaMalloc(&p, size); + if (result != cudaSuccess) { + cudaGetLastError(); // clear error if there is any. + } + return result == cudaSuccess ? p : nullptr; +} + +void GPUAllocator::Free(void* p, size_t size) { + // Purposefully allow cudaErrorCudartUnloading, because + // that is returned if you ever call cudaFree after the + // driver has already shutdown. This happens only if the + // process is terminating, in which case we don't care if + // cudaFree succeeds. + cudaError_t err = FLAGS_use_pinned_memory ? cudaFreeHost(p) : cudaFree(p); + if (err != cudaErrorCudartUnloading) { + platform::throw_on_error(err, "cudaFree{Host} failed"); + } +} + +#endif // PADDLE_ONLY_CPU + +} // namespace detail +} // namespace memory +} // namespace paddle diff --git a/paddle/memory/detail/system_allocator_test.cc b/paddle/memory/detail/system_allocator_test.cc index c461d8ac62..9bd5706a4e 100644 --- a/paddle/memory/detail/system_allocator_test.cc +++ b/paddle/memory/detail/system_allocator_test.cc @@ -22,10 +22,10 @@ limitations under the License. 
*/ DECLARE_bool(use_pinned_memory); -void TestAllocator(paddle::memory::detail::SystemAllocator* a, size_t size) { +void TestAllocator(paddle::memory::detail::SystemAllocator& a, size_t size) { bool freed = false; { - void* p = a->Alloc(size); + void* p = a.Alloc(size); if (size > 0) { EXPECT_NE(p, nullptr); } else { @@ -33,9 +33,9 @@ void TestAllocator(paddle::memory::detail::SystemAllocator* a, size_t size) { } int* i = static_cast(p); - std::shared_ptr ptr(i, [&freed, a, size](void* p) { + std::shared_ptr ptr(i, [&](void* p) { freed = true; - a->Free(p, size); + a.Free(p, size); }); } EXPECT_TRUE(freed); @@ -44,28 +44,28 @@ void TestAllocator(paddle::memory::detail::SystemAllocator* a, size_t size) { TEST(CPUAllocator, NoLockMem) { FLAGS_use_pinned_memory = false; paddle::memory::detail::CPUAllocator a; - TestAllocator(&a, 2048); - TestAllocator(&a, 0); + TestAllocator(a, 2048); + TestAllocator(a, 0); } TEST(CPUAllocator, LockMem) { FLAGS_use_pinned_memory = true; paddle::memory::detail::CPUAllocator a; - TestAllocator(&a, 2048); - TestAllocator(&a, 0); + TestAllocator(a, 2048); + TestAllocator(a, 0); } #ifndef PADDLE_ONLY_CPU TEST(GPUAllocator, NoStaging) { FLAGS_use_pinned_memory = false; paddle::memory::detail::GPUAllocator a; - TestAllocator(&a, 2048); - TestAllocator(&a, 0); + TestAllocator(a, 2048); + TestAllocator(a, 0); } TEST(GPUAllocator, Staging) { FLAGS_use_pinned_memory = true; paddle::memory::detail::GPUAllocator a; - TestAllocator(&a, 2048); - TestAllocator(&a, 0); + TestAllocator(a, 2048); + TestAllocator(a, 0); } #endif // PADDLE_ONLY_CPU From 80642bee00c3f723d213b0475749aeee60d89795 Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Wed, 28 Jun 2017 08:22:58 +0800 Subject: [PATCH 18/43] fix_xmap and refine flowers dataset --- python/paddle/v2/dataset/__init__.py | 3 +- python/paddle/v2/dataset/flowers.py | 67 ++++++++++--------- .../paddle/v2/dataset/tests/flowers_test.py | 4 +- python/paddle/v2/reader/decorator.py | 47 +++++++------ .../paddle/v2/reader/tests/decorator_test.py | 18 ++--- 5 files changed, 72 insertions(+), 67 deletions(-) diff --git a/python/paddle/v2/dataset/__init__.py b/python/paddle/v2/dataset/__init__.py index 26252d5bbd..2e4beb6882 100644 --- a/python/paddle/v2/dataset/__init__.py +++ b/python/paddle/v2/dataset/__init__.py @@ -25,8 +25,9 @@ import uci_housing import sentiment import wmt14 import mq2007 +import flowers __all__ = [ 'mnist', 'imikolov', 'imdb', 'cifar', 'movielens', 'conll05', 'sentiment' - 'uci_housing', 'wmt14', 'mq2007' + 'uci_housing', 'wmt14', 'mq2007', 'flowers' ] diff --git a/python/paddle/v2/dataset/flowers.py b/python/paddle/v2/dataset/flowers.py index 07c13cf719..a181f3881a 100644 --- a/python/paddle/v2/dataset/flowers.py +++ b/python/paddle/v2/dataset/flowers.py @@ -13,18 +13,18 @@ # limitations under the License. """ This module will download dataset from -http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html +http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html and parse train/test set intopaddle reader creators. -This set contains images of flowers belonging to 102 different categories. +This set contains images of flowers belonging to 102 different categories. The images were acquired by searching the web and taking pictures. There are a minimum of 40 images for each category. The database was used in: Nilsback, M-E. and Zisserman, A. 
Automated flower classification over a large - number of classes.Proceedings of the Indian Conference on Computer Vision, -Graphics and Image Processing (2008) + number of classes.Proceedings of the Indian Conference on Computer Vision, +Graphics and Image Processing (2008) http://www.robots.ox.ac.uk/~vgg/publications/papers/nilsback08.{pdf,ps.gz}. """ @@ -34,9 +34,9 @@ from common import download import tarfile import scipy.io as scio from paddle.v2.image import * +from paddle.v2.reader import * import os import numpy as np -import paddle.v2 as paddle from multiprocessing import cpu_count __all__ = ['train', 'test', 'valid'] @@ -53,8 +53,8 @@ def default_mapper(sample): map image bytes data to type needed by model input layer ''' img, label = sample - img = paddle.image.load_image_bytes(img) - img = paddle.image.simple_transform(img, 256, 224, True) + img = load_image_bytes(img) + img = simple_transform(img, 256, 224, True) return img.flatten().astype('float32'), label @@ -63,22 +63,23 @@ def reader_creator(data_file, setid_file, dataset_name, mapper=default_mapper, - buffered_size=1024): + buffered_size=1024, + useXmap=True): ''' - 1. read images from tar file and + 1. read images from tar file and merge images into batch files in 102flowers.tgz_batch/ 2. get a reader to read sample from batch file - - :param data_file: downloaded data file + + :param data_file: downloaded data file :type data_file: string - :param label_file: downloaded label file + :param label_file: downloaded label file :type label_file: string :param setid_file: downloaded setid file containing information about how to split dataset :type setid_file: string :param dataset_name: data set name (tstid|trnid|valid) :type dataset_name: string - :param mapper: a function to map image bytes data to type + :param mapper: a function to map image bytes data to type needed by model input layer :type mapper: callable :param buffered_size: the size of buffer used to process images @@ -105,15 +106,17 @@ def reader_creator(data_file, for sample, label in itertools.izip(data, batch['label']): yield sample, int(label) - return paddle.reader.xmap_readers(mapper, reader, - cpu_count(), buffered_size) + if useXmap: + return xmap_readers(mapper, reader, cpu_count(), buffered_size) + else: + return map_readers(mapper, reader) -def train(mapper=default_mapper, buffered_size=1024): +def train(mapper=default_mapper, buffered_size=1024, useXmap=True): ''' - Create flowers training set reader. - It returns a reader, each sample in the reader is - image pixels in [0, 1] and label in [1, 102] + Create flowers training set reader. + It returns a reader, each sample in the reader is + image pixels in [0, 1] and label in [1, 102] translated from original color image by steps: 1. resize to 256*256 2. random crop to 224*224 @@ -128,15 +131,15 @@ def train(mapper=default_mapper, buffered_size=1024): return reader_creator( download(DATA_URL, 'flowers', DATA_MD5), download(LABEL_URL, 'flowers', LABEL_MD5), - download(SETID_URL, 'flowers', SETID_MD5), 'trnid', mapper, - buffered_size) + download(SETID_URL, 'flowers', SETID_MD5), 'tstid', mapper, + buffered_size, useXmap) -def test(mapper=default_mapper, buffered_size=1024): +def test(mapper=default_mapper, buffered_size=1024, useXmap=True): ''' - Create flowers test set reader. - It returns a reader, each sample in the reader is - image pixels in [0, 1] and label in [1, 102] + Create flowers test set reader. 
+ It returns a reader, each sample in the reader is + image pixels in [0, 1] and label in [1, 102] translated from original color image by steps: 1. resize to 256*256 2. random crop to 224*224 @@ -151,15 +154,15 @@ def test(mapper=default_mapper, buffered_size=1024): return reader_creator( download(DATA_URL, 'flowers', DATA_MD5), download(LABEL_URL, 'flowers', LABEL_MD5), - download(SETID_URL, 'flowers', SETID_MD5), 'tstid', mapper, - buffered_size) + download(SETID_URL, 'flowers', SETID_MD5), 'trnid', mapper, + buffered_size, useXmap) -def valid(mapper=default_mapper, buffered_size=1024): +def valid(mapper=default_mapper, buffered_size=1024, useXmap=True): ''' - Create flowers validation set reader. - It returns a reader, each sample in the reader is - image pixels in [0, 1] and label in [1, 102] + Create flowers validation set reader. + It returns a reader, each sample in the reader is + image pixels in [0, 1] and label in [1, 102] translated from original color image by steps: 1. resize to 256*256 2. random crop to 224*224 @@ -175,7 +178,7 @@ def valid(mapper=default_mapper, buffered_size=1024): download(DATA_URL, 'flowers', DATA_MD5), download(LABEL_URL, 'flowers', LABEL_MD5), download(SETID_URL, 'flowers', SETID_MD5), 'valid', mapper, - buffered_size) + buffered_size, useXmap) def fetch(): diff --git a/python/paddle/v2/dataset/tests/flowers_test.py b/python/paddle/v2/dataset/tests/flowers_test.py index cc0626f4fe..a8ae9a07ac 100644 --- a/python/paddle/v2/dataset/tests/flowers_test.py +++ b/python/paddle/v2/dataset/tests/flowers_test.py @@ -31,13 +31,13 @@ class TestFlowers(unittest.TestCase): def test_train(self): instances, max_label_value = self.check_reader( paddle.v2.dataset.flowers.train()) - self.assertEqual(instances, 1020) + self.assertEqual(instances, 6149) self.assertEqual(max_label_value, 102) def test_test(self): instances, max_label_value = self.check_reader( paddle.v2.dataset.flowers.test()) - self.assertEqual(instances, 6149) + self.assertEqual(instances, 1020) self.assertEqual(max_label_value, 102) def test_valid(self): diff --git a/python/paddle/v2/reader/decorator.py b/python/paddle/v2/reader/decorator.py index e432003129..45a4288751 100644 --- a/python/paddle/v2/reader/decorator.py +++ b/python/paddle/v2/reader/decorator.py @@ -166,12 +166,12 @@ def buffered(reader, size): The buffered data reader will read and save data entries into a buffer. Reading from the buffered data reader will proceed as long as the buffer is not empty. - + :param reader: the data reader to read from. :type reader: callable :param size: max buffer size. :type size: int - + :returns: the buffered data reader. 
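+
+    A minimal usage sketch (editor's example, not from the original
+    patch; ``some_reader`` stands for any reader creator):
+
+        buffered_reader = paddle.v2.reader.buffered(some_reader, 100)
+        for sample in buffered_reader():
+            pass  # up to 100 samples are prefetched into the buffer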
""" @@ -238,7 +238,7 @@ def xmap_readers(mapper, reader, process_num, buffer_size, order=False): :type mapper: callable :param reader: the data reader to read from :type reader: callable - :param process_num: process number to handle original sample + :param process_num: process number to handle original sample :type process_num: int :param buffer_size: max buffer size :type buffer_size: int @@ -248,9 +248,6 @@ def xmap_readers(mapper, reader, process_num, buffer_size, order=False): :rtype: callable """ end = XmapEndSignal() - in_queue = Queue(buffer_size) - out_queue = Queue(buffer_size) - out_order = [0] # define a worker to read samples from reader to in_queue def read_worker(reader, in_queue): @@ -266,12 +263,6 @@ def xmap_readers(mapper, reader, process_num, buffer_size, order=False): in_order += 1 in_queue.put(end) - # start a read worker in a thread - target = order_read_worker if order else read_worker - t = Thread(target=target, args=(reader, in_queue)) - t.daemon = True - t.start() - # define a worker to handle samples from in_queue by mapper # and put mapped samples into out_queue def handle_worker(in_queue, out_queue, mapper): @@ -298,19 +289,27 @@ def xmap_readers(mapper, reader, process_num, buffer_size, order=False): in_queue.put(end) out_queue.put(end) - # start several handle_workers - target = order_handle_worker if order else handle_worker - args = (in_queue, out_queue, mapper, out_order) if order else ( - in_queue, out_queue, mapper) - workers = [] - for i in xrange(process_num): - worker = Thread(target=target, args=args) - worker.daemon = True - workers.append(worker) - for w in workers: - w.start() - def xreader(): + in_queue = Queue(buffer_size) + out_queue = Queue(buffer_size) + out_order = [0] + # start a read worker in a thread + target = order_read_worker if order else read_worker + t = Thread(target=target, args=(reader, in_queue)) + t.daemon = True + t.start() + # start several handle_workers + target = order_handle_worker if order else handle_worker + args = (in_queue, out_queue, mapper, out_order) if order else ( + in_queue, out_queue, mapper) + workers = [] + for i in xrange(process_num): + worker = Thread(target=target, args=args) + worker.daemon = True + workers.append(worker) + for w in workers: + w.start() + sample = out_queue.get() while not isinstance(sample, XmapEndSignal): yield sample diff --git a/python/paddle/v2/reader/tests/decorator_test.py b/python/paddle/v2/reader/tests/decorator_test.py index bb3c5d220b..5a92951b10 100644 --- a/python/paddle/v2/reader/tests/decorator_test.py +++ b/python/paddle/v2/reader/tests/decorator_test.py @@ -132,15 +132,17 @@ class TestXmap(unittest.TestCase): for order in orders: for tNum in thread_nums: for size in buffered_size: - result = [] - for i in paddle.v2.reader.xmap_readers(mapper, + reader = paddle.v2.reader.xmap_readers(mapper, reader_creator_10(0), - tNum, size, order)(): - result.append(i) - if not order: - result.sort() - for idx, e in enumerate(result): - self.assertEqual(e, mapper(idx)) + tNum, size, order) + for n in xrange(3): + result = [] + for i in reader(): + result.append(i) + if not order: + result.sort() + for idx, e in enumerate(result): + self.assertEqual(e, mapper(idx)) if __name__ == '__main__': From 4cc9680cc60296f6071fa34893fda4f3d6806b97 Mon Sep 17 00:00:00 2001 From: Helin Wang Date: Wed, 28 Jun 2017 01:16:28 +0000 Subject: [PATCH 19/43] Make pserver able to get server index without etcd (decouple pserver with etcd) The pserver need to have server index for saving model on the 
distributed file system. The server index comes from etcd if etcd is used, or user can manually specify them. So we need pserver.NewService() to take index as an argument. Since index could come from etcd, it would be strange if pserver takes an index as argument, at the same time get the index from etcd. so we will need to decouple pserver with etcd. --- go/cmd/pserver/pserver.go | 8 +- go/master/etcd_client.go | 4 +- go/pserver/client_test.go | 3 +- go/pserver/etcd_client.go | 181 +++++++++++++++++++++++++++++++++++++ go/pserver/service.go | 156 ++------------------------------ go/pserver/service_test.go | 8 +- 6 files changed, 201 insertions(+), 159 deletions(-) create mode 100644 go/pserver/etcd_client.go diff --git a/go/cmd/pserver/pserver.go b/go/cmd/pserver/pserver.go index 6c85b1804b..8a42d4f8af 100644 --- a/go/cmd/pserver/pserver.go +++ b/go/cmd/pserver/pserver.go @@ -30,7 +30,13 @@ func main() { log.SetLevel(level) timeout := time.Second * time.Duration((*etcdTimeout)) - s, err := pserver.NewService(*etcdEndpoint, *numPservers, timeout) + e := pserver.NewEtcdClient(*etcdEndpoint, *numPservers, timeout) + idx, err := e.Register() + if err != nil { + panic(err) + } + + s, err := pserver.NewService(idx) if err != nil { panic(err) } diff --git a/go/master/etcd_client.go b/go/master/etcd_client.go index b7293a7598..f7b4638577 100644 --- a/go/master/etcd_client.go +++ b/go/master/etcd_client.go @@ -18,8 +18,8 @@ const ( DefaultAddrPath = "/master/addr" ) -// EtcdClient is the etcd client that master uses for fault tolerance -// and service registry. +// EtcdClient is the etcd client that the master uses for fault +// tolerance and service registry. type EtcdClient struct { lockPath string statePath string diff --git a/go/pserver/client_test.go b/go/pserver/client_test.go index 4a62ae88a4..5bd16118a7 100644 --- a/go/pserver/client_test.go +++ b/go/pserver/client_test.go @@ -7,7 +7,6 @@ import ( "strconv" "strings" "testing" - "time" "github.com/PaddlePaddle/Paddle/go/pserver" ) @@ -31,7 +30,7 @@ func init() { port[i] = p go func(l net.Listener) { - s, err := pserver.NewService("", 1, time.Second*5) + s, err := pserver.NewService(0) if err != nil { panic(err) } diff --git a/go/pserver/etcd_client.go b/go/pserver/etcd_client.go new file mode 100644 index 0000000000..4d88243edd --- /dev/null +++ b/go/pserver/etcd_client.go @@ -0,0 +1,181 @@ +package pserver + +import ( + "context" + "errors" + "strconv" + "strings" + "time" + + "github.com/PaddlePaddle/Paddle/go/utils/networkhelper" + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/clientv3/concurrency" + log "github.com/sirupsen/logrus" +) + +// EtcdClient is the etcd client that the pserver uses for fault +// tolerance, service registry and coordination. +type EtcdClient struct { + numPservers int + etcdEndpoints string + etcdClient *clientv3.Client + // etcdTimeout is also used as retry intervals. + etcdTimeout time.Duration + // FIXME: ensure GetExternalIP gets the correct ip for trainers to connect. + externalIP string + // desired number of pservers in the job. + // assume desired will not change during one training job. + desired int +} + +// NewEtcdClient creates an EtcdClient +func NewEtcdClient(endpoints string, numPservers int, timeout time.Duration) *EtcdClient { + return &EtcdClient{ + etcdTimeout: timeout, + numPservers: numPservers, + etcdEndpoints: endpoints, + } +} + +// Register registers the pserver on etcd +// +// Register returns the index of the current pserver. 
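+//
+// Editor's sketch of a call site (not part of the original patch; it
+// mirrors cmd/pserver/pserver.go above, with an illustrative endpoint
+// and pserver count):
+//
+//    e := pserver.NewEtcdClient("127.0.0.1:2379", 3, 5*time.Second)
+//    idx, err := e.Register()
+//    if err != nil {
+//        panic(err)
+//    }
+//    s, err := pserver.NewService(idx)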
+func (e *EtcdClient) Register() (int, error) { + + var err error + e.externalIP, err = networkhelper.GetExternalIP() + if err != nil { + return 0, err + } + + // initialize connection to etcd. + ep := strings.Split(e.etcdEndpoints, ",") + for { + cli, err := clientv3.New(clientv3.Config{ + Endpoints: ep, + DialTimeout: e.etcdTimeout, + }) + if err != nil { + log.Errorf("connect to etcd error: %v", err) + time.Sleep(e.etcdTimeout) + continue + } + e.etcdClient = cli + log.Debugf("inited client to %s", e.etcdEndpoints) + break + } + // init /ps_desired using transaction, for multiple pservers may want to write + // it at the same time. + for { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + _, err := e.initDesiredPsercers(ctx, e.numPservers) + cancel() + if err != nil { + log.Warn(err) + time.Sleep(e.etcdTimeout) + continue + } + break + } + // TODO: when implementing extending or reducing pservers, /ps_desired is + // changed, then we need to watch /ps_desired node for events. For now, just + // write once when init and read from it. + // wait and set s.desired init value + for { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + resp, err := e.etcdClient.Get(ctx, PsDesired) + cancel() + if err != nil { + log.Errorf("getting %s error: %v", PsDesired, err) + time.Sleep(e.etcdTimeout) + continue + } + if len(resp.Kvs) != 0 { + e.desired, err = strconv.Atoi(string(resp.Kvs[0].Value)) + if err != nil { + log.Errorf("value of %s invalid %v\n", PsDesired, err) + time.Sleep(e.etcdTimeout) + // NOTE: wait util ps_desired value change + continue + } + break + } + } + + var pserverIdx int + // try register pserver node on etcd + for { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + var err error + pserverIdx, err = e.registerPserverEtcd(ctx) + cancel() + if err != nil { + log.Warn(err) + time.Sleep(e.etcdTimeout) + continue + } + break + } + + return pserverIdx, nil +} + +func (e *EtcdClient) initDesiredPsercers(ctx context.Context, numPservers int) (*clientv3.TxnResponse, error) { + return concurrency.NewSTM(e.etcdClient, func(c concurrency.STM) error { + dsStr := c.Get(PsDesired) + if dsStr == "" { + c.Put(PsDesired, strconv.Itoa(numPservers)) + } + return nil + }, concurrency.WithAbortContext(ctx), concurrency.WithIsolation(concurrency.RepeatableReads)) +} + +// registerPserverEtcd registers pserver node on etcd using transaction. +func (e *EtcdClient) registerPserverEtcd(ctx context.Context) (int, error) { + var idx int + _, err := concurrency.NewSTM(e.etcdClient, func(c concurrency.STM) error { + registered := false + for i := 0; i < e.desired; i++ { + psKey := "/ps/" + strconv.Itoa(i) + log.Debugf("checking %s", psKey) + ps := c.Get(psKey) + log.Debugf("got value (%s) for key: %s", ps, psKey) + + if ps == "" { + resp, err := e.etcdClient.Grant(context.TODO(), 5) + if err != nil { + log.Fatal(err) + } + // find the first id and write info + c.Put(psKey, e.externalIP, clientv3.WithLease(resp.ID)) + log.Debugf("set pserver node %s with value %s", psKey, e.externalIP) + ch, kaerr := e.etcdClient.KeepAlive(context.TODO(), resp.ID) + if kaerr != nil { + log.Errorf("keepalive etcd node error: %v", kaerr) + return kaerr + } + + // Eat the keep alive message so etcd + // will not expire the lease. 
+ go func(ch <-chan *clientv3.LeaseKeepAliveResponse) { + ka := <-ch + log.Debugf("keepalive: %d\n", ka.TTL) + }(ch) + log.Debug("register finished") + idx = i + registered = true + break + } + } + if registered == true { + return nil + } + return errors.New("not registerd, may due to already have enough pservers") + }, concurrency.WithAbortContext(ctx), concurrency.WithIsolation(concurrency.RepeatableReads)) + + if err != nil { + return 0, err + } + + return idx, nil +} diff --git a/go/pserver/service.go b/go/pserver/service.go index f966595fdc..f386ebea1e 100644 --- a/go/pserver/service.go +++ b/go/pserver/service.go @@ -1,18 +1,9 @@ package pserver import ( - "context" "errors" "fmt" - "strconv" - "strings" "sync" - "time" - - "github.com/PaddlePaddle/Paddle/go/utils/networkhelper" - "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/clientv3/concurrency" - log "github.com/sirupsen/logrus" ) // ElementType is the type of elements of a Parameter. @@ -55,160 +46,25 @@ type Gradient Parameter // Service is the RPC service for pserver. type Service struct { initialized chan struct{} + idx int mu sync.Mutex opt *optimizer paramMap map[string]Parameter - - etcdEndpoints string - etcdClient *clientv3.Client - // etcdTimeout is also used as retry intervals. - etcdTimeout time.Duration - // desired number of pservers in the job. - // assume desired will not change during one training job. - desired int - // FIXME: ensure GetExternalIP gets the correct ip for trainers to connect. - externalIP string } // NewService creates a new service, will bypass etcd registration if no // endpoints specified. -func NewService(endpoints string, numPservers int, timeout time.Duration) (*Service, error) { - s := &Service{opt: newOptimizer(sgd, 0.005)} +func NewService(idx int) (*Service, error) { + s := &Service{ + idx: idx, + opt: newOptimizer(sgd, 0.005), + } s.paramMap = make(map[string]Parameter) s.initialized = make(chan struct{}) - s.etcdEndpoints = endpoints - s.etcdTimeout = timeout - - var err error - s.externalIP, err = networkhelper.GetExternalIP() - if err != nil { - return nil, err - } - - if endpoints != "" { - // initialize connection to etcd, try - ep := strings.Split(s.etcdEndpoints, ",") - for { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: ep, - DialTimeout: s.etcdTimeout, - }) - if err != nil { - log.Errorf("connect to etcd error: %v", err) - time.Sleep(s.etcdTimeout) - continue - } - s.etcdClient = cli - log.Debugf("inited client to %s", s.etcdEndpoints) - break - } - // init /ps_desired using transaction, for multiple pservers may want to write - // it at the same time. - for { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - _, err := s.initDesiredPsercers(ctx, numPservers) - cancel() - if err != nil { - log.Warn(err) - time.Sleep(s.etcdTimeout) - continue - } - break - } - // TODO: when implementing extending or reducing pservers, /ps_desired is - // changed, then we need to watch /ps_desired node for events. For now, just - // write once when init and read from it. 
- // wait and set s.desired init value - for { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - resp, err := s.etcdClient.Get(ctx, PsDesired) - cancel() - if err != nil { - log.Errorf("getting %s error: %v", PsDesired, err) - time.Sleep(s.etcdTimeout) - continue - } - if len(resp.Kvs) != 0 { - s.desired, err = strconv.Atoi(string(resp.Kvs[0].Value)) - if err != nil { - log.Errorf("value of %s invalid %v\n", PsDesired, err) - time.Sleep(s.etcdTimeout) - // NOTE: wait util ps_desired value change - continue - } - break - } - } - // try register pserver node on etcd - for { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - _, err := s.registerPserverEtcd(ctx) - cancel() - if err != nil { - log.Warn(err) - time.Sleep(s.etcdTimeout) - continue - } - break - } - } // if endpoints != "" - // Bypass etcd registration if no endpoints specified return s, nil } -func (s *Service) initDesiredPsercers(ctx context.Context, numPservers int) (*clientv3.TxnResponse, error) { - return concurrency.NewSTM(s.etcdClient, func(c concurrency.STM) error { - dsStr := c.Get(PsDesired) - if dsStr == "" { - c.Put(PsDesired, strconv.Itoa(numPservers)) - } - return nil - }, concurrency.WithAbortContext(ctx), concurrency.WithIsolation(concurrency.RepeatableReads)) -} - -// registerPserverEtcd registers pserver node on etcd using transaction. -func (s *Service) registerPserverEtcd(ctx context.Context) (*clientv3.TxnResponse, error) { - return concurrency.NewSTM(s.etcdClient, func(c concurrency.STM) error { - registered := false - for i := 0; i < s.desired; i++ { - psKey := "/ps/" + strconv.Itoa(i) - log.Debugf("checking %s", psKey) - ps := c.Get(psKey) - log.Debugf("got value (%s) for key: %s", ps, psKey) - - if ps == "" { - resp, err := s.etcdClient.Grant(context.TODO(), 5) - if err != nil { - log.Fatal(err) - } - // find the first id and write info - c.Put(psKey, s.externalIP, clientv3.WithLease(resp.ID)) - log.Debugf("set pserver node %s with value %s", psKey, s.externalIP) - ch, kaerr := s.etcdClient.KeepAlive(context.TODO(), resp.ID) - if kaerr != nil { - log.Errorf("keepalive etcd node error: %v", kaerr) - return kaerr - } - - // Eat the keep alive message so etcd - // will not expire the lease. - go func(ch <-chan *clientv3.LeaseKeepAliveResponse) { - ka := <-ch - log.Debugf("keepalive: %d\n", ka.TTL) - }(ch) - log.Debug("register finished") - registered = true - break - } - } - if registered == true { - return nil - } - return errors.New("not registerd, may due to already have enough pservers") - }, concurrency.WithAbortContext(ctx), concurrency.WithIsolation(concurrency.RepeatableReads)) -} - // InitParam initializes a parameter. 
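+//
+// (Editor's note: the unused "dummy *int" reply parameter below exists
+// to satisfy Go's net/rpc method signature, which requires a pointer
+// reply argument; direct callers, such as the tests, pass nil.)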
func (s *Service) InitParam(paramWithConfigs ParameterWithConfig, dummy *int) error { select { diff --git a/go/pserver/service_test.go b/go/pserver/service_test.go index 1d84f15d78..d9d887cffd 100644 --- a/go/pserver/service_test.go +++ b/go/pserver/service_test.go @@ -10,7 +10,7 @@ import ( ) func TestFull(t *testing.T) { - s, err := pserver.NewService("", 1, time.Second*5) + s, err := pserver.NewService(0) if err != nil { t.Error(err) } @@ -75,7 +75,7 @@ func TestFull(t *testing.T) { } func TestMultipleInit(t *testing.T) { - s, err := pserver.NewService("", 1, time.Second*5) + s, err := pserver.NewService(0) if err != nil { t.Error(err) } @@ -91,7 +91,7 @@ func TestMultipleInit(t *testing.T) { } func TestUninitialized(t *testing.T) { - s, err := pserver.NewService("", 1, time.Second*5) + s, err := pserver.NewService(0) err = s.SendGrad(pserver.Gradient{}, nil) if err.Error() != pserver.Uninitialized { t.FailNow() @@ -99,7 +99,7 @@ func TestUninitialized(t *testing.T) { } func TestBlockUntilInitialized(t *testing.T) { - s, err := pserver.NewService("", 1, time.Second*5) + s, err := pserver.NewService(0) if err != nil { t.Error(err) } From 98bb8ee295c6bbbb573f0ea33df1f7175a8b6b41 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Wed, 28 Jun 2017 10:01:23 +0800 Subject: [PATCH 20/43] fix dim problem --- paddle/py_paddle/dataprovider_converter.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/paddle/py_paddle/dataprovider_converter.py b/paddle/py_paddle/dataprovider_converter.py index edc2e02923..f8242d9f7b 100644 --- a/paddle/py_paddle/dataprovider_converter.py +++ b/paddle/py_paddle/dataprovider_converter.py @@ -109,7 +109,10 @@ class DenseScanner(IScanner): if len(self.__shape__) > 3: raise ValueError( "The dimension of input cannot be greater than 3.") - self.__dim__ = reduce(lambda x, y: x * y, self.__shape__) + if len(self.__shape__) == 0: + self.__dim__ = 1 + else: + self.__dim__ = reduce(lambda x, y: x * y, self.__shape__) if len(self.__shape__) == 1 and self.__dim__ != self.input_type.dim: raise ValueError( "The data size must be equal to it in data layer.") From 8b69c1348c17cf7aca83aacc2c63ef9eaad97467 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Wed, 28 Jun 2017 10:34:22 +0800 Subject: [PATCH 21/43] check shape of vector input, should not be a scalar --- paddle/py_paddle/dataprovider_converter.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/paddle/py_paddle/dataprovider_converter.py b/paddle/py_paddle/dataprovider_converter.py index f8242d9f7b..7df4a6967f 100644 --- a/paddle/py_paddle/dataprovider_converter.py +++ b/paddle/py_paddle/dataprovider_converter.py @@ -110,9 +110,9 @@ class DenseScanner(IScanner): raise ValueError( "The dimension of input cannot be greater than 3.") if len(self.__shape__) == 0: - self.__dim__ = 1 - else: - self.__dim__ = reduce(lambda x, y: x * y, self.__shape__) + raise ValueError( + "The input should be a vector, please check your input data.") + self.__dim__ = reduce(lambda x, y: x * y, self.__shape__) if len(self.__shape__) == 1 and self.__dim__ != self.input_type.dim: raise ValueError( "The data size must be equal to it in data layer.") From 3e9aa7fd8bfac7434057afcdd6ae62ea7a92bff1 Mon Sep 17 00:00:00 2001 From: liaogang Date: Wed, 28 Jun 2017 10:42:54 +0800 Subject: [PATCH 22/43] FIX: Pass CI --- cmake/generic.cmake | 4 ---- paddle/memory/CMakeLists.txt | 6 ------ paddle/memory/detail/buddy_allocator.cc | 2 +- paddle/memory/detail/buddy_allocator.h | 3 +++ 4 files changed, 4 insertions(+), 11 
deletions(-) diff --git a/cmake/generic.cmake b/cmake/generic.cmake index 840155750e..69e8164a00 100644 --- a/cmake/generic.cmake +++ b/cmake/generic.cmake @@ -78,10 +78,6 @@ # # cc_test(example_test SRCS example_test.cc DEPS example glog gflags) -if(WITH_GPU) - add_definitions(-DPADDLE_WITH_GPU) -endif() - if(NOT APPLE) find_package(Threads REQUIRED) link_libraries(${CMAKE_THREAD_LIBS_INIT}) diff --git a/paddle/memory/CMakeLists.txt b/paddle/memory/CMakeLists.txt index e74ce75c93..3943c3cfad 100644 --- a/paddle/memory/CMakeLists.txt +++ b/paddle/memory/CMakeLists.txt @@ -1,7 +1 @@ add_subdirectory(detail) - -if(${WITH_GPU}) - nv_library(memory SRCS memory.cc) -else(${WITH_GPU}) - cc_library(memory SRCS memory.cc) -endif(${WITH_GPU}) diff --git a/paddle/memory/detail/buddy_allocator.cc b/paddle/memory/detail/buddy_allocator.cc index 895bf319d7..ebe680f5ee 100644 --- a/paddle/memory/detail/buddy_allocator.cc +++ b/paddle/memory/detail/buddy_allocator.cc @@ -14,7 +14,7 @@ #pragma once -#include "paddle/memory/detail/system_allocator.h" +#include "paddle/memory/detail/buddy_allocator.h" namespace paddle { namespace memory { diff --git a/paddle/memory/detail/buddy_allocator.h b/paddle/memory/detail/buddy_allocator.h index 129b137ed7..702c7d28ee 100644 --- a/paddle/memory/detail/buddy_allocator.h +++ b/paddle/memory/detail/buddy_allocator.h @@ -16,6 +16,9 @@ #include "paddle/memory/detail/system_allocator.h" +#include +#include + namespace paddle { namespace memory { namespace detail { From 9490d243dd0255021b288f9c2e43c57b30264b9b Mon Sep 17 00:00:00 2001 From: liaogang Date: Wed, 28 Jun 2017 10:46:14 +0800 Subject: [PATCH 23/43] ENH: clang-format --- paddle/memory/detail/buddy_allocator.h | 2 +- paddle/platform/cuda.h | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/paddle/memory/detail/buddy_allocator.h b/paddle/memory/detail/buddy_allocator.h index 702c7d28ee..82e6aaedc7 100644 --- a/paddle/memory/detail/buddy_allocator.h +++ b/paddle/memory/detail/buddy_allocator.h @@ -16,8 +16,8 @@ #include "paddle/memory/detail/system_allocator.h" -#include #include +#include namespace paddle { namespace memory { diff --git a/paddle/platform/cuda.h b/paddle/platform/cuda.h index 864a5d3340..8fe891f9ce 100644 --- a/paddle/platform/cuda.h +++ b/paddle/platform/cuda.h @@ -29,13 +29,12 @@ inline void throw_on_error(cudaError_t e, const char* message) { } int GetDeviceCount(void) { - int count; - throw_on_error(cudaGetDeviceCount(&count), - "cudaGetDeviceCount failed"); - return count; + int count; + throw_on_error(cudaGetDeviceCount(&count), "cudaGetDeviceCount failed"); + return count; } } // namespace platform } // namespace paddle -#endif // PADDLE_ONLY_CPU +#endif // PADDLE_ONLY_CPU From a402cf908160c5fb8a4f0d1aa9efdca7109c0375 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Wed, 28 Jun 2017 10:46:23 +0800 Subject: [PATCH 24/43] correct the demo code for dense_vector label input --- doc/getstarted/concepts/use_concepts_cn.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/getstarted/concepts/use_concepts_cn.rst b/doc/getstarted/concepts/use_concepts_cn.rst index e63ca11102..f15b11bd78 100644 --- a/doc/getstarted/concepts/use_concepts_cn.rst +++ b/doc/getstarted/concepts/use_concepts_cn.rst @@ -111,7 +111,7 @@ PaddlePaddle支持不同类型的输入数据,主要包括四种类型,和 # define training dataset reader def train_reader(): train_x = np.array([[1, 1], [1, 2], [3, 4], [5, 2]]) - train_y = np.array([-2, -3, -7, -7]) + train_y = np.array([[-2], [-3], [-7], [-7]]) def reader(): for i in 
xrange(train_y.shape[0]): yield train_x[i], train_y[i] From e93c3e4070c37dd6bdf31ec4d3fa4033f3208e2e Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Wed, 28 Jun 2017 11:07:52 +0800 Subject: [PATCH 25/43] fix format --- paddle/py_paddle/dataprovider_converter.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/paddle/py_paddle/dataprovider_converter.py b/paddle/py_paddle/dataprovider_converter.py index 7df4a6967f..218cb5ec56 100644 --- a/paddle/py_paddle/dataprovider_converter.py +++ b/paddle/py_paddle/dataprovider_converter.py @@ -111,7 +111,8 @@ class DenseScanner(IScanner): "The dimension of input cannot be greater than 3.") if len(self.__shape__) == 0: raise ValueError( - "The input should be a vector, please check your input data.") + "The input should be a vector, please check your input data." + ) self.__dim__ = reduce(lambda x, y: x * y, self.__shape__) if len(self.__shape__) == 1 and self.__dim__ != self.input_type.dim: raise ValueError( From 3a119efedad1a15f587c9415c70f661853a8d579 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 28 Jun 2017 16:18:22 +0800 Subject: [PATCH 26/43] Adding Enforce to platform Basically from caffe2::logging.h, but only expose `PADDLE_ENFORCE` interface. --- paddle/platform/CMakeLists.txt | 1 + paddle/platform/enforce.h | 116 ++++++++++++++++++++++++++++++++ paddle/platform/enforce_test.cc | 25 +++++++ 3 files changed, 142 insertions(+) create mode 100644 paddle/platform/enforce.h create mode 100644 paddle/platform/enforce_test.cc diff --git a/paddle/platform/CMakeLists.txt b/paddle/platform/CMakeLists.txt index 7abe2ab89e..8435410564 100644 --- a/paddle/platform/CMakeLists.txt +++ b/paddle/platform/CMakeLists.txt @@ -3,3 +3,4 @@ nv_test(cuda_test SRCS cuda_test.cu) cc_library(place SRCS place.cc) cc_test(place_test SRCS place_test.cc DEPS place glog gflags) cc_test(must_check_test SRCS must_check_test.cc) +cc_test(enforce_test SRCS enforce_test.cc) diff --git a/paddle/platform/enforce.h b/paddle/platform/enforce.h new file mode 100644 index 0000000000..e501e80c55 --- /dev/null +++ b/paddle/platform/enforce.h @@ -0,0 +1,116 @@ +/* + Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#pragma once +#include +#include + +namespace paddle { +namespace platform { + +/** + * @brief Enforce exception. Inherits std::exception + * + * All enforce condition not met, will throw an EnforceNotMet exception. 
+ */ +class EnforceNotMet : public std::exception { + public: + EnforceNotMet(const std::string& msg, const char* file, int fileline) + : file_(file), fileline_(fileline) { + std::ostringstream sout; + sout << msg << " at [" << file_ << ":" << fileline_ << "];"; + all_msg_ = sout.str(); + } + + const char* what() const noexcept override { return all_msg_.c_str(); } + + private: + std::string all_msg_; + const char* file_; + int fileline_; +}; + +namespace details { + +inline void MakeStringInternal(std::ostringstream& stream) {} + +template <typename T> +inline void MakeStringInternal(std::ostringstream& stream, T v) { + stream << v; +} + +template <typename T, typename... ARGS> +inline void MakeStringInternal(std::ostringstream& stream, T v, ARGS... args) { + MakeStringInternal(stream, v); + MakeStringInternal(stream, args...); +} + +/** + * @brief MakeString concatenates all args into a string. + */ +template <typename... ARGS> +inline std::string MakeString(ARGS... args) { + std::ostringstream sout; + details::MakeStringInternal(sout, args...); + return sout.str(); +} + +/** + * @brief special handling for std::string + */ +template <> +inline std::string MakeString(std::string str) { + return str; +} + +/** + * @brief special handling for const char* + */ +template <> +inline std::string MakeString(const char* str) { + return std::string(str); +} +} // namespace details + +// From https://stackoverflow.com/questions/30130930/ +// __builtin_expect is a GCC/Clang builtin. Since the enforced condition +// should be true in most situations, the `UNLIKELY` macro helps the compiler +// generate faster code. +#define UNLIKELY(condition) __builtin_expect(static_cast<bool>(condition), 0) + +/** + * @brief Throw an EnforceNotMet exception, automatically filling in __FILE__ & + * __LINE__ + * + * This macro takes __VA_ARGS__; the user can pass any type that can be + * serialized to std::ostream + */ +#define PADDLE_THROW(...) \ + do { \ + throw ::paddle::platform::EnforceNotMet( \ + ::paddle::platform::details::MakeString(__VA_ARGS__), __FILE__, \ + __LINE__); \ + } while (0) + +/** + * @brief Enforce a condition, otherwise throw an EnforceNotMet + */ +#define PADDLE_ENFORCE(condition, ...)
\ + do { \ + if (UNLIKELY(!(condition))) { \ + PADDLE_THROW(__VA_ARGS__); \ + } \ + } while (0) + +} // namespace platform +} // namespace paddle diff --git a/paddle/platform/enforce_test.cc b/paddle/platform/enforce_test.cc new file mode 100644 index 0000000000..d3e945e972 --- /dev/null +++ b/paddle/platform/enforce_test.cc @@ -0,0 +1,25 @@ +#include +#include + +TEST(ENFORCE, OK) { + PADDLE_ENFORCE(true, "Enforce is ok", 123, "now", 0.345); + size_t val = 1; + const size_t limit = 10; + PADDLE_ENFORCE(val < limit, "Enforce is OK too"); +} + +TEST(ENFORCE, FAILED) { + bool in_catch = false; + try { + PADDLE_ENFORCE(false, "Enforce is not ok ", 123, " at all"); + } catch (paddle::platform::EnforceNotMet err) { + in_catch = true; + std::string msg = "Enforce is not ok 123 at all"; + const char* what = err.what(); + for (size_t i = 0; i < msg.length(); ++i) { + ASSERT_EQ(what[i], msg[i]); + } + } + + ASSERT_TRUE(in_catch); +} \ No newline at end of file From d2581f34e8179bdd7e0b9ce8a9d3e847758ff52d Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 28 Jun 2017 17:48:20 +0800 Subject: [PATCH 27/43] change copy right format --- paddle/platform/enforce.h | 22 ++++++++++------------ paddle/platform/enforce_test.cc | 11 +++++++++++ 2 files changed, 21 insertions(+), 12 deletions(-) diff --git a/paddle/platform/enforce.h b/paddle/platform/enforce.h index e501e80c55..fbd3405a24 100644 --- a/paddle/platform/enforce.h +++ b/paddle/platform/enforce.h @@ -1,15 +1,13 @@ -/* - Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once #include diff --git a/paddle/platform/enforce_test.cc b/paddle/platform/enforce_test.cc index d3e945e972..23b32444ad 100644 --- a/paddle/platform/enforce_test.cc +++ b/paddle/platform/enforce_test.cc @@ -1,3 +1,14 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + #include #include From fc5972ba2c0c2565d4255fda19f1b68f02c18e62 Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Wed, 28 Jun 2017 19:54:25 +0800 Subject: [PATCH 28/43] fix requirement config for flowers dataset --- python/setup.py.in | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/python/setup.py.in b/python/setup.py.in index 86fc0fc5c0..aa6771709c 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -15,7 +15,8 @@ setup_requires=["requests", "protobuf==3.1", "recordio", "matplotlib", - "rarfile"] + "rarfile", + "scipy>=0.19.0"] if '${CMAKE_SYSTEM_PROCESSOR}' not in ['arm', 'armv7-a', 'aarch64']: setup_requires+=["opencv-python"] From b93e863a1c5f31e9404dee8a2a6684119b876a2a Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Wed, 28 Jun 2017 20:02:52 +0800 Subject: [PATCH 29/43] Fix bug in MultiGradientMachine. --- paddle/gserver/gradientmachines/MultiGradientMachine.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/gserver/gradientmachines/MultiGradientMachine.cpp b/paddle/gserver/gradientmachines/MultiGradientMachine.cpp index 8ef5e9d0c1..018da6c76d 100644 --- a/paddle/gserver/gradientmachines/MultiGradientMachine.cpp +++ b/paddle/gserver/gradientmachines/MultiGradientMachine.cpp @@ -601,7 +601,7 @@ void TrainerThread::backward() { void TrainerThread::backwardCallback(Parameter* para) { // CPU parameters are merged in the end - if (!para->useGpu()) return; + if (!para->useGpu() || para->isStatic()) return; int paramId = para->getID(); if (multiMachine_->getNumThreads() == 1) { From 9ad846ecee27ff1860debc4658090f1cfa75140f Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 28 Jun 2017 21:20:33 +0800 Subject: [PATCH 30/43] Remove must_check in paddle::platform --- paddle/platform/CMakeLists.txt | 1 - paddle/platform/must_check.h | 26 -------------------------- paddle/platform/must_check_test.cc | 10 ---------- paddle/utils/Error.h | 16 +++++++++++++++- 4 files changed, 15 insertions(+), 38 deletions(-) delete mode 100644 paddle/platform/must_check.h delete mode 100644 paddle/platform/must_check_test.cc diff --git a/paddle/platform/CMakeLists.txt b/paddle/platform/CMakeLists.txt index 7abe2ab89e..c7d7b14518 100644 --- a/paddle/platform/CMakeLists.txt +++ b/paddle/platform/CMakeLists.txt @@ -2,4 +2,3 @@ nv_test(cuda_test SRCS cuda_test.cu) cc_library(place SRCS place.cc) cc_test(place_test SRCS place_test.cc DEPS place glog gflags) -cc_test(must_check_test SRCS must_check_test.cc) diff --git a/paddle/platform/must_check.h b/paddle/platform/must_check.h deleted file mode 100644 index 4fcc62afc0..0000000000 --- a/paddle/platform/must_check.h +++ /dev/null @@ -1,26 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once -/** - * __must_check macro. It make the function's return value must be used, - * otherwise it will raise a compile warning. And also Paddle treat all compile - * warnings as errors. 
- */ -#ifdef __GNUC__ -#if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) >= 30400 -#define __must_check __attribute__((warn_unused_result)) -#else -#define __must_check -#endif -#else -#define __must_check -#endif diff --git a/paddle/platform/must_check_test.cc b/paddle/platform/must_check_test.cc deleted file mode 100644 index 6ee3ea49ac..0000000000 --- a/paddle/platform/must_check_test.cc +++ /dev/null @@ -1,10 +0,0 @@ -#include -#include - -int __must_check SomeFunctionMustCheck() { return 0; } - -TEST(MustCheck, all) { - // This line should not be compiled, because the - // return value of SomeFunctionMustCheck marked as __must_check - // SomeFunctionMustCheck(); -} \ No newline at end of file diff --git a/paddle/utils/Error.h b/paddle/utils/Error.h index f3d535c69c..27ddaab3f0 100644 --- a/paddle/utils/Error.h +++ b/paddle/utils/Error.h @@ -19,7 +19,21 @@ limitations under the License. */ #include #include #include -#include "paddle/platform/must_check.h" + +/** + * __must_check macro. It make the function's return value must be used, + * otherwise it will raise a compile warning. And also Paddle treat all compile + * warnings as errors. + */ +#ifdef __GNUC__ +#if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) >= 30400 +#define __must_check __attribute__((warn_unused_result)) +#else +#define __must_check +#endif +#else +#define __must_check +#endif namespace paddle { From 44e39246639fe5b3ba1dbf5158531f7eb4fc6175 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Wed, 28 Jun 2017 23:04:35 +0800 Subject: [PATCH 31/43] "fix client send empty gradients bug" --- go/pserver/client.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/go/pserver/client.go b/go/pserver/client.go index dda9159772..a8d2d710d8 100644 --- a/go/pserver/client.go +++ b/go/pserver/client.go @@ -123,6 +123,10 @@ func (c *Client) FinishInitParams() error { // SendGrads sends gradients to parameter servers for updating // parameters. func (c *Client) SendGrads(grads []Gradient) error { + if len(grads) == 0 { + log.Info("Send Empty Gradient") + return nil + } errCh := make(chan error, len(grads)) for _, g := range grads { go func(g Gradient) { From 01f44bff669442ffdb67a5baac14aa693cba08c6 Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Wed, 28 Jun 2017 23:12:19 +0800 Subject: [PATCH 32/43] rename args and add comments 1. rename 'useXmap' to 'use_xmap' 2. add comments about exchanging train data and test data --- python/paddle/v2/dataset/flowers.py | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/python/paddle/v2/dataset/flowers.py b/python/paddle/v2/dataset/flowers.py index a181f3881a..158cfe158c 100644 --- a/python/paddle/v2/dataset/flowers.py +++ b/python/paddle/v2/dataset/flowers.py @@ -46,6 +46,12 @@ SETID_URL = 'http://www.robots.ox.ac.uk/~vgg/data/flowers/102/setid.mat' DATA_MD5 = '52808999861908f626f3c1f4e79d11fa' LABEL_MD5 = 'e0620be6f572b9609742df49c70aed4d' SETID_MD5 = 'a5357ecc9cb78c4bef273ce3793fc85c' +# In official 'readme', tstid is the flag of test data +# and trnid is the flag of train data. But test data is more than train data. +# So we exchange the train data and test data. +TRAIN_FLAG = 'tstid' +TEST_FLAG = 'trnid' +VALID_FLAG = 'valid' def default_mapper(sample): @@ -64,7 +70,7 @@ def reader_creator(data_file, dataset_name, mapper=default_mapper, buffered_size=1024, - useXmap=True): + use_xmap=True): ''' 1. 
read images from tar file and merge images into batch files in 102flowers.tgz_batch/ @@ -106,13 +112,13 @@ def reader_creator(data_file, for sample, label in itertools.izip(data, batch['label']): yield sample, int(label) - if useXmap: + if use_xmap: return xmap_readers(mapper, reader, cpu_count(), buffered_size) else: return map_readers(mapper, reader) -def train(mapper=default_mapper, buffered_size=1024, useXmap=True): +def train(mapper=default_mapper, buffered_size=1024, use_xmap=True): ''' Create flowers training set reader. It returns a reader, each sample in the reader is @@ -131,11 +137,11 @@ def train(mapper=default_mapper, buffered_size=1024, useXmap=True): return reader_creator( download(DATA_URL, 'flowers', DATA_MD5), download(LABEL_URL, 'flowers', LABEL_MD5), - download(SETID_URL, 'flowers', SETID_MD5), 'tstid', mapper, - buffered_size, useXmap) + download(SETID_URL, 'flowers', SETID_MD5), TRAIN_FLAG, mapper, + buffered_size, use_xmap) -def test(mapper=default_mapper, buffered_size=1024, useXmap=True): +def test(mapper=default_mapper, buffered_size=1024, use_xmap=True): ''' Create flowers test set reader. It returns a reader, each sample in the reader is @@ -154,11 +160,11 @@ def test(mapper=default_mapper, buffered_size=1024, useXmap=True): return reader_creator( download(DATA_URL, 'flowers', DATA_MD5), download(LABEL_URL, 'flowers', LABEL_MD5), - download(SETID_URL, 'flowers', SETID_MD5), 'trnid', mapper, - buffered_size, useXmap) + download(SETID_URL, 'flowers', SETID_MD5), TEST_FLAG, mapper, + buffered_size, use_xmap) -def valid(mapper=default_mapper, buffered_size=1024, useXmap=True): +def valid(mapper=default_mapper, buffered_size=1024, use_xmap=True): ''' Create flowers validation set reader. It returns a reader, each sample in the reader is @@ -177,8 +183,8 @@ def valid(mapper=default_mapper, buffered_size=1024, useXmap=True): return reader_creator( download(DATA_URL, 'flowers', DATA_MD5), download(LABEL_URL, 'flowers', LABEL_MD5), - download(SETID_URL, 'flowers', SETID_MD5), 'valid', mapper, - buffered_size, useXmap) + download(SETID_URL, 'flowers', SETID_MD5), VALID_FLAG, mapper, + buffered_size, use_xmap) def fetch(): From c9865824a718e8361941f669e4ca879be6c24bcb Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Thu, 29 Jun 2017 01:10:30 +0800 Subject: [PATCH 33/43] Support to init partial network parameters from the tar file. --- python/paddle/v2/parameters.py | 23 +++++---- python/paddle/v2/tests/test_parameters.py | 57 +++++++++++++++++++++-- 2 files changed, 65 insertions(+), 15 deletions(-) diff --git a/python/paddle/v2/parameters.py b/python/paddle/v2/parameters.py index ad20241b98..f730ea10bb 100644 --- a/python/paddle/v2/parameters.py +++ b/python/paddle/v2/parameters.py @@ -51,7 +51,7 @@ class Parameters(object): def __init__(self): self.__param_conf__ = dict() self.__gradient_machines__ = [] - self.__tmp_params__ = [] + self.__tmp_params__ = dict() def __append_config__(self, param_conf): """ @@ -128,13 +128,10 @@ class Parameters(object): if len(self.__gradient_machines__) == 0: # create new parameter in python numpy. 
- if len(self.__tmp_params__) != 0: - ret_list = [ - mat for name, mat in self.__tmp_params__ if name == key - ] - if len(ret_list) == 1: - return ret_list[0] - return np.ndarray(shape=shape, dtype=np.float32) + if key in self.__tmp_params__: + return self.__tmp_params__[key] + else: + return np.ndarray(shape=shape, dtype=np.float32) else: for each_gradient_machine in self.__gradient_machines__: param = __get_parameter_in_gradient_machine__( @@ -187,7 +184,7 @@ class Parameters(object): (shape, value.shape)) if len(self.__gradient_machines__) == 0: - self.__tmp_params__.append((key, value)) + self.__tmp_params__[key] = value else: for each_gradient_machine in self.__gradient_machines__: __copy_parameter_to_gradient_machine__(each_gradient_machine, @@ -231,7 +228,7 @@ class Parameters(object): raise ValueError("gradient_machine should be api.GradientMachine") if len(self.__tmp_params__) != 0: - for name, val in self.__tmp_params__: + for name, val in self.__tmp_params__.iteritems(): try: __copy_parameter_to_gradient_machine__(gradient_machine, name, val) @@ -302,6 +299,12 @@ class Parameters(object): params.deserialize(param_name, f) return params + def init_from_tar(self, f): + tar_param = self.from_tar(f) + for pname in tar_param.names(): + if pname in self.names(): + self.set(pname, tar_param.get(pname)) + def __get_parameter_in_gradient_machine__(gradient_machine, name): """ diff --git a/python/paddle/v2/tests/test_parameters.py b/python/paddle/v2/tests/test_parameters.py index 45372e7dd0..7ba8a939fb 100644 --- a/python/paddle/v2/tests/test_parameters.py +++ b/python/paddle/v2/tests/test_parameters.py @@ -20,14 +20,17 @@ import cStringIO import numpy -def __rand_param_config__(name): +def __rand_param_config__(name, psize=None): conf = ParameterConfig() conf.name = name size = 1 - for i in xrange(2): - dim = random.randint(1, 1000) - conf.dims.append(dim) - size *= dim + if psize is None: + for i in xrange(2): + dim = random.randint(1, 1000) + conf.dims.append(dim) + size *= dim + else: + size = psize conf.size = size assert conf.IsInitialized() return conf @@ -77,6 +80,50 @@ class TestParameters(unittest.TestCase): expected = numpy.array([[1, 1], [1, 2], [1, 1]], numpy.float32) assert numpy.logical_and.reduce(numpy.reshape(val == expected, 6)) + def test_init_from_tar(self): + def get_param(names, size): + p = parameters.Parameters() + for k, v in zip(names, size): + p.__append_config__(__rand_param_config__(k, v)) + for name in p.names(): + param = p.get(name) + param[:] = numpy.random.uniform( + -1.0, 1.0, size=p.get_shape(name)) + p.set(name, param) + return p + + def get_parames(): + name1 = ['param_0', 'param_1'] + size1 = [128, 256] + p1 = get_param(name1, size1) + file1 = cStringIO.StringIO() + p1.to_tar(file1) + file1.seek(0) + + name2 = ['param_0', 'param_1', 'param_2'] + size2 = [128, 256, 288] + p2 = get_param(name2, size2) + file2 = cStringIO.StringIO() + p2.to_tar(file2) + file2.seek(0) + return p1, file1, p2, file2 + + p1, file1, p2, file2 = get_parames() + p2.init_from_tar(file1) + for name in p1.names(): + self.assertEqual(p1.get_shape(name), p2.get_shape(name)) + v1 = p1.get(name) + v2 = p2.get(name) + self.assertTrue(numpy.isclose(v1, v2).all()) + + p1, file1, p2, file2 = get_parames() + p1.init_from_tar(file2) + for name in p1.names(): + self.assertEqual(p1.get_shape(name), p2.get_shape(name)) + v1 = p1.get(name) + v2 = p2.get(name) + self.assertTrue(numpy.isclose(v1, v2).all()) + if __name__ == '__main__': unittest.main() From 
555540fcc1b44323161c3dfd56a6f3fc7307433c Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Thu, 29 Jun 2017 01:11:58 +0800 Subject: [PATCH 34/43] fix typo --- paddle/py_paddle/dataprovider_converter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/py_paddle/dataprovider_converter.py b/paddle/py_paddle/dataprovider_converter.py index 218cb5ec56..43614b9779 100644 --- a/paddle/py_paddle/dataprovider_converter.py +++ b/paddle/py_paddle/dataprovider_converter.py @@ -144,7 +144,7 @@ class DenseScanner(IScanner): if len(self.__shape__) > 1: # The last-two dimenstions are the frame height and width. # For example, the layout is CHW for 3-D feature of image. - # The H and W are the fram height and width. + # The H and W are the frame height and width. h, w = self.__shape__[-2:] argument.setSlotFrameHeight(self.pos, h) argument.setSlotFrameWidth(self.pos, w) From 6215f47c7c572edd94900a9ef4b90fce6726ee70 Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Wed, 28 Jun 2017 14:44:40 -0700 Subject: [PATCH 35/43] Rename paddle/strings/ to paddle/string/ --- paddle/{strings => string}/CMakeLists.txt | 0 paddle/{strings => string}/stringpiece.cc | 0 paddle/{strings => string}/stringpiece.h | 0 paddle/{strings => string}/stringpiece_test.cc | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename paddle/{strings => string}/CMakeLists.txt (100%) rename paddle/{strings => string}/stringpiece.cc (100%) rename paddle/{strings => string}/stringpiece.h (100%) rename paddle/{strings => string}/stringpiece_test.cc (100%) diff --git a/paddle/strings/CMakeLists.txt b/paddle/string/CMakeLists.txt similarity index 100% rename from paddle/strings/CMakeLists.txt rename to paddle/string/CMakeLists.txt diff --git a/paddle/strings/stringpiece.cc b/paddle/string/stringpiece.cc similarity index 100% rename from paddle/strings/stringpiece.cc rename to paddle/string/stringpiece.cc diff --git a/paddle/strings/stringpiece.h b/paddle/string/stringpiece.h similarity index 100% rename from paddle/strings/stringpiece.h rename to paddle/string/stringpiece.h diff --git a/paddle/strings/stringpiece_test.cc b/paddle/string/stringpiece_test.cc similarity index 100% rename from paddle/strings/stringpiece_test.cc rename to paddle/string/stringpiece_test.cc From ea1d3acfb4012f491703266fa4caaf8e7e99e8c3 Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Wed, 28 Jun 2017 14:52:54 -0700 Subject: [PATCH 36/43] Rename string/stringpiece* into string/piece --- paddle/CMakeLists.txt | 2 +- paddle/string/CMakeLists.txt | 4 +- paddle/string/piece.cc | 138 +++++++++++++++++ paddle/string/{stringpiece.h => piece.h} | 64 ++++---- .../{stringpiece_test.cc => piece_test.cc} | 100 +++++++------ paddle/string/stringpiece.cc | 141 ------------------ 6 files changed, 225 insertions(+), 224 deletions(-) create mode 100644 paddle/string/piece.cc rename paddle/string/{stringpiece.h => piece.h} (57%) rename paddle/string/{stringpiece_test.cc => piece_test.cc} (77%) delete mode 100644 paddle/string/stringpiece.cc diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt index 979b68e827..307e99bbe3 100644 --- a/paddle/CMakeLists.txt +++ b/paddle/CMakeLists.txt @@ -9,7 +9,7 @@ add_subdirectory(pserver) add_subdirectory(trainer) add_subdirectory(scripts) add_subdirectory(optimizer) -add_subdirectory(strings) +add_subdirectory(string) if(Boost_FOUND) add_subdirectory(memory) diff --git a/paddle/string/CMakeLists.txt b/paddle/string/CMakeLists.txt index 4e55eecd48..0f39660a90 100644 --- a/paddle/string/CMakeLists.txt +++ 
b/paddle/string/CMakeLists.txt @@ -1,2 +1,2 @@ -cc_library(stringpiece SRCS stringpiece.cc) -cc_test(stringpiece_test SRCS stringpiece_test.cc DEPS stringpiece glog gflags) +cc_library(stringpiece SRCS piece.cc) +cc_test(stringpiece_test SRCS piece_test.cc DEPS stringpiece glog gflags) diff --git a/paddle/string/piece.cc b/paddle/string/piece.cc new file mode 100644 index 0000000000..b80afdec82 --- /dev/null +++ b/paddle/string/piece.cc @@ -0,0 +1,138 @@ +/* + Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "paddle/string/piece.h" + +#include + +#include +#include +#include + +namespace paddle { +namespace string { + +Piece::Piece() : data_(NULL), size_(0) {} + +Piece::Piece(const char* d, size_t n) : data_(d), size_(n) { + if (d == NULL && n != 0) + throw std::invalid_argument("Piece requires len to be 0 for NULL data"); +} + +Piece::Piece(const char* s) : data_(s) { size_ = (s == NULL) ? 0 : strlen(s); } + +Piece::Piece(const std::string& s) : data_(s.data()), size_(s.size()) {} + +char Piece::operator[](size_t n) const { + if (n >= len()) throw std::invalid_argument("index out of Piece length"); + return data_[n]; +} + +int Compare(Piece a, Piece b) { + const size_t min_len = (a.len() < b.len()) ? a.len() : b.len(); + int r = memcmp(a.data(), b.data(), min_len); + if (r == 0) { + if (a.len() < b.len()) + return -1; + else if (a.len() > b.len()) + return 1; + } + return r; +} + +bool operator==(Piece x, Piece y) { + return ((x.len() == y.len()) && + (x.data() == y.data() || memcmp(x.data(), y.data(), x.len()) == 0)); +} + +bool operator!=(Piece x, Piece y) { return !(x == y); } + +bool operator<(Piece x, Piece y) { return Compare(x, y) < 0; } +bool operator>(Piece x, Piece y) { return Compare(x, y) > 0; } + +bool operator<=(Piece x, Piece y) { return Compare(x, y) <= 0; } +bool operator>=(Piece x, Piece y) { return Compare(x, y) >= 0; } + +bool HasPrefix(Piece s, Piece x) { + return ((s.len() >= x.len()) && (memcmp(s.data(), x.data(), x.len()) == 0)); +} + +bool HasSuffix(Piece s, Piece x) { + return ((s.len() >= x.len()) && + (memcmp(s.data() + (s.len() - x.len()), x.data(), x.len()) == 0)); +} + +Piece SkipPrefix(Piece s, size_t n) { + if (n > s.len()) + throw std::invalid_argument("Skip distance larger than Piece length"); + return Piece(s.data() + n, s.len() - n); +} + +Piece SkipSuffix(Piece s, size_t n) { + if (n > s.len()) + throw std::invalid_argument("Skip distance larger than Piece length"); + return Piece(s.data(), s.len() - n); +} + +Piece TrimPrefix(Piece s, Piece x) { + return HasPrefix(s, x) ? SkipPrefix(s, x.len()) : s; +} + +Piece TrimSuffix(Piece s, Piece x) { + return HasSuffix(s, x) ? SkipSuffix(s, x.len()) : s; +} + +bool Contains(Piece s, Piece sub) { + return std::search(s.begin(), s.end(), sub.begin(), sub.end()) != s.end(); +} + +size_t Index(Piece s, Piece sub) { + auto e = std::search(s.begin(), s.end(), sub.begin(), sub.end()); + return e != s.end() ? 
e - s.data() : Piece::npos; +} + +size_t Find(Piece s, char c, size_t pos) { + if (pos >= s.len()) { + return Piece::npos; + } + const char* result = + reinterpret_cast(memchr(s.data() + pos, c, s.len() - pos)); + return result != nullptr ? result - s.data() : Piece::npos; +} + +size_t RFind(Piece s, char c, size_t pos) { + if (s.len() == 0) return Piece::npos; + for (const char* p = s.data() + std::min(pos, s.len() - 1); p >= s.data(); + p--) { + if (*p == c) { + return p - s.data(); + } + } + return Piece::npos; +} + +Piece SubStr(Piece s, size_t pos, size_t n) { + if (pos > s.len()) pos = s.len(); + if (n > s.len() - pos) n = s.len() - pos; + return Piece(s.data() + pos, n); +} + +std::ostream& operator<<(std::ostream& o, Piece piece) { + return o << piece.ToString(); +} + +} // namespace string +} // namespace paddle diff --git a/paddle/string/stringpiece.h b/paddle/string/piece.h similarity index 57% rename from paddle/string/stringpiece.h rename to paddle/string/piece.h index adff713e86..db7c3e6980 100644 --- a/paddle/string/stringpiece.h +++ b/paddle/string/piece.h @@ -20,33 +20,34 @@ #include namespace paddle { +namespace string { -// StringPiece points into a std::string object but doesn't own the +// Piece points into a std::string object but doesn't own the // string. It is for efficient access to strings. Like Go's string -// type. Not that StringPiece doesn't mutate the underlying string, +// type. Not that Piece doesn't mutate the underlying string, // so it is thread-safe given that the underlying string doesn't -// change. Because StringPiece contains a little data members, and +// change. Because Piece contains a little data members, and // its syntax is simple as it doesn't own/manage the string, it is -// cheap to construct StringPieces and pass them around. -class StringPiece { +// cheap to construct Pieces and pass them around. +class Piece { public: static const size_t npos = static_cast(-1); // We provide non-explicit singleton constructors so users can - // pass in a "const char*" or a "string" wherever a "StringPiece" + // pass in a "const char*" or a "string" wherever a "Piece" // is expected. These contructors ensure that if data_ is NULL, // size_ is 0. - StringPiece(); - StringPiece(const char* d, size_t n); - StringPiece(const char* d); - StringPiece(const std::string& s); + Piece(); + Piece(const char* d, size_t n); + Piece(const char* d); + Piece(const std::string& s); const char* data() const { return data_; } size_t len() const { return size_; } char operator[](size_t n) const; - // StringPiece doesn't own the string, so both iterator and const + // Piece doesn't own the string, so both iterator and const // iterator are const char* indeed. 
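A minimal usage sketch of the renamed API, assuming the header above is built together with piece.cc; everything it calls (HasSuffix, TrimSuffix, RFind, SubStr, Piece::npos, the stream operator) appears in this patch:

    #include "paddle/string/piece.h"

    #include <iostream>
    #include <string>

    int main() {
      // Piece is a non-owning view; constructing one from a std::string
      // copies no characters, so `path` is valid only while `storage` is.
      std::string storage("paddle/string/piece.h");
      paddle::string::Piece path(storage);

      if (paddle::string::HasSuffix(path, ".h")) {
        // TrimSuffix returns an adjusted view into the same storage.
        std::cout << paddle::string::TrimSuffix(path, ".h") << "\n";
      }

      // RFind/SubStr use Piece::npos for "not found", like std::string.
      size_t slash = paddle::string::RFind(path, '/', path.len() - 1);
      if (slash != paddle::string::Piece::npos) {
        std::cout << paddle::string::SubStr(path, slash + 1, path.len()) << "\n";
      }
      return 0;
    }
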
typedef const char* const_iterator; typedef const char* iterator; @@ -63,43 +64,44 @@ private: // Intentionally copyable }; -int Compare(StringPiece a, StringPiece b); +int Compare(Piece a, Piece b); -bool operator==(StringPiece x, StringPiece y); -bool operator!=(StringPiece x, StringPiece y); -bool operator<(StringPiece x, StringPiece y); -bool operator>(StringPiece x, StringPiece y); -bool operator<=(StringPiece x, StringPiece y); -bool operator>=(StringPiece x, StringPiece y); +bool operator==(Piece x, Piece y); +bool operator!=(Piece x, Piece y); +bool operator<(Piece x, Piece y); +bool operator>(Piece x, Piece y); +bool operator<=(Piece x, Piece y); +bool operator>=(Piece x, Piece y); -bool HasPrefix(StringPiece s, StringPiece prefix); -bool HasSuffix(StringPiece s, StringPiece suffix); +bool HasPrefix(Piece s, Piece prefix); +bool HasSuffix(Piece s, Piece suffix); -StringPiece SkipPrefix(StringPiece s, size_t n); -StringPiece SkipSuffix(StringPiece s, size_t n); +Piece SkipPrefix(Piece s, size_t n); +Piece SkipSuffix(Piece s, size_t n); // Skip the prefix (or suffix) if it matches with the string. -StringPiece TrimPrefix(StringPiece s, StringPiece prefix); -StringPiece TrimSuffix(StringPiece s, StringPiece suffix); +Piece TrimPrefix(Piece s, Piece prefix); +Piece TrimSuffix(Piece s, Piece suffix); // Returns if s contains sub. Any s except for empty s contains an // empty sub. -bool Contains(StringPiece s, StringPiece sub); +bool Contains(Piece s, Piece sub); // Return the first occurrence of sub in s, or npos. If both s and // sub is empty, it returns npos; otherwise, if only sub is empty, it // returns 0. -size_t Index(StringPiece s, StringPiece sub); +size_t Index(Piece s, Piece sub); // Return the first occurrence of c in s[pos:end], or npos. -size_t Find(StringPiece s, char c, size_t pos); +size_t Find(Piece s, char c, size_t pos); // Search range is [0..pos] inclusive. If pos == npos, search everything. -size_t RFind(StringPiece s, char c, size_t pos); +size_t RFind(Piece s, char c, size_t pos); -StringPiece SubStr(StringPiece s, size_t pos, size_t n); +Piece SubStr(Piece s, size_t pos, size_t n); -// allow StringPiece to be logged -std::ostream& operator<<(std::ostream& o, StringPiece piece); +// allow Piece to be logged +std::ostream& operator<<(std::ostream& o, Piece piece); +} // namespace string } // namespace paddle diff --git a/paddle/string/stringpiece_test.cc b/paddle/string/piece_test.cc similarity index 77% rename from paddle/string/stringpiece_test.cc rename to paddle/string/piece_test.cc index 2ba66a04f6..cf5152ff5a 100644 --- a/paddle/string/stringpiece_test.cc +++ b/paddle/string/piece_test.cc @@ -14,7 +14,7 @@ limitations under the License. 
*/ -#include "paddle/strings/stringpiece.h" +#include "paddle/string/piece.h" #include @@ -22,42 +22,44 @@ TEST(StringPiece, Construct) { { - paddle::StringPiece s; + paddle::string::Piece s; EXPECT_EQ(NULL, s.data()); EXPECT_EQ(0U, s.len()); } - { EXPECT_THROW(paddle::StringPiece s(NULL, 10000U), std::invalid_argument); } { - paddle::StringPiece s(NULL); + EXPECT_THROW(paddle::string::Piece s(NULL, 10000U), std::invalid_argument); + } + { + paddle::string::Piece s(NULL); EXPECT_EQ(0U, s.len()); } { std::string a; EXPECT_EQ(0U, a.size()); - paddle::StringPiece s(a); + paddle::string::Piece s(a); EXPECT_EQ(0U, s.len()); } } TEST(StringPiece, CopyAndAssign) { - paddle::StringPiece empty; + paddle::string::Piece empty; EXPECT_EQ(0U, empty.len()); - paddle::StringPiece a("hello"); - paddle::StringPiece b = a; + paddle::string::Piece a("hello"); + paddle::string::Piece b = a; EXPECT_EQ(b.len(), strlen("hello")); EXPECT_EQ(a, b); std::string storage("hello"); - paddle::StringPiece c(storage); + paddle::string::Piece c(storage); EXPECT_EQ(a, c); EXPECT_NE(a.data(), c.data()); } TEST(StringPiece, Compare) { { - paddle::StringPiece a("hello"); - paddle::StringPiece b("world"); + paddle::string::Piece a("hello"); + paddle::string::Piece b("world"); EXPECT_TRUE(a != b); EXPECT_FALSE(a == b); EXPECT_TRUE(a < b); @@ -68,7 +70,7 @@ TEST(StringPiece, Compare) { EXPECT_GT(Compare(b, a), 0); } { - paddle::StringPiece a, b; + paddle::string::Piece a, b; EXPECT_TRUE(a == b); EXPECT_FALSE(a != b); EXPECT_FALSE(a < b); @@ -82,31 +84,31 @@ TEST(StringPiece, Compare) { TEST(StringPiece, ToString) { { - paddle::StringPiece s; + paddle::string::Piece s; EXPECT_EQ(std::string(""), s.ToString()); } { - paddle::StringPiece s(NULL); + paddle::string::Piece s(NULL); EXPECT_EQ(std::string(""), s.ToString()); } { - paddle::StringPiece s("hello"); + paddle::string::Piece s("hello"); EXPECT_EQ(std::string("hello"), s.ToString()); } } TEST(StringPiece, HasPrefixSuffix) { - using paddle::HasPrefix; - using paddle::HasSuffix; + using paddle::string::HasPrefix; + using paddle::string::HasSuffix; { - paddle::StringPiece s; + paddle::string::Piece s; EXPECT_FALSE(HasPrefix(s, "something")); EXPECT_TRUE(HasPrefix(s, "")); EXPECT_FALSE(HasSuffix(s, "something")); EXPECT_TRUE(HasSuffix(s, "")); } { - paddle::StringPiece s("app"); + paddle::string::Piece s("app"); EXPECT_TRUE(HasPrefix(s, "")); EXPECT_TRUE(HasPrefix(s, "a")); EXPECT_TRUE(HasPrefix(s, "ap")); @@ -120,10 +122,10 @@ TEST(StringPiece, HasPrefixSuffix) { } TEST(StringPiece, SkipPrefixSuffix) { - using paddle::SkipPrefix; - using paddle::SkipSuffix; + using paddle::string::SkipPrefix; + using paddle::string::SkipSuffix; { - paddle::StringPiece s; + paddle::string::Piece s; EXPECT_EQ("", SkipPrefix(s, 0)); EXPECT_THROW(SkipPrefix(s, 1), std::invalid_argument); @@ -131,7 +133,7 @@ TEST(StringPiece, SkipPrefixSuffix) { EXPECT_THROW(SkipSuffix(s, 1), std::invalid_argument); } { - paddle::StringPiece s("app"); + paddle::string::Piece s("app"); EXPECT_EQ("app", SkipPrefix(s, 0)); EXPECT_EQ("pp", SkipPrefix(s, 1)); EXPECT_EQ("p", SkipPrefix(s, 2)); @@ -147,10 +149,10 @@ TEST(StringPiece, SkipPrefixSuffix) { } TEST(StringPiece, TrimPrefixSuffix) { - using paddle::TrimPrefix; - using paddle::TrimSuffix; + using paddle::string::TrimPrefix; + using paddle::string::TrimSuffix; { - paddle::StringPiece s; + paddle::string::Piece s; EXPECT_EQ("", TrimPrefix(s, "")); EXPECT_EQ("", TrimPrefix(s, "something")); @@ -158,7 +160,7 @@ TEST(StringPiece, TrimPrefixSuffix) { EXPECT_EQ("", 
TrimSuffix(s, "something")); } { - paddle::StringPiece s("app"); + paddle::string::Piece s("app"); EXPECT_EQ("app", TrimPrefix(s, "")); EXPECT_EQ("pp", TrimPrefix(s, "a")); EXPECT_EQ("p", TrimPrefix(s, "ap")); @@ -174,14 +176,14 @@ TEST(StringPiece, TrimPrefixSuffix) { } TEST(StringPiece, Contains) { - using paddle::Contains; + using paddle::string::Contains; { - paddle::StringPiece s; + paddle::string::Piece s; EXPECT_FALSE(Contains(s, "")); EXPECT_FALSE(Contains(s, "something")); } { - paddle::StringPiece s("app"); + paddle::string::Piece s("app"); EXPECT_TRUE(Contains(s, "")); EXPECT_TRUE(Contains(s, "a")); EXPECT_TRUE(Contains(s, "p")); @@ -193,15 +195,15 @@ TEST(StringPiece, Contains) { } TEST(StringPiece, Index) { - using paddle::Index; - auto npos = paddle::StringPiece::npos; + using paddle::string::Index; + auto npos = paddle::string::Piece::npos; { - paddle::StringPiece s; + paddle::string::Piece s; EXPECT_EQ(npos, Index(s, "")); EXPECT_EQ(npos, Index(s, "something")); } { - paddle::StringPiece s("app"); + paddle::string::Piece s("app"); EXPECT_EQ(0U, Index(s, "")); EXPECT_EQ(0U, Index(s, "a")); EXPECT_EQ(1U, Index(s, "p")); @@ -213,14 +215,14 @@ TEST(StringPiece, Index) { } TEST(StringPiece, Find) { - using paddle::Find; - auto npos = paddle::StringPiece::npos; + using paddle::string::Find; + auto npos = paddle::string::Piece::npos; { - paddle::StringPiece s; + paddle::string::Piece s; EXPECT_EQ(npos, Find(s, 'a', 0U)); } { - paddle::StringPiece s("app"); + paddle::string::Piece s("app"); EXPECT_EQ(0U, Find(s, 'a', 0U)); EXPECT_EQ(1U, Find(s, 'p', 0U)); EXPECT_EQ(1U, Find(s, 'p', 1U)); @@ -230,14 +232,14 @@ TEST(StringPiece, Find) { } TEST(StringPiece, RFind) { - using paddle::RFind; - auto npos = paddle::StringPiece::npos; + using paddle::string::RFind; + auto npos = paddle::string::Piece::npos; { - paddle::StringPiece s; + paddle::string::Piece s; EXPECT_EQ(npos, RFind(s, 'a', 0U)); } { - paddle::StringPiece s("app"); + paddle::string::Piece s("app"); EXPECT_EQ(2U, RFind(s, 'p', 2U)); EXPECT_EQ(0U, RFind(s, 'a', 2U)); EXPECT_EQ(1U, RFind(s, 'p', 1U)); @@ -247,15 +249,15 @@ TEST(StringPiece, RFind) { } TEST(StringPiece, SubStr) { - using paddle::SubStr; + using paddle::string::SubStr; { - paddle::StringPiece s; + paddle::string::Piece s; EXPECT_EQ("", SubStr(s, 0, 0)); EXPECT_EQ("", SubStr(s, 0, 1)); EXPECT_EQ("", SubStr(s, 1, 0)); } { - paddle::StringPiece s("app"); + paddle::string::Piece s("app"); EXPECT_EQ("", SubStr(s, 0, 0)); EXPECT_EQ("", SubStr(s, 1, 0)); EXPECT_EQ("", SubStr(s, 2, 0)); @@ -279,15 +281,15 @@ TEST(StringPiece, SubStr) { } TEST(StringPiece, StreamOutput) { - using paddle::StringPiece; + using paddle::string::Piece; std::stringstream o; - o << StringPiece(); + o << paddle::string::Piece(); EXPECT_EQ("", o.str()); - o << StringPiece("hello"); + o << paddle::string::Piece("hello"); EXPECT_EQ("hello", o.str()); - o << StringPiece(); + o << paddle::string::Piece(); EXPECT_EQ("hello", o.str()); } diff --git a/paddle/string/stringpiece.cc b/paddle/string/stringpiece.cc deleted file mode 100644 index 415b3558d5..0000000000 --- a/paddle/string/stringpiece.cc +++ /dev/null @@ -1,141 +0,0 @@ -/* - Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -#include "paddle/strings/stringpiece.h" - -#include - -#include -#include -#include - -namespace paddle { - -StringPiece::StringPiece() : data_(NULL), size_(0) {} - -StringPiece::StringPiece(const char* d, size_t n) : data_(d), size_(n) { - if (d == NULL && n != 0) - throw std::invalid_argument( - "StringPiece requires len to be 0 for NULL data"); -} - -StringPiece::StringPiece(const char* s) : data_(s) { - size_ = (s == NULL) ? 0 : strlen(s); -} - -StringPiece::StringPiece(const std::string& s) - : data_(s.data()), size_(s.size()) {} - -char StringPiece::operator[](size_t n) const { - if (n >= len()) - throw std::invalid_argument("index out of StringPiece length"); - return data_[n]; -} - -int Compare(StringPiece a, StringPiece b) { - const size_t min_len = (a.len() < b.len()) ? a.len() : b.len(); - int r = memcmp(a.data(), b.data(), min_len); - if (r == 0) { - if (a.len() < b.len()) - return -1; - else if (a.len() > b.len()) - return 1; - } - return r; -} - -bool operator==(StringPiece x, StringPiece y) { - return ((x.len() == y.len()) && - (x.data() == y.data() || memcmp(x.data(), y.data(), x.len()) == 0)); -} - -bool operator!=(StringPiece x, StringPiece y) { return !(x == y); } - -bool operator<(StringPiece x, StringPiece y) { return Compare(x, y) < 0; } -bool operator>(StringPiece x, StringPiece y) { return Compare(x, y) > 0; } - -bool operator<=(StringPiece x, StringPiece y) { return Compare(x, y) <= 0; } -bool operator>=(StringPiece x, StringPiece y) { return Compare(x, y) >= 0; } - -bool HasPrefix(StringPiece s, StringPiece x) { - return ((s.len() >= x.len()) && (memcmp(s.data(), x.data(), x.len()) == 0)); -} - -bool HasSuffix(StringPiece s, StringPiece x) { - return ((s.len() >= x.len()) && - (memcmp(s.data() + (s.len() - x.len()), x.data(), x.len()) == 0)); -} - -StringPiece SkipPrefix(StringPiece s, size_t n) { - if (n > s.len()) - throw std::invalid_argument("Skip distance larger than StringPiece length"); - return StringPiece(s.data() + n, s.len() - n); -} - -StringPiece SkipSuffix(StringPiece s, size_t n) { - if (n > s.len()) - throw std::invalid_argument("Skip distance larger than StringPiece length"); - return StringPiece(s.data(), s.len() - n); -} - -StringPiece TrimPrefix(StringPiece s, StringPiece x) { - return HasPrefix(s, x) ? SkipPrefix(s, x.len()) : s; -} - -StringPiece TrimSuffix(StringPiece s, StringPiece x) { - return HasSuffix(s, x) ? SkipSuffix(s, x.len()) : s; -} - -bool Contains(StringPiece s, StringPiece sub) { - return std::search(s.begin(), s.end(), sub.begin(), sub.end()) != s.end(); -} - -size_t Index(StringPiece s, StringPiece sub) { - auto e = std::search(s.begin(), s.end(), sub.begin(), sub.end()); - return e != s.end() ? e - s.data() : StringPiece::npos; -} - -size_t Find(StringPiece s, char c, size_t pos) { - if (pos >= s.len()) { - return StringPiece::npos; - } - const char* result = - reinterpret_cast(memchr(s.data() + pos, c, s.len() - pos)); - return result != nullptr ? 
result - s.data() : StringPiece::npos; -} - -size_t RFind(StringPiece s, char c, size_t pos) { - if (s.len() == 0) return StringPiece::npos; - for (const char* p = s.data() + std::min(pos, s.len() - 1); p >= s.data(); - p--) { - if (*p == c) { - return p - s.data(); - } - } - return StringPiece::npos; -} - -StringPiece SubStr(StringPiece s, size_t pos, size_t n) { - if (pos > s.len()) pos = s.len(); - if (n > s.len() - pos) n = s.len() - pos; - return StringPiece(s.data() + pos, n); -} - -std::ostream& operator<<(std::ostream& o, StringPiece piece) { - return o << piece.ToString(); -} - -} // namespace paddle From 6cb7cb36911ec36be344a5800c142284983ae2f6 Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Wed, 28 Jun 2017 17:23:17 -0700 Subject: [PATCH 37/43] Add paddle/string/printf and tests --- paddle/string/CMakeLists.txt | 2 + paddle/string/printf.h | 105 +++ paddle/string/printf_test.cc | 16 + paddle/string/tinyformat/tinyformat.h | 902 ++++++++++++++++++++++++++ 4 files changed, 1025 insertions(+) create mode 100644 paddle/string/printf.h create mode 100644 paddle/string/printf_test.cc create mode 100644 paddle/string/tinyformat/tinyformat.h diff --git a/paddle/string/CMakeLists.txt b/paddle/string/CMakeLists.txt index 0f39660a90..5becf62672 100644 --- a/paddle/string/CMakeLists.txt +++ b/paddle/string/CMakeLists.txt @@ -1,2 +1,4 @@ cc_library(stringpiece SRCS piece.cc) cc_test(stringpiece_test SRCS piece_test.cc DEPS stringpiece glog gflags) + +cc_test(stringprintf_test SRCS printf_test.cc DEPS glog gflags) diff --git a/paddle/string/printf.h b/paddle/string/printf.h new file mode 100644 index 0000000000..0767f8f5b5 --- /dev/null +++ b/paddle/string/printf.h @@ -0,0 +1,105 @@ +/* + Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Compared with std::stringstream, there are primary purpose of +// string::Printf: +// +// 1. Type-safe printing, with why and how explained in +// http://www.drdobbs.com/stringprintf-a-typesafe-printf-family-fo/184401999. +// Implementation includes +// +// https://github.com/c42f/tinyformat +// boost::format +// std::stringstream +// +// std::stringstream is not convenient enough in many cases. For example: +// +// std::cout << std::setprecision(2) << std::fixed << 1.23456 << "\n"; +// +// boost::format is the most convenient one. We can have +// +// std::cout << format("%2% %1%") % 36 % 77; +// +// or +// +// format fmter("%2% %1%"); +// fmter % 36; fmter % 77; +// std::cout << fmter.c_str(); +// +// But the overloading of % might be overkilling and it would be +// more efficient if it can write to std::cout directly. +// +// tinyformat has an interface compatible with the C-printf style, +// and it can writes to a stream or returns a std::string: +// +// std::cout << tfm::printf( +// "%s, %s %d, %.2d:%.2d\n", +// weekday, month, day, hour, min); +// +// or +// +// tfm::format(std::cout, +// "%s, %s %d, %.2d:%.2d\n", +// weekday, month, day, hour, min); +// +// 2. 
High-performance -- most printed strings are not too long and +// doens't need dynamic memory allocation. Many StringPrintf +// implementations doesn't enforce type-safe, but are +// high-performance, including +// +// https://developers.google.com/optimization/reference/base/stringprintf/ +// https://github.com/adobe/chromium/blob/master/base/stringprintf.h +// https://github.com/google/protobuf/blob/master/src/google/protobuf/stubs/stringprintf.h +// +// According to +// https://github.com/c42f/tinyformat#compile-time-and-code-bloat, +// boost::format runs too slow and results in large executable binary +// files. So here we port tinyformat. + +#pragma once + +#include +#include +#include "paddle/string/tinyformat/tinyformat.h" // https://github.com/c42f/tinyformat + +namespace paddle { +namespace string { + +template +void Fprintf(std::ostream& out, const char* fmt, const Args&... args) { + tinyformat::vformat(out, fmt, makeFormatList(args...)); +} + +template +std::string Sprintf(const char* fmt, const Args&... args) { + std::ostringstream oss; + tinyformat::format(oss, fmt, args...); + return oss.str(); +} + +template +void printf(const char* fmt, const Args&... args) { + tinyformat::format(std::cout, fmt, args...); +} + +template +void printfln(const char* fmt, const Args&... args) { + tinyformat::format(std::cout, fmt, args...); + std::cout << '\n'; +} + +} // namespace string +} // namespace paddle diff --git a/paddle/string/printf_test.cc b/paddle/string/printf_test.cc new file mode 100644 index 0000000000..d8f2454165 --- /dev/null +++ b/paddle/string/printf_test.cc @@ -0,0 +1,16 @@ +#include "paddle/string/printf.h" + +#include + +#include "gtest/gtest.h" + +TEST(StringPrintf, StringPrintf) { + std::string weekday = "Wednesday"; + const char* month = "July"; + size_t day = 27; + long hour = 14; + int min = 44; + EXPECT_EQ(std::string("Wednesday, July 27, 14:44"), + paddle::string::Sprintf( + "%s, %s %d, %.2d:%.2d", weekday, month, day, hour, min)); +} diff --git a/paddle/string/tinyformat/tinyformat.h b/paddle/string/tinyformat/tinyformat.h new file mode 100644 index 0000000000..f0e5e0160f --- /dev/null +++ b/paddle/string/tinyformat/tinyformat.h @@ -0,0 +1,902 @@ +// tinyformat.h +// Copyright (C) 2011, Chris Foster [chris42f (at) gmail (d0t) com] +// +// Boost Software License - Version 1.0 +// +// Permission is hereby granted, free of charge, to any person or organization +// obtaining a copy of the software and accompanying documentation covered by +// this license (the "Software") to use, reproduce, display, distribute, +// execute, and transmit the Software, and to prepare derivative works of the +// Software, and to permit third-parties to whom the Software is furnished to +// do so, all subject to the following: +// +// The copyright notices in the Software and this entire statement, including +// the above license grant, this restriction and the following disclaimer, +// must be included in all copies of the Software, in whole or in part, and +// all derivative works of the Software, unless such copies or derivative +// works are solely in the form of machine-executable object code generated by +// a source language processor. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. 
IN NO EVENT +// SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +// FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +//------------------------------------------------------------------------------ +// Tinyformat: A minimal type safe printf replacement +// +// tinyformat.h is a type safe printf replacement library in a single C++ +// header file. Design goals include: +// +// * Type safety and extensibility for user defined types. +// * C99 printf() compatibility, to the extent possible using std::ostream +// * Simplicity and minimalism. A single header file to include and distribute +// with your projects. +// * Augment rather than replace the standard stream formatting mechanism +// * C++98 support, with optional C++11 niceties +// +// +// Main interface example usage +// ---------------------------- +// +// To print a date to std::cout: +// +// std::string weekday = "Wednesday"; +// const char* month = "July"; +// size_t day = 27; +// long hour = 14; +// int min = 44; +// +// tfm::printf("%s, %s %d, %.2d:%.2d\n", weekday, month, day, hour, min); +// +// The strange types here emphasize the type safety of the interface; it is +// possible to print a std::string using the "%s" conversion, and a +// size_t using the "%d" conversion. A similar result could be achieved +// using either of the tfm::format() functions. One prints on a user provided +// stream: +// +// tfm::format(std::cerr, "%s, %s %d, %.2d:%.2d\n", +// weekday, month, day, hour, min); +// +// The other returns a std::string: +// +// std::string date = tfm::format("%s, %s %d, %.2d:%.2d\n", +// weekday, month, day, hour, min); +// std::cout << date; +// +// These are the three primary interface functions. There is also a +// convenience function printfln() which appends a newline to the usual result +// of printf() for super simple logging. +// +// +// User defined format functions +// ----------------------------- +// +// Simulating variadic templates in C++98 is pretty painful since it requires +// writing out the same function for each desired number of arguments. To make +// this bearable tinyformat comes with a set of macros which are used +// internally to generate the API, but which may also be used in user code. +// +// The three macros TINYFORMAT_ARGTYPES(n), TINYFORMAT_VARARGS(n) and +// TINYFORMAT_PASSARGS(n) will generate a list of n argument types, +// type/name pairs and argument names respectively when called with an integer +// n between 1 and 16. We can use these to define a macro which generates the +// desired user defined function with n arguments. To generate all 16 user +// defined function bodies, use the macro TINYFORMAT_FOREACH_ARGNUM. For an +// example, see the implementation of printf() at the end of the source file. +// +// Sometimes it's useful to be able to pass a list of format arguments through +// to a non-template function. The FormatList class is provided as a way to do +// this by storing the argument list in a type-opaque way. 
Continuing the
+// example from above, we construct a FormatList using makeFormatList():
+//
+// FormatListRef formatList = tfm::makeFormatList(weekday, month, day, hour,
+// min);
+//
+// The format list can now be passed into any non-template function and used
+// via a call to the vformat() function:
+//
+// tfm::vformat(std::cout, "%s, %s %d, %.2d:%.2d\n", formatList);
+//
+//
+// Additional API information
+// --------------------------
+//
+// Error handling: Define TINYFORMAT_ERROR to customize the error handling for
+// format strings which are unsupported or have the wrong number of format
+// specifiers (calls assert() by default).
+//
+// User defined types: Uses operator<< for user defined types by default.
+// Overload formatValue() for more control.
+
+#pragma once
+
+#include <algorithm>
+#include <cassert>
+#include <iostream>
+#include <sstream>
+
+namespace paddle {
+namespace string {
+namespace tinyformat {
+
+#ifndef TINYFORMAT_ERROR
+#define TINYFORMAT_ERROR(reason) assert(0 && reason)
+#endif
+
+//------------------------------------------------------------------------------
+namespace detail {
+
+// Test whether type T1 is convertible to type T2
+template <typename T1, typename T2>
+struct is_convertible {
+private:
+  // two types of different size
+  struct fail {
+    char dummy[2];
+  };
+  struct succeed {
+    char dummy;
+  };
+  // Try to convert a T1 to a T2 by plugging into tryConvert
+  static fail tryConvert(...);
+  static succeed tryConvert(const T2 &);
+  static const T1 &makeT1();
+
+public:
+  // Standard trick: the (...) version of tryConvert will be chosen from
+  // the overload set only if the version taking a T2 doesn't match.
+  // Then we compare the sizes of the return types to check which
+  // function matched. Very neat, in a disgusting kind of way :)
+  static const bool value = sizeof(tryConvert(makeT1())) == sizeof(succeed);
+};
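+
+// (Note: this hand-rolled trait predates C++11; std::is_convertible<T1, T2>
+// from <type_traits> computes the same value, but the version above keeps the
+// header usable in C++98 builds.)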
+
+// Format the value by casting to type fmtT. This default implementation
+// should never be called.
+template <typename T,
+          typename fmtT,
+          bool convertible = is_convertible<T, fmtT>::value>
+struct formatValueAsType {
+  static void invoke(std::ostream & /*out*/, const T & /*value*/) { assert(0); }
+};
+// Specialized version for types that can actually be converted to fmtT, as
+// indicated by the "convertible" template parameter.
+template <typename T, typename fmtT>
+struct formatValueAsType<T, fmtT, true> {
+  static void invoke(std::ostream &out, const T &value) {
+    out << static_cast<fmtT>(value);
+  }
+};
+
+// Convert an arbitrary type to integer. The version with convertible=false
+// throws an error.
+template <typename T, bool convertible = is_convertible<T, int>::value>
+struct convertToInt {
+  static int invoke(const T & /*value*/) {
+    TINYFORMAT_ERROR(
+        "tinyformat: Cannot convert from argument type to "
+        "integer for use as variable width or precision");
+    return 0;
+  }
+};
+// Specialization for convertToInt when conversion is possible
+template <typename T>
+struct convertToInt<T, true> {
+  static int invoke(const T &value) { return static_cast<int>(value); }
+};
+
+// Format at most ntrunc characters to the given stream.
+template <typename T>
+inline void formatTruncated(std::ostream &out, const T &value, int ntrunc) {
+  std::ostringstream tmp;
+  tmp << value;
+  std::string result = tmp.str();
+  out.write(result.c_str(),
+            (std::min)(ntrunc, static_cast<int>(result.size())));
+}
+#define TINYFORMAT_DEFINE_FORMAT_TRUNCATED_CSTR(type)                       \
+  inline void formatTruncated(std::ostream &out, type *value, int ntrunc) { \
+    std::streamsize len = 0;                                                 \
+    while (len < ntrunc && value[len] != 0) ++len;                           \
+    out.write(value, len);                                                   \
+  }
+// Overload for const char* and char*. Could overload for signed & unsigned
+// char too, but these are technically unneeded for printf compatibility.
+TINYFORMAT_DEFINE_FORMAT_TRUNCATED_CSTR(const char)
+TINYFORMAT_DEFINE_FORMAT_TRUNCATED_CSTR(char)
+#undef TINYFORMAT_DEFINE_FORMAT_TRUNCATED_CSTR
+
+}  // namespace detail
+
+//------------------------------------------------------------------------------
+// Variable formatting functions. May be overridden for user-defined types if
+// desired.
+
+/// Format a value into a stream, delegating to operator<< by default.
+///
+/// Users may override this for their own types. When this function is called,
+/// the stream flags will have been modified according to the format string.
+/// The format specification is provided in the range [fmtBegin, fmtEnd). For
+/// truncating conversions, ntrunc is set to the desired maximum number of
+/// characters, for example "%.7s" calls formatValue with ntrunc = 7.
+///
+/// By default, formatValue() uses the usual stream insertion operator
+/// operator<< to format the type T, with special cases for the %c and %p
+/// conversions.
+template <typename T>
+inline void formatValue(std::ostream &out,
+                        const char * /*fmtBegin*/,
+                        const char *fmtEnd,
+                        int ntrunc,
+                        const T &value) {
+  // The mess here is to support the %c and %p conversions: if these
+  // conversions are active we try to convert the type to a char or const
+  // void* respectively and format that instead of the value itself. For the
+  // %p conversion it's important to avoid dereferencing the pointer, which
+  // could otherwise lead to a crash when printing a dangling (const char*).
+  const bool canConvertToChar = detail::is_convertible<T, char>::value;
+  const bool canConvertToVoidPtr =
+      detail::is_convertible<T, const void *>::value;
+  if (canConvertToChar && *(fmtEnd - 1) == 'c')
+    detail::formatValueAsType<T, char>::invoke(out, value);
+  else if (canConvertToVoidPtr && *(fmtEnd - 1) == 'p')
+    detail::formatValueAsType<T, const void *>::invoke(out, value);
+  else if (ntrunc >= 0) {
+    // Take care not to overread C strings in truncating conversions like
+    // "%.4s" where at most 4 characters may be read.
+    detail::formatTruncated(out, value, ntrunc);
+  } else
+    out << value;
+}
+
+// Overloaded version for char types to support printing as an integer
+#define TINYFORMAT_DEFINE_FORMATVALUE_CHAR(charType) \
+  inline void formatValue(std::ostream &out,         \
+                          const char * /*fmtBegin*/, \
+                          const char *fmtEnd,        \
+                          int /**/,                  \
+                          charType value) {          \
+    switch (*(fmtEnd - 1)) {                         \
+      case 'u':                                      \
+      case 'd':                                      \
+      case 'i':                                      \
+      case 'o':                                      \
+      case 'X':                                      \
+      case 'x':                                      \
+        out << static_cast<int>(value);              \
+        break;                                       \
+      default:                                       \
+        out << value;                                \
+        break;                                       \
+    }                                                \
+  }
+// per 3.9.1: char, signed char and unsigned char are all distinct types
+TINYFORMAT_DEFINE_FORMATVALUE_CHAR(char)
+TINYFORMAT_DEFINE_FORMATVALUE_CHAR(signed char)
+TINYFORMAT_DEFINE_FORMATVALUE_CHAR(unsigned char)
+#undef TINYFORMAT_DEFINE_FORMATVALUE_CHAR
+
+//------------------------------------------------------------------------------
+// Tools for emulating variadic templates in C++98. The basic idea here is
+// stolen from the boost preprocessor metaprogramming library and cut down to
+// be just general enough for what we need.
+
+#define TINYFORMAT_ARGTYPES(n) TINYFORMAT_ARGTYPES_##n
+#define TINYFORMAT_VARARGS(n) TINYFORMAT_VARARGS_##n
+#define TINYFORMAT_PASSARGS(n) TINYFORMAT_PASSARGS_##n
+#define TINYFORMAT_PASSARGS_TAIL(n) TINYFORMAT_PASSARGS_TAIL_##n
+
+// To keep it as transparent as possible, the macros below have been generated
+// using python via the excellent cog.py code generation script.
This avoids +// the need for a bunch of complex (but more general) preprocessor tricks as +// used in boost.preprocessor. +// +// To rerun the code generation in place, use `cog.py -r tinyformat.h` +// (see http://nedbatchelder.com/code/cog). Alternatively you can just create +// extra versions by hand. + +/*[[[cog +maxParams = 16 + +def makeCommaSepLists(lineTemplate, elemTemplate, startInd=1): + for j in range(startInd,maxParams+1): + list = ', '.join([elemTemplate % {'i':i} for i in range(startInd,j+1)]) + cog.outl(lineTemplate % {'j':j, 'list':list}) + +makeCommaSepLists('#define TINYFORMAT_ARGTYPES_%(j)d %(list)s', + 'class T%(i)d') + +cog.outl() +makeCommaSepLists('#define TINYFORMAT_VARARGS_%(j)d %(list)s', + 'const T%(i)d& v%(i)d') + +cog.outl() +makeCommaSepLists('#define TINYFORMAT_PASSARGS_%(j)d %(list)s', 'v%(i)d') + +cog.outl() +cog.outl('#define TINYFORMAT_PASSARGS_TAIL_1') +makeCommaSepLists('#define TINYFORMAT_PASSARGS_TAIL_%(j)d , %(list)s', + 'v%(i)d', startInd = 2) + +cog.outl() +cog.outl('#define TINYFORMAT_FOREACH_ARGNUM(m) \\\n ' + + ' '.join(['m(%d)' % (j,) for j in range(1,maxParams+1)])) +]]]*/ +#define TINYFORMAT_ARGTYPES_1 class T1 +#define TINYFORMAT_ARGTYPES_2 class T1, class T2 +#define TINYFORMAT_ARGTYPES_3 class T1, class T2, class T3 +#define TINYFORMAT_ARGTYPES_4 class T1, class T2, class T3, class T4 +#define TINYFORMAT_ARGTYPES_5 class T1, class T2, class T3, class T4, class T5 +#define TINYFORMAT_ARGTYPES_6 \ + class T1, class T2, class T3, class T4, class T5, class T6 +#define TINYFORMAT_ARGTYPES_7 \ + class T1, class T2, class T3, class T4, class T5, class T6, class T7 +#define TINYFORMAT_ARGTYPES_8 \ + class T1, class T2, class T3, class T4, class T5, class T6, class T7, class T8 +#define TINYFORMAT_ARGTYPES_9 \ + class T1, class T2, class T3, class T4, class T5, class T6, class T7, \ + class T8, class T9 +#define TINYFORMAT_ARGTYPES_10 \ + class T1, class T2, class T3, class T4, class T5, class T6, class T7, \ + class T8, class T9, class T10 +#define TINYFORMAT_ARGTYPES_11 \ + class T1, class T2, class T3, class T4, class T5, class T6, class T7, \ + class T8, class T9, class T10, class T11 +#define TINYFORMAT_ARGTYPES_12 \ + class T1, class T2, class T3, class T4, class T5, class T6, class T7, \ + class T8, class T9, class T10, class T11, class T12 +#define TINYFORMAT_ARGTYPES_13 \ + class T1, class T2, class T3, class T4, class T5, class T6, class T7, \ + class T8, class T9, class T10, class T11, class T12, class T13 +#define TINYFORMAT_ARGTYPES_14 \ + class T1, class T2, class T3, class T4, class T5, class T6, class T7, \ + class T8, class T9, class T10, class T11, class T12, class T13, \ + class T14 +#define TINYFORMAT_ARGTYPES_15 \ + class T1, class T2, class T3, class T4, class T5, class T6, class T7, \ + class T8, class T9, class T10, class T11, class T12, class T13, \ + class T14, class T15 +#define TINYFORMAT_ARGTYPES_16 \ + class T1, class T2, class T3, class T4, class T5, class T6, class T7, \ + class T8, class T9, class T10, class T11, class T12, class T13, \ + class T14, class T15, class T16 + +#define TINYFORMAT_VARARGS_1 const T1 &v1 +#define TINYFORMAT_VARARGS_2 const T1 &v1, const T2 &v2 +#define TINYFORMAT_VARARGS_3 const T1 &v1, const T2 &v2, const T3 &v3 +#define TINYFORMAT_VARARGS_4 \ + const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4 +#define TINYFORMAT_VARARGS_5 \ + const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4, const T5 &v5 +#define TINYFORMAT_VARARGS_6 \ + const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4, 
const T5 &v5, \ + const T6 &v6 +#define TINYFORMAT_VARARGS_7 \ + const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4, const T5 &v5, \ + const T6 &v6, const T7 &v7 +#define TINYFORMAT_VARARGS_8 \ + const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4, const T5 &v5, \ + const T6 &v6, const T7 &v7, const T8 &v8 +#define TINYFORMAT_VARARGS_9 \ + const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4, const T5 &v5, \ + const T6 &v6, const T7 &v7, const T8 &v8, const T9 &v9 +#define TINYFORMAT_VARARGS_10 \ + const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4, const T5 &v5, \ + const T6 &v6, const T7 &v7, const T8 &v8, const T9 &v9, const T10 &v10 +#define TINYFORMAT_VARARGS_11 \ + const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4, const T5 &v5, \ + const T6 &v6, const T7 &v7, const T8 &v8, const T9 &v9, const T10 &v10, \ + const T11 &v11 +#define TINYFORMAT_VARARGS_12 \ + const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4, const T5 &v5, \ + const T6 &v6, const T7 &v7, const T8 &v8, const T9 &v9, const T10 &v10, \ + const T11 &v11, const T12 &v12 +#define TINYFORMAT_VARARGS_13 \ + const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4, const T5 &v5, \ + const T6 &v6, const T7 &v7, const T8 &v8, const T9 &v9, const T10 &v10, \ + const T11 &v11, const T12 &v12, const T13 &v13 +#define TINYFORMAT_VARARGS_14 \ + const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4, const T5 &v5, \ + const T6 &v6, const T7 &v7, const T8 &v8, const T9 &v9, const T10 &v10, \ + const T11 &v11, const T12 &v12, const T13 &v13, const T14 &v14 +#define TINYFORMAT_VARARGS_15 \ + const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4, const T5 &v5, \ + const T6 &v6, const T7 &v7, const T8 &v8, const T9 &v9, const T10 &v10, \ + const T11 &v11, const T12 &v12, const T13 &v13, const T14 &v14, \ + const T15 &v15 +#define TINYFORMAT_VARARGS_16 \ + const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4, const T5 &v5, \ + const T6 &v6, const T7 &v7, const T8 &v8, const T9 &v9, const T10 &v10, \ + const T11 &v11, const T12 &v12, const T13 &v13, const T14 &v14, \ + const T15 &v15, const T16 &v16 + +#define TINYFORMAT_PASSARGS_1 v1 +#define TINYFORMAT_PASSARGS_2 v1, v2 +#define TINYFORMAT_PASSARGS_3 v1, v2, v3 +#define TINYFORMAT_PASSARGS_4 v1, v2, v3, v4 +#define TINYFORMAT_PASSARGS_5 v1, v2, v3, v4, v5 +#define TINYFORMAT_PASSARGS_6 v1, v2, v3, v4, v5, v6 +#define TINYFORMAT_PASSARGS_7 v1, v2, v3, v4, v5, v6, v7 +#define TINYFORMAT_PASSARGS_8 v1, v2, v3, v4, v5, v6, v7, v8 +#define TINYFORMAT_PASSARGS_9 v1, v2, v3, v4, v5, v6, v7, v8, v9 +#define TINYFORMAT_PASSARGS_10 v1, v2, v3, v4, v5, v6, v7, v8, v9, v10 +#define TINYFORMAT_PASSARGS_11 v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11 +#define TINYFORMAT_PASSARGS_12 v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12 +#define TINYFORMAT_PASSARGS_13 \ + v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13 +#define TINYFORMAT_PASSARGS_14 \ + v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14 +#define TINYFORMAT_PASSARGS_15 \ + v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15 +#define TINYFORMAT_PASSARGS_16 \ + v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16 + +#define TINYFORMAT_PASSARGS_TAIL_1 +#define TINYFORMAT_PASSARGS_TAIL_2 , v2 +#define TINYFORMAT_PASSARGS_TAIL_3 , v2, v3 +#define TINYFORMAT_PASSARGS_TAIL_4 , v2, v3, v4 +#define TINYFORMAT_PASSARGS_TAIL_5 , v2, v3, v4, v5 +#define TINYFORMAT_PASSARGS_TAIL_6 , v2, v3, v4, v5, v6 +#define TINYFORMAT_PASSARGS_TAIL_7 , v2, v3, v4, v5, v6, v7 +#define 
TINYFORMAT_PASSARGS_TAIL_8 , v2, v3, v4, v5, v6, v7, v8
+#define TINYFORMAT_PASSARGS_TAIL_9 , v2, v3, v4, v5, v6, v7, v8, v9
+#define TINYFORMAT_PASSARGS_TAIL_10 , v2, v3, v4, v5, v6, v7, v8, v9, v10
+#define TINYFORMAT_PASSARGS_TAIL_11 , v2, v3, v4, v5, v6, v7, v8, v9, v10, v11
+#define TINYFORMAT_PASSARGS_TAIL_12 \
+  , v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12
+#define TINYFORMAT_PASSARGS_TAIL_13 \
+  , v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13
+#define TINYFORMAT_PASSARGS_TAIL_14 \
+  , v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14
+#define TINYFORMAT_PASSARGS_TAIL_15 \
+  , v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15
+#define TINYFORMAT_PASSARGS_TAIL_16 \
+  , v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16
+
+#define TINYFORMAT_FOREACH_ARGNUM(m) \
+  m(1) m(2) m(3) m(4) m(5) m(6) m(7) m(8) m(9) m(10) m(11) m(12) m(13) m(14) \
+      m(15) m(16)
+//[[[end]]]
+
+namespace detail {
+
+// Type-opaque holder for an argument to format(), with associated actions on
+// the type held as explicit function pointers. This allows FormatArgs for
+// each argument to be allocated as a homogeneous array inside FormatList
+// whereas a naive implementation based on inheritance does not.
+class FormatArg {
+public:
+  FormatArg() {}
+
+  template <typename T>
+  FormatArg(const T &value)
+      : m_value(static_cast<const void *>(&value)),
+        m_formatImpl(&formatImpl<T>),
+        m_toIntImpl(&toIntImpl<T>) {}
+
+  void format(std::ostream &out,
+              const char *fmtBegin,
+              const char *fmtEnd,
+              int ntrunc) const {
+    m_formatImpl(out, fmtBegin, fmtEnd, ntrunc, m_value);
+  }
+
+  int toInt() const { return m_toIntImpl(m_value); }
+
+private:
+  template <typename T>
+  static void formatImpl(std::ostream &out,
+                         const char *fmtBegin,
+                         const char *fmtEnd,
+                         int ntrunc,
+                         const void *value) {
+    formatValue(out, fmtBegin, fmtEnd, ntrunc, *static_cast<const T *>(value));
+  }
+
+  template <typename T>
+  static int toIntImpl(const void *value) {
+    return convertToInt<T>::invoke(*static_cast<const T *>(value));
+  }
+
+  const void *m_value;
+  void (*m_formatImpl)(std::ostream &out,
+                       const char *fmtBegin,
+                       const char *fmtEnd,
+                       int ntrunc,
+                       const void *value);
+  int (*m_toIntImpl)(const void *value);
+};
+
+// Parse and return an integer from the string c, as atoi()
+// On return, c is set to one past the end of the integer.
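+// For example, with c initially pointing at "123abc", the call returns 123
+// and leaves c pointing at 'a'.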
+inline int parseIntAndAdvance(const char *&c) {
+  int i = 0;
+  for (; *c >= '0' && *c <= '9'; ++c) i = 10 * i + (*c - '0');
+  return i;
+}
+
+// Print literal part of format string and return next format spec
+// position.
+//
+// Skips over any occurrences of '%%', printing a literal '%' to the
+// output. The position of the first % character of the next
+// nontrivial format spec is returned, or the end of string.
+inline const char *printFormatStringLiteral(std::ostream &out,
+                                            const char *fmt) {
+  const char *c = fmt;
+  for (;; ++c) {
+    switch (*c) {
+      case '\0':
+        out.write(fmt, c - fmt);
+        return c;
+      case '%':
+        out.write(fmt, c - fmt);
+        if (*(c + 1) != '%') return c;
+        // for "%%", tack trailing % onto next literal section.
+        fmt = ++c;
+        break;
+      default:
+        break;
+    }
+  }
+}
+
+// Parse a format string and set the stream state accordingly.
+//
+// The format mini-language recognized here is meant to be the one from C99,
+// with the form "%[flags][width][.precision][length]type".
+//
+// Formatting options which can't be natively represented using the ostream
+// state are returned in spacePadPositive (for space padded positive numbers)
+// and ntrunc (for truncating conversions). argIndex is incremented if
+// necessary to pull out variable width and precision. The function returns a
+// pointer to the character after the end of the current format spec.
+inline const char *streamStateFromFormat(std::ostream &out,
+                                         bool &spacePadPositive,
+                                         int &ntrunc,
+                                         const char *fmtStart,
+                                         const detail::FormatArg *formatters,
+                                         int &argIndex,
+                                         int numFormatters) {
+  if (*fmtStart != '%') {
+    TINYFORMAT_ERROR(
+        "tinyformat: Not enough conversion specifiers in format string");
+    return fmtStart;
+  }
+  // Reset stream state to defaults.
+  out.width(0);
+  out.precision(6);
+  out.fill(' ');
+  // Reset most flags; ignore irrelevant unitbuf & skipws.
+  out.unsetf(std::ios::adjustfield | std::ios::basefield |
+             std::ios::floatfield | std::ios::showbase | std::ios::boolalpha |
+             std::ios::showpoint | std::ios::showpos | std::ios::uppercase);
+  bool precisionSet = false;
+  bool widthSet = false;
+  int widthExtra = 0;
+  const char *c = fmtStart + 1;
+  // 1) Parse flags
+  for (;; ++c) {
+    switch (*c) {
+      case '#':
+        out.setf(std::ios::showpoint | std::ios::showbase);
+        continue;
+      case '0':
+        // overridden by left alignment ('-' flag)
+        if (!(out.flags() & std::ios::left)) {
+          // Use internal padding so that numeric values are
+          // formatted correctly, eg -00010 rather than 000-10
+          out.fill('0');
+          out.setf(std::ios::internal, std::ios::adjustfield);
+        }
+        continue;
+      case '-':
+        out.fill(' ');
+        out.setf(std::ios::left, std::ios::adjustfield);
+        continue;
+      case ' ':
+        // overridden by show positive sign, '+' flag.
+        if (!(out.flags() & std::ios::showpos)) spacePadPositive = true;
+        continue;
+      case '+':
+        out.setf(std::ios::showpos);
+        spacePadPositive = false;
+        widthExtra = 1;
+        continue;
+      default:
+        break;
+    }
+    break;
+  }
+  // 2) Parse width
+  if (*c >= '0' && *c <= '9') {
+    widthSet = true;
+    out.width(parseIntAndAdvance(c));
+  }
+  if (*c == '*') {
+    widthSet = true;
+    int width = 0;
+    if (argIndex < numFormatters)
+      width = formatters[argIndex++].toInt();
+    else
+      TINYFORMAT_ERROR(
+          "tinyformat: Not enough arguments to read variable width");
+    if (width < 0) {
+      // negative widths correspond to '-' flag set
+      out.fill(' ');
+      out.setf(std::ios::left, std::ios::adjustfield);
+      width = -width;
+    }
+    out.width(width);
+    ++c;
+  }
+  // 3) Parse precision
+  if (*c == '.') {
+    ++c;
+    int precision = 0;
+    if (*c == '*') {
+      ++c;
+      if (argIndex < numFormatters)
+        precision = formatters[argIndex++].toInt();
+      else
+        TINYFORMAT_ERROR(
+            "tinyformat: Not enough arguments to read variable precision");
+    } else {
+      if (*c >= '0' && *c <= '9')
+        precision = parseIntAndAdvance(c);
+      else if (*c == '-')  // negative precisions ignored, treated as zero.
+        parseIntAndAdvance(++c);
+    }
+    out.precision(precision);
+    precisionSet = true;
+  }
+  // 4) Ignore any C99 length modifier
+  while (*c == 'l' || *c == 'h' || *c == 'L' || *c == 'j' || *c == 'z' ||
+         *c == 't')
+    ++c;
+  // 5) We're up to the conversion specifier character.
+  // Set stream flags based on conversion specifier (thanks to the
+  // boost::format class for forging the way here).
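+  // (intConversion is recorded below so that an explicit precision on an
+  // integer conversion, e.g. "%.3d", can be emulated after the switch with a
+  // zero-padded width; iostreams have no direct equivalent of printf's
+  // integer precision.)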
+ bool intConversion = false; + switch (*c) { + case 'u': + case 'd': + case 'i': + out.setf(std::ios::dec, std::ios::basefield); + intConversion = true; + break; + case 'o': + out.setf(std::ios::oct, std::ios::basefield); + intConversion = true; + break; + case 'X': + out.setf(std::ios::uppercase); + case 'x': + case 'p': + out.setf(std::ios::hex, std::ios::basefield); + intConversion = true; + break; + case 'E': + out.setf(std::ios::uppercase); + case 'e': + out.setf(std::ios::scientific, std::ios::floatfield); + out.setf(std::ios::dec, std::ios::basefield); + break; + case 'F': + out.setf(std::ios::uppercase); + case 'f': + out.setf(std::ios::fixed, std::ios::floatfield); + break; + case 'G': + out.setf(std::ios::uppercase); + case 'g': + out.setf(std::ios::dec, std::ios::basefield); + // As in boost::format, let stream decide float format. + out.flags(out.flags() & ~std::ios::floatfield); + break; + case 'a': + case 'A': + TINYFORMAT_ERROR( + "tinyformat: the %a and %A conversion specs " + "are not supported"); + break; + case 'c': + // Handled as special case inside formatValue() + break; + case 's': + if (precisionSet) ntrunc = static_cast(out.precision()); + // Make %s print booleans as "true" and "false" + out.setf(std::ios::boolalpha); + break; + case 'n': + // Not supported - will cause problems! + TINYFORMAT_ERROR("tinyformat: %n conversion spec not supported"); + break; + case '\0': + TINYFORMAT_ERROR( + "tinyformat: Conversion spec incorrectly " + "terminated by end of string"); + return c; + default: + break; + } + if (intConversion && precisionSet && !widthSet) { + // "precision" for integers gives the minimum number of digits (to be + // padded with zeros on the left). This isn't really supported by the + // iostreams, but we can approximately simulate it with the width if + // the width isn't otherwise used. + out.width(out.precision() + widthExtra); + out.setf(std::ios::internal, std::ios::adjustfield); + out.fill('0'); + } + return c + 1; +} + +//------------------------------------------------------------------------------ +inline void formatImpl(std::ostream &out, + const char *fmt, + const detail::FormatArg *formatters, + int numFormatters) { + // Saved stream state + std::streamsize origWidth = out.width(); + std::streamsize origPrecision = out.precision(); + std::ios::fmtflags origFlags = out.flags(); + char origFill = out.fill(); + + for (int argIndex = 0; argIndex < numFormatters; ++argIndex) { + // Parse the format string + fmt = printFormatStringLiteral(out, fmt); + bool spacePadPositive = false; + int ntrunc = -1; + const char *fmtEnd = streamStateFromFormat(out, + spacePadPositive, + ntrunc, + fmt, + formatters, + argIndex, + numFormatters); + if (argIndex >= numFormatters) { + // Check args remain after reading any variable width/precision + TINYFORMAT_ERROR("tinyformat: Not enough format arguments"); + return; + } + const FormatArg &arg = formatters[argIndex]; + // Format the arg into the stream. + if (!spacePadPositive) + arg.format(out, fmt, fmtEnd, ntrunc); + else { + // The following is a special case with no direct correspondence + // between stream formatting and the printf() behaviour. Simulate + // it crudely by formatting into a temporary string stream and + // munging the resulting string. + std::ostringstream tmpStream; + tmpStream.copyfmt(out); + tmpStream.setf(std::ios::showpos); + arg.format(tmpStream, fmt, fmtEnd, ntrunc); + std::string result = tmpStream.str(); // allocates... yuck. 
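+      // Swap each '+' sign for a space to emulate printf's ' ' flag.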
+      for (size_t i = 0, iend = result.size(); i < iend; ++i)
+        if (result[i] == '+') result[i] = ' ';
+      out << result;
+    }
+    fmt = fmtEnd;
+  }
+
+  // Print remaining part of format string.
+  fmt = printFormatStringLiteral(out, fmt);
+  if (*fmt != '\0')
+    TINYFORMAT_ERROR(
+        "tinyformat: Too many conversion specifiers in format string");
+
+  // Restore stream state
+  out.width(origWidth);
+  out.precision(origPrecision);
+  out.flags(origFlags);
+  out.fill(origFill);
+}
+
+}  // namespace detail
+
+/// List of template arguments to format(), held in a type-opaque way.
+///
+/// A const reference to FormatList (typedef'd as FormatListRef) may be
+/// conveniently used to pass arguments to non-template functions: All type
+/// information has been stripped from the arguments, leaving just enough of a
+/// common interface to perform formatting as required.
+class FormatList {
+public:
+  FormatList(detail::FormatArg *formatters, int N)
+      : m_formatters(formatters), m_N(N) {}
+
+  friend void vformat(std::ostream &out,
+                      const char *fmt,
+                      const FormatList &list);
+
+private:
+  const detail::FormatArg *m_formatters;
+  int m_N;
+};
+
+/// Reference to type-opaque format list for passing to vformat()
+typedef const FormatList &FormatListRef;
+
+namespace detail {
+
+// Format list subclass with fixed storage to avoid dynamic allocation
+template <int N>
+class FormatListN : public FormatList {
+public:
+  template <typename... Args>
+  FormatListN(const Args &... args)
+      : FormatList(&m_formatterStore[0], N),
+        m_formatterStore{FormatArg(args)...} {
+    static_assert(sizeof...(args) == N, "Number of args must be N");
+  }
+
+private:
+  FormatArg m_formatterStore[N];
+};
+
+// Special 0-arg version - MSVC says zero-sized C array in struct is nonstandard
+template <>
+class FormatListN<0> : public FormatList {
+public:
+  FormatListN() : FormatList(0, 0) {}
+};
+
+}  // namespace detail
+
+//------------------------------------------------------------------------------
+// Primary API functions
+
+/// Make type-agnostic format list from list of template arguments.
+///
+/// The exact return type of this function is an implementation detail and
+/// shouldn't be relied upon. Instead it should be stored as a FormatListRef:
+///
+///   FormatListRef formatList = makeFormatList( /*...*/ );
+template <typename... Args>
+detail::FormatListN<sizeof...(Args)> makeFormatList(const Args &... args) {
+  return detail::FormatListN<sizeof...(Args)>(args...);
+}
+
+/// Format list of arguments to the stream according to the given format
+/// string.
+///
+/// The name vformat() is chosen for the semantic similarity to vprintf(): the
+/// list of format arguments is held in a single function argument.
+inline void vformat(std::ostream &out, const char *fmt, FormatListRef list) {
+  detail::formatImpl(out, fmt, list.m_formatters, list.m_N);
+}
+
+/// Format list of arguments to the stream according to given format string.
+template <typename... Args>
+void format(std::ostream &out, const char *fmt, const Args &... args) {
+  vformat(out, fmt, makeFormatList(args...));
+}
+
+/// Format list of arguments according to the given format string and return
+/// the result as a string.
+template <typename... Args>
+std::string format(const char *fmt, const Args &... args) {
+  std::ostringstream oss;
+  format(oss, fmt, args...);
+  return oss.str();
+}
+
+/// Format list of arguments to std::cout, according to the given format string
+template <typename... Args>
+void printf(const char *fmt, const Args &... args) {
+  format(std::cout, fmt, args...);
+}
+
+template <typename... Args>
+void printfln(const char *fmt, const Args &...
args) { + format(std::cout, fmt, args...); + std::cout << '\n'; +} + +} // namespace tinyformat +} // namespace string +} // namespace paddle From 098f956c098f5980a231575ed663b4704b6df5c5 Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Wed, 28 Jun 2017 17:36:32 -0700 Subject: [PATCH 38/43] Correct function implementation dependencies --- paddle/string/printf.h | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/paddle/string/printf.h b/paddle/string/printf.h index 0767f8f5b5..8b5ce63a8e 100644 --- a/paddle/string/printf.h +++ b/paddle/string/printf.h @@ -80,25 +80,19 @@ namespace string { template void Fprintf(std::ostream& out, const char* fmt, const Args&... args) { - tinyformat::vformat(out, fmt, makeFormatList(args...)); + tinyformat::vformat(out, fmt, tinyformat::makeFormatList(args...)); } template std::string Sprintf(const char* fmt, const Args&... args) { std::ostringstream oss; - tinyformat::format(oss, fmt, args...); + Fprintf(oss, fmt, args...); return oss.str(); } template -void printf(const char* fmt, const Args&... args) { - tinyformat::format(std::cout, fmt, args...); -} - -template -void printfln(const char* fmt, const Args&... args) { - tinyformat::format(std::cout, fmt, args...); - std::cout << '\n'; +void Printf(const char* fmt, const Args&... args) { + Fprintf(std::cout, fmt, args...); } } // namespace string From c5e8e27ba6783a947965900931ed41c9ef2123fb Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Thu, 29 Jun 2017 10:02:20 +0800 Subject: [PATCH 39/43] "change empty gradient to error" --- go/pserver/client.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/go/pserver/client.go b/go/pserver/client.go index a8d2d710d8..6938b9d5ce 100644 --- a/go/pserver/client.go +++ b/go/pserver/client.go @@ -1,6 +1,7 @@ package pserver import ( + "errors" "hash/fnv" "sort" "time" @@ -124,8 +125,7 @@ func (c *Client) FinishInitParams() error { // parameters. func (c *Client) SendGrads(grads []Gradient) error { if len(grads) == 0 { - log.Info("Send Empty Gradient") - return nil + return errors.New("no gradient received") } errCh := make(chan error, len(grads)) for _, g := range grads { From 11a8dfe78e1626a6535a1d0ba8220c2dd3fa050c Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Thu, 29 Jun 2017 10:57:40 +0800 Subject: [PATCH 40/43] Use Parameters.from_tar for static method. 
--- python/paddle/v2/parameters.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/v2/parameters.py b/python/paddle/v2/parameters.py index f730ea10bb..4c4ff4c7c2 100644 --- a/python/paddle/v2/parameters.py +++ b/python/paddle/v2/parameters.py @@ -300,7 +300,7 @@ class Parameters(object): return params def init_from_tar(self, f): - tar_param = self.from_tar(f) + tar_param = Parameters.from_tar(f) for pname in tar_param.names(): if pname in self.names(): self.set(pname, tar_param.get(pname)) From f0a3fb6e36e06512d537068ecd7c5f553a88da83 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 29 Jun 2017 11:10:59 +0800 Subject: [PATCH 41/43] Using paddle::string in enforce --- paddle/framework/CMakeLists.txt | 3 +- paddle/{platform => framework}/enforce.h | 63 +++---------------- .../{platform => framework}/enforce_test.cc | 9 ++- paddle/platform/CMakeLists.txt | 1 - 4 files changed, 14 insertions(+), 62 deletions(-) rename paddle/{platform => framework}/enforce.h (60%) rename paddle/{platform => framework}/enforce_test.cc (82%) diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index e3c3155aa9..b06ecc2628 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -1,6 +1,5 @@ cc_library(ddim SRCS ddim.cc) cc_test(ddim_test SRCS ddim_test.cc DEPS ddim) - nv_test(dim_test SRCS dim_test.cu DEPS ddim) - cc_test(variable_test SRCS variable_test.cc) +cc_test(enforce_test SRCS enforce_test.cc) diff --git a/paddle/platform/enforce.h b/paddle/framework/enforce.h similarity index 60% rename from paddle/platform/enforce.h rename to paddle/framework/enforce.h index fbd3405a24..56cb7f9564 100644 --- a/paddle/platform/enforce.h +++ b/paddle/framework/enforce.h @@ -10,11 +10,12 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include #include namespace paddle { -namespace platform { +namespace framework { /** * @brief Enforce exception. Inherits std::exception @@ -23,10 +24,9 @@ namespace platform { */ class EnforceNotMet : public std::exception { public: - EnforceNotMet(const std::string& msg, const char* file, int fileline) - : file_(file), fileline_(fileline) { + EnforceNotMet(const std::string& msg, const char* file, int fileline) { std::ostringstream sout; - sout << msg << " at [" << file_ << ":" << fileline_ << "];"; + sout << msg << " at [" << file << ":" << fileline << "];"; all_msg_ = sout.str(); } @@ -34,52 +34,8 @@ class EnforceNotMet : public std::exception { private: std::string all_msg_; - const char* file_; - int fileline_; }; -namespace details { - -inline void MakeStringInternal(std::ostringstream& stream) {} - -template -inline void MakeStringInternal(std::ostringstream& stream, T v) { - stream << v; -} - -template -inline void MakeStringInternal(std::ostringstream& stream, T v, ARGS... args) { - MakeStringInternal(stream, v); - MakeStringInternal(stream, args...); -}; - -/** - * @brief Make string will concat all args into a string. - */ -template -inline std::string MakeString(ARGS... 
args) { - std::ostringstream sout; - details::MakeStringInternal(sout, args...); - return sout.str(); -} - -/** - * @brief special handle string - */ -template <> -inline std::string MakeString(std::string str) { - return str; -} - -/** - * @brief special handle const char* - */ -template <> -inline std::string MakeString(const char* str) { - return std::string(str); -} -} // namespace details - // From https://stackoverflow.com/questions/30130930/ // __buildin_expect is in C++ 11 standard. Since the condition which enforced // should be true in most situation, it will make the compiler generate faster @@ -93,11 +49,10 @@ inline std::string MakeString(const char* str) { * This macro take __VA_ARGS__, user can pass any type if that type can * serialize to std::ostream */ -#define PADDLE_THROW(...) \ - do { \ - throw ::paddle::platform::EnforceNotMet( \ - ::paddle::platform::details::MakeString(__VA_ARGS__), __FILE__, \ - __LINE__); \ +#define PADDLE_THROW(...) \ + do { \ + throw ::paddle::framework::EnforceNotMet( \ + ::paddle::string::Sprintf(__VA_ARGS__), __FILE__, __LINE__); \ } while (0) /** @@ -110,5 +65,5 @@ inline std::string MakeString(const char* str) { } \ } while (0) -} // namespace platform +} // namespace framework } // namespace paddle diff --git a/paddle/platform/enforce_test.cc b/paddle/framework/enforce_test.cc similarity index 82% rename from paddle/platform/enforce_test.cc rename to paddle/framework/enforce_test.cc index 23b32444ad..f8da1a192f 100644 --- a/paddle/platform/enforce_test.cc +++ b/paddle/framework/enforce_test.cc @@ -10,10 +10,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include +#include TEST(ENFORCE, OK) { - PADDLE_ENFORCE(true, "Enforce is ok", 123, "now", 0.345); + PADDLE_ENFORCE(true, "Enforce is ok %d now %f", 123, 0.345); size_t val = 1; const size_t limit = 10; PADDLE_ENFORCE(val < limit, "Enforce is OK too"); @@ -22,8 +22,8 @@ TEST(ENFORCE, OK) { TEST(ENFORCE, FAILED) { bool in_catch = false; try { - PADDLE_ENFORCE(false, "Enforce is not ok ", 123, " at all"); - } catch (paddle::platform::EnforceNotMet err) { + PADDLE_ENFORCE(false, "Enforce is not ok %d at all", 123); + } catch (paddle::framework::EnforceNotMet err) { in_catch = true; std::string msg = "Enforce is not ok 123 at all"; const char* what = err.what(); @@ -31,6 +31,5 @@ TEST(ENFORCE, FAILED) { ASSERT_EQ(what[i], msg[i]); } } - ASSERT_TRUE(in_catch); } \ No newline at end of file diff --git a/paddle/platform/CMakeLists.txt b/paddle/platform/CMakeLists.txt index bc72e62be4..c7d7b14518 100644 --- a/paddle/platform/CMakeLists.txt +++ b/paddle/platform/CMakeLists.txt @@ -2,4 +2,3 @@ nv_test(cuda_test SRCS cuda_test.cu) cc_library(place SRCS place.cc) cc_test(place_test SRCS place_test.cc DEPS place glog gflags) -cc_test(enforce_test SRCS enforce_test.cc) From 23d6c594eca369820b5f4dfcd0a38a9f4cd6122e Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Thu, 29 Jun 2017 12:33:07 +0800 Subject: [PATCH 42/43] add comments --- python/paddle/v2/parameters.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/python/paddle/v2/parameters.py b/python/paddle/v2/parameters.py index 4c4ff4c7c2..bbaf8bfa97 100644 --- a/python/paddle/v2/parameters.py +++ b/python/paddle/v2/parameters.py @@ -284,6 +284,18 @@ class Parameters(object): @staticmethod def from_tar(f): + """ + Create a `Parameters` object from the given file. And + the `Parameters` only contains the parameters in this + file. 
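+
+        Example (a hypothetical session; assumes `params.tar` was
+        written earlier via `to_tar`):
+
+        .. code-block:: python
+
+            with open('params.tar') as f:
+                params = Parameters.from_tar(f)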
+        The method assumes that the parameters in the given file and
+        in the defined network are the same; for example, it can be
+        used for inference.
+
+        :param f: the initialized model file.
+        :type f: tar file
+        :return: A Parameters object.
+        :rtype: Parameters.
+        """
         params = Parameters()
         tar = tarfile.TarFile(fileobj=f, mode='r')
         for finfo in tar:
@@ -300,6 +312,15 @@ class Parameters(object):
         return params
 
     def init_from_tar(self, f):
+        """
+        Different from `from_tar`, this interface can be used to
+        initialize a subset of the network parameters from another
+        saved model.
+
+        :param f: the initialized model file.
+        :type f: tar file
+        :return: Nothing.
+        """
+
         tar_param = Parameters.from_tar(f)
         for pname in tar_param.names():
             if pname in self.names():
                 self.set(pname, tar_param.get(pname))

From 9af8d86b7ceedbc244873ee5207392231bab540a Mon Sep 17 00:00:00 2001
From: Yancey
Date: Thu, 29 Jun 2017 13:20:13 +0800
Subject: [PATCH 43/43] Trainer library discover master by etcd (#2551)

* add trainer library
* modify file name
* move trainer to master client
* update
* update
* modify monitor master to receive a chan
* update
* use etcd client from etcd_client.go
* update
* update
* remove etcd client without lock
* update
* update the comment
* update comments
---
 go/master/c/client.go             | 30 +++++++++++++++++++++++++-----
 go/master/client.go               | 24 ++++--------------------
 go/master/client_internal_test.go | 13 ++++---------
 go/master/client_test.go          |  8 +++-----
 go/master/etcd_client.go          | 28 ++++++++++++++++++++++++++++
 5 files changed, 64 insertions(+), 39 deletions(-)

diff --git a/go/master/c/client.go b/go/master/c/client.go
index b186474dc3..9e35e98600 100644
--- a/go/master/c/client.go
+++ b/go/master/c/client.go
@@ -13,10 +13,13 @@ typedef int paddle_master_client;
 import "C"
 
 import (
+	"strings"
 	"sync"
+	"time"
 	"unsafe"
 
 	"github.com/PaddlePaddle/Paddle/go/master"
+	"github.com/coreos/etcd/clientv3"
 	log "github.com/sirupsen/logrus"
 )
 
@@ -48,16 +51,33 @@ func remove(client C.paddle_master_client) *master.Client {
 	return h
 }
 
-type addresser string
-
-func (a addresser) Address() string {
-	return string(a)
+//export paddle_new_etcd_master_client
+func paddle_new_etcd_master_client(etcdEndpoints *C.char, timeout int, bufSize int) C.paddle_master_client {
+	p := C.GoString(etcdEndpoints)
+	cli, err := clientv3.New(clientv3.Config{
+		Endpoints:   strings.Split(p, ","),
+		DialTimeout: time.Second * time.Duration(timeout),
+	})
+	if err != nil {
+		panic(err)
+	}
+	ch := make(chan string, 1)
+	a, err := master.GetKey(cli, master.DefaultAddrPath, timeout)
+	if err != nil {
+		panic(err)
+	}
+	ch <- a
+	go master.WatchKey(cli, master.DefaultAddrPath, ch)
+	c := master.NewClient(ch, bufSize)
+	return add(c)
 }
 
 //export paddle_new_master_client
 func paddle_new_master_client(addr *C.char, bufSize int) C.paddle_master_client {
 	a := C.GoString(addr)
-	c := master.NewClient(addresser(a), bufSize)
+	ch := make(chan string, 1)
+	ch <- a
+	c := master.NewClient(ch, bufSize)
 	return add(c)
 }
 
diff --git a/go/master/client.go b/go/master/client.go
index 8451820c19..d3bea49d0a 100644
--- a/go/master/client.go
+++ b/go/master/client.go
@@ -2,18 +2,12 @@ package master
 
 import (
 	"os"
-	"time"
 
 	"github.com/PaddlePaddle/Paddle/go/connection"
 	"github.com/PaddlePaddle/recordio"
 	log "github.com/sirupsen/logrus"
 )
 
-// Addresser provide the address of the master server.
-type Addresser interface {
-	Address() string
-}
-
 // Client is the client of the master server.
 type Client struct {
 	conn *connection.Conn
@@ -24,11 +18,11 @@ type Client struct {
 //
 // bufSize is the record buffer size.
NextRecord will read from this // buffer. -func NewClient(addr Addresser, bufSize int) *Client { +func NewClient(addrCh <-chan string, bufSize int) *Client { c := &Client{} c.conn = connection.New() c.ch = make(chan []byte, bufSize) - go c.monitorMaster(addr) + go c.monitorMaster(addrCh) go c.getRecords() return c } @@ -72,12 +66,10 @@ func (c *Client) getRecords() { } } -func (c *Client) monitorMaster(addr Addresser) { +func (c *Client) monitorMaster(addrCh <-chan string) { lastMaster := "" - monitor := func() { - // get the lastest address of the master server, + for curMaster := range addrCh { // connect to the new address once address changed. - curMaster := addr.Address() if curMaster != lastMaster { if curMaster == "" { err := c.conn.Close() @@ -94,18 +86,10 @@ func (c *Client) monitorMaster(addr Addresser) { // to retry next time. curMaster = lastMaster } - } } - lastMaster = curMaster } - - monitor() - ticker := time.NewTicker(10 * time.Second) - for _ = range ticker.C { - monitor() - } } // SetDataset set dataset for the master server to dispatch. diff --git a/go/master/client_internal_test.go b/go/master/client_internal_test.go index 251225780a..364dce7b58 100644 --- a/go/master/client_internal_test.go +++ b/go/master/client_internal_test.go @@ -26,12 +26,6 @@ func init() { log.SetLevel(log.ErrorLevel) } -type TestAddresser string - -func (a TestAddresser) Address() string { - return string(a) -} - func TestGetFinishTask(t *testing.T) { const path = "/tmp/master_client_test_0" @@ -45,7 +39,6 @@ func TestGetFinishTask(t *testing.T) { if err != nil { panic(err) } - go func(l net.Listener) { s, err := NewService(&InMemStore{}, chunkPerTask, time.Second, 1) if err != nil { @@ -82,9 +75,11 @@ func TestGetFinishTask(t *testing.T) { // Manually intialize client to avoid calling c.getRecords() c := &Client{} c.conn = connection.New() - go c.monitorMaster(TestAddresser(fmt.Sprintf(":%d", p))) + addr := fmt.Sprintf(":%d", p) + ch := make(chan string, 1) + ch <- addr + go c.monitorMaster(ch) c.SetDataset([]string{path}) - checkOnePass := func(i int) { var tasks []Task for idx := 0; idx < totalTask; idx++ { diff --git a/go/master/client_test.go b/go/master/client_test.go index 85a86761c2..c00aeebfd5 100644 --- a/go/master/client_test.go +++ b/go/master/client_test.go @@ -20,7 +20,6 @@ func TestNextRecord(t *testing.T) { path = "/tmp/master_client_TestFull" total = 50 ) - l, err := net.Listen("tcp", ":0") if err != nil { panic(err) @@ -31,7 +30,6 @@ func TestNextRecord(t *testing.T) { if err != nil { panic(err) } - go func(l net.Listener) { s, err := master.NewService(&master.InMemStore{}, 10, time.Second, 1) if err != nil { @@ -63,10 +61,10 @@ func TestNextRecord(t *testing.T) { } w.Close() f.Close() - - c := master.NewClient(master.TestAddresser(fmt.Sprintf(":%d", p)), 10) + curAddr := make(chan string, 1) + curAddr <- fmt.Sprintf(":%d", p) + c := master.NewClient(curAddr, 10) c.SetDataset([]string{path}) - for pass := 0; pass < 50; pass++ { received := make(map[byte]bool) for i := 0; i < total; i++ { diff --git a/go/master/etcd_client.go b/go/master/etcd_client.go index f7b4638577..e27c014792 100644 --- a/go/master/etcd_client.go +++ b/go/master/etcd_client.go @@ -142,3 +142,31 @@ func (e *EtcdClient) Load() ([]byte, error) { state := kvs[0].Value return state, nil } + +// GetKey gets the value by the specify key. 
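+// When the key is not present, GetKey returns an empty string and a nil
+// error.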
+func GetKey(c *clientv3.Client, key string, timeout int) (string, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(timeout))
+	resp, err := c.Get(ctx, key)
+	cancel()
+	if err != nil {
+		return "", err
+	}
+	kvs := resp.Kvs
+	if len(kvs) == 0 {
+		return "", nil
+	}
+	v := kvs[0].Value
+	return string(v), nil
+}
+
+// WatchKey watches the specified key and sends the new value to valChan
+// whenever there is an event.
+func WatchKey(c *clientv3.Client, key string, valChan chan<- string) {
+	rch := c.Watch(context.Background(), key)
+	for wresp := range rch {
+		for _, ev := range wresp.Events {
+			// if the received event is DELETE, the value will be an empty string
+			log.Infof("received event %s, %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
+			valChan <- string(ev.Kv.Value)
+		}
+	}
+}