|
|
|
limitations under the License. */
|
|
|
|
|
#include "paddle/utils/Locks.h"
|
|
|
|
|
#include <semaphore.h>
|
|
|
|
|
#include <unistd.h>
|
|
|
|
|
#include "paddle/utils/Logging.h"
|
|
|
|
|
|
|
|
|
|
namespace paddle {
|
|
|
|
|
class SemaphorePrivate {
|
|
|
|
@ -26,7 +27,10 @@ Semaphore::Semaphore(int initValue) : m(new SemaphorePrivate()) {
|
|
|
|
|
sem_init(&m->sem, 0, initValue);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
Semaphore::~Semaphore() { sem_destroy(&m->sem); }
|
|
|
|
|
Semaphore::~Semaphore() {
|
|
|
|
|
sem_destroy(&m->sem);
|
|
|
|
|
delete m;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
bool Semaphore::timeWait(struct timespec* ts) {
|
|
|
|
|
return (0 == sem_timedwait(&m->sem, ts));
|
|
|
|
@ -36,70 +40,101 @@ void Semaphore::wait() { sem_wait(&m->sem); }
|
|
|
|
|
|
|
|
|
|
void Semaphore::post() { sem_post(&m->sem); }
|
|
|
|
|
|
|
|
|
|
#ifdef PADDLE_USE_PTHREAD_SPINLOCK
|
|
|
|
|
|
|
|
|
|
class SpinLockPrivate {
|
|
|
|
|
public:
|
|
|
|
|
inline SpinLockPrivate() {
|
|
|
|
|
#ifndef __ANDROID__
|
|
|
|
|
pthread_spin_init(&lock_, 0);
|
|
|
|
|
#else
|
|
|
|
|
lock_ = 0;
|
|
|
|
|
#endif
|
|
|
|
|
}
|
|
|
|
|
inline ~SpinLockPrivate() {
|
|
|
|
|
#ifndef __ANDROID__
|
|
|
|
|
pthread_spin_destroy(&lock_);
|
|
|
|
|
#endif
|
|
|
|
|
}
|
|
|
|
|
#ifndef __ANDROID__
|
|
|
|
|
inline SpinLockPrivate() { pthread_spin_init(&lock_, 0); }
|
|
|
|
|
inline ~SpinLockPrivate() { pthread_spin_destroy(&lock_); }
|
|
|
|
|
|
|
|
|
|
inline void lock() { pthread_spin_lock(&lock_); }
|
|
|
|
|
inline void unlock() { pthread_spin_unlock(&lock_); }
|
|
|
|
|
|
|
|
|
|
pthread_spinlock_t lock_;
|
|
|
|
|
#else
|
|
|
|
|
unsigned long lock_;
|
|
|
|
|
#endif
|
|
|
|
|
char padding_[64 - sizeof(lock_)];
|
|
|
|
|
char padding_[64 - sizeof(pthread_spinlock_t)];
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
SpinLock::SpinLock() : m(new SpinLockPrivate()) {}
|
|
|
|
|
#else
|
|
|
|
|
|
|
|
|
|
SpinLock::~SpinLock() { delete m; }
|
|
|
|
|
#include <atomic>
|
|
|
|
|
class SpinLockPrivate {
|
|
|
|
|
public:
|
|
|
|
|
inline void lock() {
|
|
|
|
|
while (lock_.test_and_set(std::memory_order_acquire)) {
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
inline void unlock() { lock_.clear(std::memory_order_release); }
|
|
|
|
|
|
|
|
|
|
void SpinLock::lock() {
|
|
|
|
|
#ifndef __ANDROID__
|
|
|
|
|
pthread_spin_lock(&m->lock_);
|
|
|
|
|
#endif
|
|
|
|
|
}
|
|
|
|
|
std::atomic_flag lock_ = ATOMIC_FLAG_INIT;
|
|
|
|
|
char padding_[64 - sizeof(lock_)]; // Padding to cache line size
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
void SpinLock::unlock() {
|
|
|
|
|
#ifndef __ANDROID__
|
|
|
|
|
pthread_spin_unlock(&m->lock_);
|
|
|
|
|
#endif
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
SpinLock::SpinLock() : m(new SpinLockPrivate()) {}
|
|
|
|
|
SpinLock::~SpinLock() { delete m; }
|
|
|
|
|
void SpinLock::lock() { m->lock(); }
|
|
|
|
|
void SpinLock::unlock() { m->unlock(); }
|
|
|
|
|
|
|
|
|
|
#ifdef PADDLE_USE_PTHREAD_BARRIER
|
|
|
|
|
|
|
|
|
|
class ThreadBarrierPrivate {
|
|
|
|
|
public:
|
|
|
|
|
#ifndef __ANDROID__
|
|
|
|
|
pthread_barrier_t barrier_;
|
|
|
|
|
#else
|
|
|
|
|
unsigned long barrier_;
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
inline explicit ThreadBarrierPrivate(int count) {
|
|
|
|
|
pthread_barrier_init(&barrier_, nullptr, count);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
inline ~ThreadBarrierPrivate() { pthread_barrier_destroy(&barrier_); }
|
|
|
|
|
|
|
|
|
|
inline void wait() { pthread_barrier_wait(&barrier_); }
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
ThreadBarrier::ThreadBarrier(int count) : m(new ThreadBarrierPrivate()) {
|
|
|
|
|
#ifndef __ANDROID__
|
|
|
|
|
pthread_barrier_init(&m->barrier_, nullptr, count);
|
|
|
|
|
#endif
|
|
|
|
|
}
|
|
|
|
|
#else
|
|
|
|
|
|
|
|
|
|
ThreadBarrier::~ThreadBarrier() {
|
|
|
|
|
#ifndef __ANDROID__
|
|
|
|
|
pthread_barrier_destroy(&m->barrier_);
|
|
|
|
|
#endif
|
|
|
|
|
delete m;
|
|
|
|
|
}
|
|
|
|
|
class ThreadBarrierPrivate {
|
|
|
|
|
public:
|
|
|
|
|
pthread_mutex_t mutex_;
|
|
|
|
|
pthread_cond_t cond_;
|
|
|
|
|
int count_;
|
|
|
|
|
int tripCount_;
|
|
|
|
|
|
|
|
|
|
inline explicit ThreadBarrierPrivate(int cnt) : count_(0), tripCount_(cnt) {
|
|
|
|
|
CHECK_NE(cnt, 0);
|
|
|
|
|
CHECK_GE(pthread_mutex_init(&mutex_, 0), 0);
|
|
|
|
|
CHECK_GE(pthread_cond_init(&cond_, 0), 0);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
inline ~ThreadBarrierPrivate() {
|
|
|
|
|
pthread_cond_destroy(&cond_);
|
|
|
|
|
pthread_mutex_destroy(&mutex_);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* @brief wait
|
|
|
|
|
* @return true if the last wait
|
|
|
|
|
*/
|
|
|
|
|
inline bool wait() {
|
|
|
|
|
pthread_mutex_lock(&mutex_);
|
|
|
|
|
++count_;
|
|
|
|
|
if (count_ >= tripCount_) {
|
|
|
|
|
count_ = 0;
|
|
|
|
|
pthread_cond_broadcast(&cond_);
|
|
|
|
|
pthread_mutex_unlock(&mutex_);
|
|
|
|
|
return true;
|
|
|
|
|
} else {
|
|
|
|
|
pthread_cond_wait(&cond_, &mutex_);
|
|
|
|
|
pthread_mutex_unlock(&mutex_);
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
void ThreadBarrier::wait() {
|
|
|
|
|
#ifndef __ANDROID__
|
|
|
|
|
pthread_barrier_wait(&m->barrier_);
|
|
|
|
|
#endif
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ThreadBarrier::ThreadBarrier(int count) : m(new ThreadBarrierPrivate(count)) {}
|
|
|
|
|
ThreadBarrier::~ThreadBarrier() { delete m; }
|
|
|
|
|
void ThreadBarrier::wait() { m->wait(); }
|
|
|
|
|
|
|
|
|
|
} // namespace paddle
|
|
|
|
|