commit c58ce0a687
@@ -0,0 +1,229 @@
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <stddef.h>  // for size_t
#include <atomic>
#include <condition_variable>
#include <deque>
#include "paddle/fluid/framework/channel.h"
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace framework {

template <typename T>
class ChannelImpl : public paddle::framework::Channel<T> {
  friend Channel<T> *paddle::framework::MakeChannel<T>(size_t);
  friend void paddle::framework::CloseChannel<T>(Channel<T> *);

 public:
  virtual bool Send(T *);
  virtual bool Receive(T *);
  virtual size_t Cap() { return cap_; }
  virtual void Lock();
  virtual void Unlock();
  virtual void Close();

  ChannelImpl(size_t);
  virtual ~ChannelImpl();

 private:
  struct QueueMessage {
    T *data;
    std::condition_variable_any cond;
    bool chan_closed = false;
    bool completed = false;

    QueueMessage(T *item) : data(item) {}

    void Wait(std::unique_lock<std::recursive_mutex> &lock) {
      cond.wait(lock, [this]() { return completed; });
    }

    void Notify() {
      completed = true;
      cond.notify_all();
    }
  };

  bool send_return(bool value) {
    send_ctr--;
    destructor_cond_.notify_all();
    return value;
  }

  bool recv_return(bool value) {
    recv_ctr--;
    destructor_cond_.notify_all();
    return value;
  }

  size_t cap_;
  std::recursive_mutex mu_;
  bool closed_;
  std::deque<T> buf_;
  std::deque<std::shared_ptr<QueueMessage>> recvq;
  std::deque<std::shared_ptr<QueueMessage>> sendq;
  std::atomic<unsigned> send_ctr{0};
  std::atomic<unsigned> recv_ctr{0};
  std::condition_variable_any destructor_cond_;
};

template <typename T>
ChannelImpl<T>::ChannelImpl(size_t capacity)
    : cap_(capacity), closed_(false), send_ctr(0), recv_ctr(0) {
  PADDLE_ENFORCE_GE(capacity, 0);
}

template <typename T>
bool ChannelImpl<T>::Send(T *item) {
  send_ctr++;
  std::unique_lock<std::recursive_mutex> lock{mu_};

  // If the channel is closed, fail the send immediately
  if (closed_) {
    lock.unlock();
    // TODO(abhinavarora) Should panic on closed channel
    return send_return(false);
  }

  // If there is a receiver, directly pass the value we want
  // to send to the receiver, bypassing the channel buffer if any
  if (!recvq.empty()) {
    std::shared_ptr<QueueMessage> m = recvq.front();
    recvq.pop_front();
    // Do the data transfer
    *(m->data) = std::move(*item);
    // Wake up the blocked process and unlock
    m->Notify();
    lock.unlock();
    return send_return(true);
  }

  // An unbuffered channel always skips this branch.
  // If a buffered channel has space in its buffer,
  // write the element to the buffer.
  if (buf_.size() < cap_) {
    // Copy to buffer
    buf_.push_back(std::move(*item));
    // Release lock and return true
    lock.unlock();
    return send_return(true);
  }

  // Block on the channel: some receiver will complete
  // the operation for us
  auto m = std::make_shared<QueueMessage>(item);
  sendq.push_back(m);
  m->Wait(lock);
  // TODO(abhinavarora) Should panic on closed channel
  return send_return(!m->chan_closed);
}

template <typename T>
bool ChannelImpl<T>::Receive(T *item) {
  recv_ctr++;
  std::unique_lock<std::recursive_mutex> lock{mu_};

  // If the channel is closed and the buffer is empty
  // (always the case for an unbuffered channel), fail immediately
  if (closed_ && buf_.empty()) {
    lock.unlock();
    return recv_return(false);
  }

  // If there is a sender, directly receive the value we want
  // from the sender, bypassing the channel buffer if any
  if (!sendq.empty()) {
    std::shared_ptr<QueueMessage> m = sendq.front();
    sendq.pop_front();
    // Do the data transfer
    *item = std::move(*(m->data));
    // Wake up the blocked process and unlock
    m->Notify();
    lock.unlock();
    return recv_return(true);
  }

  // If this is a buffered channel and there are items in the buffer
  if (buf_.size() > 0) {
    // Directly read from the buffer
    *item = std::move(buf_.front());
    buf_.pop_front();
    // Release lock and return true
    lock.unlock();
    return recv_return(true);
  }

  // No sender available: block on this channel until
  // some sender completes the operation for us
  auto m = std::make_shared<QueueMessage>(item);
  recvq.push_back(m);
  m->Wait(lock);

  return recv_return(!m->chan_closed);
}

template <typename T>
void ChannelImpl<T>::Lock() {
  mu_.lock();
}

template <typename T>
void ChannelImpl<T>::Unlock() {
  mu_.unlock();
}

template <typename T>
void ChannelImpl<T>::Close() {
  std::unique_lock<std::recursive_mutex> lock{mu_};

  if (closed_) {
    // TODO(abhinavarora): closing an already closed channel should panic
    lock.unlock();
    return;
  }

  closed_ = true;

  // Drain the receive queue, waking each blocked receiver
  while (!recvq.empty()) {
    std::shared_ptr<QueueMessage> m = recvq.front();
    recvq.pop_front();
    m->chan_closed = true;
    m->Notify();
  }

  // Drain the send queue, waking each blocked sender
  while (!sendq.empty()) {
    std::shared_ptr<QueueMessage> m = sendq.front();
    sendq.pop_front();
    m->chan_closed = true;
    m->Notify();
  }
}

template <typename T>
ChannelImpl<T>::~ChannelImpl() {
  Close();
  // The destructor must wait for all readers and writers to complete their
  // task. The channel has been closed, so no new readers or writers will be
  // accepted.
  std::unique_lock<std::recursive_mutex> lock{mu_};
  destructor_cond_.wait(lock,
                        [this]() { return send_ctr == 0 && recv_ctr == 0; });
}

}  // namespace framework
}  // namespace paddle
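A minimal usage sketch of the channel API introduced above (not part of the commit). It assumes only what the header declares: MakeChannel<T>(size_t) allocates a channel, Send/Receive block as described in the comments, CloseChannel<T>() unblocks waiters, and the caller owns the returned pointer.

#include <iostream>
#include <thread>
#include "paddle/fluid/framework/channel.h"

int main() {
  using paddle::framework::Channel;
  // Buffered channel of capacity 2: sends beyond capacity block
  // until a receiver drains the buffer.
  Channel<int>* ch = paddle::framework::MakeChannel<int>(2);

  std::thread producer([ch] {
    for (int i = 0; i < 4; ++i) {
      int v = i;
      ch->Send(&v);  // blocks on the 3rd element until a Receive runs
    }
    paddle::framework::CloseChannel(ch);
  });

  int v;
  while (ch->Receive(&v)) {  // returns false once closed and drained
    std::cout << v << "\n";
  }
  producer.join();
  delete ch;  // ownership assumption: caller frees the channel
  return 0;
}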
File diff suppressed because it is too large
@@ -0,0 +1 @@
data_type_transform.cc
@@ -0,0 +1,215 @@
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/data_type_transform.h"
#include "paddle/fluid/framework/tensor_util.h"

#include "gtest/gtest.h"

TEST(DataTypeTransform, GPUTransform) {
  using namespace paddle::framework;
  using namespace paddle::platform;

  auto cpu_place = CPUPlace();
  auto gpu_place = CUDAPlace(0);
  CUDADeviceContext context(gpu_place);

  auto kernel_fp16 = OpKernelType(proto::VarType::FP16, gpu_place,
                                  DataLayout::kAnyLayout, LibraryType::kPlain);
  auto kernel_fp32 = OpKernelType(proto::VarType::FP32, gpu_place,
                                  DataLayout::kAnyLayout, LibraryType::kPlain);
  auto kernel_fp64 = OpKernelType(proto::VarType::FP64, gpu_place,
                                  DataLayout::kAnyLayout, LibraryType::kPlain);
  auto kernel_int32 = OpKernelType(proto::VarType::INT32, gpu_place,
                                   DataLayout::kAnyLayout, LibraryType::kPlain);
  auto kernel_int64 = OpKernelType(proto::VarType::INT64, gpu_place,
                                   DataLayout::kAnyLayout, LibraryType::kPlain);
  auto kernel_bool = OpKernelType(proto::VarType::BOOL, gpu_place,
                                  DataLayout::kAnyLayout, LibraryType::kPlain);

  // data type transform from float32
  {
    Tensor in;
    Tensor in_gpu;
    Tensor out_gpu;
    Tensor out;

    float* in_ptr = in.mutable_data<float>(make_ddim({2, 3}), cpu_place);
    float arr[6] = {0, 1, 2, 3, 4, 5};
    int data_number = sizeof(arr) / sizeof(arr[0]);
    memcpy(in_ptr, arr, sizeof(arr));
    TensorCopy(in, gpu_place, context, &in_gpu);

    TransDataType(kernel_fp32, kernel_fp64, in_gpu, &out_gpu);
    TensorCopy(out_gpu, cpu_place, context, &out);
    context.Wait();

    double* out_data_double = out.data<double>();
    for (int i = 0; i < data_number; ++i) {
      ASSERT_EQ(out_data_double[i], static_cast<double>(arr[i]));
    }

    TransDataType(kernel_fp32, kernel_int32, in_gpu, &out_gpu);
    TensorCopy(out_gpu, cpu_place, context, &out);
    context.Wait();

    int* out_data_int = out.data<int>();
    for (int i = 0; i < data_number; ++i) {
      ASSERT_EQ(out_data_int[i], static_cast<int>(arr[i]));
    }
  }

  // data type transform from/to float16
  {
    Tensor in;
    Tensor in_gpu;
    Tensor out_gpu;
    Tensor out;

    float16* ptr = in.mutable_data<float16>(make_ddim({2, 3}), cpu_place);
    float16 arr[6] = {float16(0), float16(1), float16(2),
                      float16(3), float16(4), float16(5)};
    int data_number = sizeof(arr) / sizeof(arr[0]);
    memcpy(ptr, arr, sizeof(arr));
    TensorCopy(in, gpu_place, context, &in_gpu);

    // transform from float16 to other data types
    TransDataType(kernel_fp16, kernel_fp32, in_gpu, &out_gpu);
    TensorCopy(out_gpu, cpu_place, context, &out);
    context.Wait();

    float* out_data_float = out.data<float>();
    for (int i = 0; i < data_number; ++i) {
      ASSERT_EQ(out_data_float[i], static_cast<float>(ptr[i]));
    }

    TransDataType(kernel_fp16, kernel_fp64, in_gpu, &out_gpu);
    TensorCopy(out_gpu, cpu_place, context, &out);
    context.Wait();

    double* out_data_double = out.data<double>();
    for (int i = 0; i < data_number; ++i) {
      ASSERT_EQ(out_data_double[i], static_cast<double>(ptr[i]));
    }

    TransDataType(kernel_fp16, kernel_int32, in_gpu, &out_gpu);
    TensorCopy(out_gpu, cpu_place, context, &out);
    context.Wait();

    int* out_data_int = out.data<int>();
    for (int i = 0; i < data_number; ++i) {
      ASSERT_EQ(out_data_int[i], static_cast<int>(ptr[i]));
    }

    TransDataType(kernel_fp16, kernel_int64, in_gpu, &out_gpu);
    TensorCopy(out_gpu, cpu_place, context, &out);
    context.Wait();

    int64_t* out_data_int64 = out.data<int64_t>();
    for (int i = 0; i < data_number; ++i) {
      ASSERT_EQ(out_data_int64[i], static_cast<int64_t>(ptr[i]));
    }

    TransDataType(kernel_fp16, kernel_bool, in_gpu, &out_gpu);
    TensorCopy(out_gpu, cpu_place, context, &out);
    context.Wait();

    bool* out_data_bool = out.data<bool>();
    for (int i = 0; i < data_number; ++i) {
      ASSERT_EQ(out_data_bool[i], static_cast<bool>(ptr[i]));
    }

    // transform float to float16
    float* in_data_float = in.mutable_data<float>(make_ddim({2, 3}), cpu_place);
    for (int i = 0; i < data_number; ++i) {
      in_data_float[i] = i;
    }

    TensorCopy(in, gpu_place, context, &in_gpu);
    TransDataType(kernel_fp32, kernel_fp16, in_gpu, &out_gpu);
    TensorCopy(out_gpu, cpu_place, context, &out);
    context.Wait();

    ptr = out.data<float16>();
    for (int i = 0; i < data_number; ++i) {
      ASSERT_EQ(ptr[i].x, static_cast<float16>(in_data_float[i]).x);
    }

    // transform double to float16
    double* in_data_double =
        in.mutable_data<double>(make_ddim({2, 3}), cpu_place);
    for (int i = 0; i < data_number; ++i) {
      in_data_double[i] = i;
    }

    TensorCopy(in, gpu_place, context, &in_gpu);
    TransDataType(kernel_fp64, kernel_fp16, in_gpu, &out_gpu);
    TensorCopy(out_gpu, cpu_place, context, &out);
    context.Wait();

    ptr = out.data<float16>();
    for (int i = 0; i < data_number; ++i) {
      ASSERT_EQ(ptr[i].x, static_cast<float16>(in_data_double[i]).x);
    }

    // transform int to float16
    int* in_data_int = in.mutable_data<int>(make_ddim({2, 3}), cpu_place);
    for (int i = 0; i < data_number; ++i) {
      in_data_int[i] = i;
    }

    TensorCopy(in, gpu_place, context, &in_gpu);
    TransDataType(kernel_int32, kernel_fp16, in_gpu, &out_gpu);
    TensorCopy(out_gpu, cpu_place, context, &out);
    context.Wait();

    ptr = out.data<float16>();
    for (int i = 0; i < data_number; ++i) {
      ASSERT_EQ(ptr[i].x, static_cast<float16>(in_data_int[i]).x);
    }

    // transform int64 to float16
    int64_t* in_data_int64 =
        in.mutable_data<int64_t>(make_ddim({2, 3}), cpu_place);
    for (int i = 0; i < data_number; ++i) {
      in_data_int64[i] = i;
    }

    TensorCopy(in, gpu_place, context, &in_gpu);
    TransDataType(kernel_int64, kernel_fp16, in_gpu, &out_gpu);
    TensorCopy(out_gpu, cpu_place, context, &out);
    context.Wait();

    ptr = out.data<float16>();
    for (int i = 0; i < data_number; ++i) {
      ASSERT_EQ(ptr[i].x, static_cast<float16>(in_data_int64[i]).x);
    }

    // transform bool to float16
    bool* in_data_bool = in.mutable_data<bool>(make_ddim({2, 3}), cpu_place);
    for (int i = 0; i < data_number; ++i) {
      in_data_bool[i] = i;
    }

    TensorCopy(in, gpu_place, context, &in_gpu);
    TransDataType(kernel_bool, kernel_fp16, in_gpu, &out_gpu);
    TensorCopy(out_gpu, cpu_place, context, &out);
    context.Wait();

    ptr = out.data<float16>();
    for (int i = 0; i < data_number; ++i) {
      ASSERT_EQ(ptr[i].x, static_cast<float16>(in_data_bool[i]).x);
    }
  }
}
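The test above exercises TransDataType, which at its core performs an element-wise cast between tensor element types. A framework-free sketch of that core loop; CastBuffer is a hypothetical stand-in, not a Paddle API:

#include <cassert>
#include <cstddef>

// Hypothetical stand-in for the per-element cast a data type transform
// kernel performs over a tensor's buffer.
template <typename InT, typename OutT>
void CastBuffer(const InT* in, OutT* out, size_t n) {
  for (size_t i = 0; i < n; ++i) out[i] = static_cast<OutT>(in[i]);
}

int main() {
  float in[6] = {0, 1, 2, 3, 4, 5};
  double out[6];
  CastBuffer(in, out, 6);
  // Mirrors the ASSERT_EQ pattern used in the test above.
  for (int i = 0; i < 6; ++i) assert(out[i] == static_cast<double>(in[i]));
  return 0;
}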
@@ -1,142 +0,0 @@
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <atomic>
#include <condition_variable>
#include <deque>
#include <mutex>

#include "paddle/fluid/framework/channel.h"
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace framework {
namespace details {

// Four properties of a buffered channel:
// - A send to a full channel blocks until a receive from the
//   channel occurs or the channel is closed.
// - A receive from an empty channel blocks until a send to the
//   channel occurs or the channel is closed.
// - A send to a closed channel returns false immediately.
// - A receive from a closed channel returns false immediately.

template <typename T>
class Buffered : public paddle::framework::Channel<T> {
  friend Channel<T>* paddle::framework::MakeChannel<T>(size_t);
  friend void paddle::framework::CloseChannel<T>(Channel<T>*);

 public:
  virtual bool Send(T*);
  virtual bool Receive(T*);
  virtual size_t Cap() { return cap_; }
  virtual void Close();
  virtual ~Buffered();

 private:
  size_t cap_;
  std::mutex mu_;
  std::condition_variable empty_cond_var_;
  std::condition_variable full_cond_var_;
  std::condition_variable destructor_cond_var_;
  std::deque<T> channel_;
  std::atomic<bool> closed_{false};
  std::atomic<unsigned> send_ctr{0};
  std::atomic<unsigned> recv_ctr{0};

  Buffered(size_t cap) : cap_(cap), closed_(false) {
    PADDLE_ENFORCE_GT(cap, 0);
  }

  void NotifyAllParticipants(std::unique_lock<std::mutex>*);
};

template <typename T>
bool Buffered<T>::Send(T* item) {
  bool ret = false;
  if (closed_) {
    return ret;
  }
  send_ctr++;
  std::unique_lock<std::mutex> lock(mu_);
  full_cond_var_.wait(lock,
                      [this]() { return channel_.size() < cap_ || closed_; });
  if (!closed_) {
    channel_.push_back(std::move(*item));
    lock.unlock();
    empty_cond_var_.notify_one();
    ret = true;
  }
  send_ctr--;
  destructor_cond_var_.notify_one();
  return ret;
}

template <typename T>
bool Buffered<T>::Receive(T* item) {
  bool ret = false;
  // Once the channel has been closed and all data has been consumed,
  // just return false. Don't even try acquiring the mutex.
  if (closed_ && channel_.empty()) {
    return false;
  }
  recv_ctr++;
  std::unique_lock<std::mutex> lock(mu_);
  empty_cond_var_.wait(lock, [this]() { return !channel_.empty() || closed_; });
  if (!channel_.empty()) {
    *item = std::move(channel_.front());
    channel_.pop_front();
    full_cond_var_.notify_one();
    ret = true;
  }
  recv_ctr--;
  destructor_cond_var_.notify_one();
  return ret;
}

template <typename T>
void Buffered<T>::Close() {
  if (closed_) {
    return;
  }
  std::unique_lock<std::mutex> lock(mu_);
  closed_ = true;
  NotifyAllParticipants(&lock);
}

template <typename T>
Buffered<T>::~Buffered() {
  std::unique_lock<std::mutex> lock(mu_);
  closed_ = true;
  channel_.clear();
  NotifyAllParticipants(&lock);

  // The destructor must wait for all readers and writers to complete their
  // task. The channel has been closed, so no new readers or writers will be
  // accepted.
  lock.lock();
  destructor_cond_var_.wait(
      lock, [this]() { return send_ctr == 0 && recv_ctr == 0; });
}

template <typename T>
void Buffered<T>::NotifyAllParticipants(std::unique_lock<std::mutex>* lock) {
  lock->unlock();
  full_cond_var_.notify_all();
  empty_cond_var_.notify_all();
}

}  // namespace details
}  // namespace framework
}  // namespace paddle
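A hypothetical check of the close semantics listed in the property comments above, using only the public Channel<T> API from channel.h (not part of the commit). Note one nuance visible in the code itself: a buffered channel still lets already-buffered items be drained after Close, and only then do receives fail.

#include <cassert>
#include "paddle/fluid/framework/channel.h"

int main() {
  auto* ch = paddle::framework::MakeChannel<int>(4);
  int v = 7;
  bool sent = ch->Send(&v);  // succeeds: the buffer has room
  assert(sent);
  paddle::framework::CloseChannel(ch);
  int w = 8;
  bool sent_after_close = ch->Send(&w);  // send to a closed channel fails
  assert(!sent_after_close);
  int got = 0;
  bool drained = ch->Receive(&got);  // buffered item can still be drained
  assert(drained && got == 7);
  bool empty_recv = ch->Receive(&got);  // then receives fail as documented
  assert(!empty_recv);
  delete ch;
  return 0;
}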
@@ -1,174 +0,0 @@
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <atomic>
#include <condition_variable>
#include <mutex>

#include "paddle/fluid/framework/channel.h"

namespace paddle {
namespace framework {
namespace details {

// Four properties of an unbuffered channel:
// - A send to the channel blocks until a receive from the
//   channel occurs or the channel is closed.
// - A receive from the channel blocks until a send to the
//   channel occurs or the channel is closed.
// - A send to a closed channel returns false immediately.
// - A receive from a closed channel returns false immediately.
template <typename T>
class UnBuffered : public paddle::framework::Channel<T> {
  friend Channel<T>* paddle::framework::MakeChannel<T>(size_t);
  friend void paddle::framework::CloseChannel<T>(Channel<T>*);

 public:
  virtual bool Send(T*);
  virtual bool Receive(T*);
  virtual size_t Cap() { return 0; }
  virtual void Close();
  virtual ~UnBuffered();

 private:
  std::mutex mu_ch_;
  // Mutexes for readers and writers that are waiting for another
  // reader or writer to complete execution
  std::recursive_mutex mu_read_, mu_write_;
  // reader_found_ is set true when a reader is ready to accept data;
  // writer_found_ is set true when a writer is ready to send data.
  // A transaction occurs only when both are true.
  std::atomic<bool> reader_found_{false}, writer_found_{false};
  std::condition_variable cv_channel_;
  std::condition_variable_any cv_reader_, cv_writer_, cv_destructor_;
  T* item{nullptr};
  std::atomic<bool> closed_{false};
  std::atomic<unsigned> send_ctr{0};
  std::atomic<unsigned> recv_ctr{0};

  UnBuffered() : closed_(false) {}

  void NotifyAllParticipants(std::unique_lock<std::mutex>*);
};

// Implements how data is handed off from a writer to a reader.
template <typename T>
bool UnBuffered<T>::Send(T* data) {
  bool ret = false;
  if (closed_) {
    return ret;
  }
  send_ctr++;
  // Prevent other writers from entering
  std::unique_lock<std::recursive_mutex> writer_lock(mu_write_);
  writer_found_ = true;
  std::unique_lock<std::recursive_mutex> cv_lock(mu_write_);
  // If the writer arrives first, it waits until a reader arrives
  cv_writer_.wait(cv_lock,
                  [this]() { return reader_found_ == true || closed_; });
  cv_reader_.notify_one();
  if (!closed_) {
    std::unique_lock<std::mutex> channel_lock(mu_ch_);
    item = data;
    channel_lock.unlock();
    cv_channel_.notify_one();
    channel_lock.lock();
    cv_channel_.wait(channel_lock,
                     [this]() { return item == nullptr || closed_; });
    ret = true;
  }
  writer_found_ = false;
  send_ctr--;
  cv_destructor_.notify_one();
  return ret;
}

// Implements how data sent by a writer is read by a reader.
template <typename T>
bool UnBuffered<T>::Receive(T* data) {
  bool ret = false;
  // If the channel is closed, we don't even want any reader to enter.
  // Unlike a buffered channel, an unbuffered channel does not allow
  // readers to read after closing because there is no buffer to be consumed.
  if (closed_) return ret;
  recv_ctr++;
  // Prevent other readers from entering
  std::unique_lock<std::recursive_mutex> read_lock{mu_read_};
  reader_found_ = true;
  std::unique_lock<std::recursive_mutex> cv_lock{mu_read_};
  // If the reader arrives first, it waits until a writer arrives
  cv_reader_.wait(cv_lock,
                  [this]() { return writer_found_ == true || closed_; });
  cv_writer_.notify_one();
  if (!closed_) {
    std::unique_lock<std::mutex> lock_ch{mu_ch_};
    // The reader waits for the writer to first write its data
    cv_channel_.wait(lock_ch, [this]() { return item != nullptr || closed_; });
    if (!closed_) {
      *data = std::move(*item);
      item = nullptr;
      lock_ch.unlock();
      ret = true;
    }
    cv_channel_.notify_one();
  }
  reader_found_ = false;
  recv_ctr--;
  cv_destructor_.notify_one();
  return ret;
}

// Implements the sequence of events that takes place once the
// channel is closed.
template <typename T>
void UnBuffered<T>::Close() {
  if (closed_) {
    return;
  }
  std::unique_lock<std::mutex> lock(mu_ch_);
  item = nullptr;
  closed_ = true;
  NotifyAllParticipants(&lock);
}

// Implements the sequence of events that is executed once an
// UnBuffered channel object is destroyed.
template <typename T>
UnBuffered<T>::~UnBuffered() {
  std::unique_lock<std::mutex> lock(mu_ch_);
  item = nullptr;
  closed_ = true;
  NotifyAllParticipants(&lock);
  lock.lock();
  cv_destructor_.wait(lock,
                      [this]() { return send_ctr == 0 && recv_ctr == 0; });
}

// Notifies all the readers, writers, and the channel condition variables.
template <typename T>
void UnBuffered<T>::NotifyAllParticipants(std::unique_lock<std::mutex>* lock) {
  lock->unlock();
  cv_writer_.notify_all();
  cv_channel_.notify_all();
  cv_reader_.notify_all();
}

}  // namespace details
}  // namespace framework
}  // namespace paddle
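A hypothetical rendezvous sketch for the zero-capacity case described above, again using only the public Channel<T> API (not part of the commit): a Send blocks until a matching Receive completes it.

#include <cassert>
#include <thread>
#include "paddle/fluid/framework/channel.h"

int main() {
  auto* ch = paddle::framework::MakeChannel<int>(0);  // capacity 0: unbuffered

  std::thread sender([ch] {
    int v = 42;
    bool ok = ch->Send(&v);  // blocks here until the main thread receives
    assert(ok);
  });

  int got = 0;
  bool ok = ch->Receive(&got);  // rendezvous: completes the blocked Send
  assert(ok && got == 42);
  sender.join();

  paddle::framework::CloseChannel(ch);
  delete ch;
  return 0;
}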
File diff suppressed because it is too large
@@ -1,246 +0,0 @@
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/reader.h"

namespace paddle {
namespace operators {

static std::vector<framework::DDim> RestoreShapes(
    const std::vector<int>& shape_concat, const std::vector<int>& ranks) {
  std::vector<framework::DDim> res;
  int offset = 0;
  for (int len : ranks) {
    auto start_it = shape_concat.begin() + offset;
    auto end_it = start_it + len;
    res.push_back(framework::make_ddim(std::vector<int>(start_it, end_it)));
    offset += len;
  }
  return res;
}

// general infershape for file readers
class CreateFileReaderInferShape : public framework::InferShapeBase {
 public:
  void operator()(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "The output file reader should not be null.");
    const auto shape_concat =
        ctx->Attrs().Get<std::vector<int>>("shape_concat");
    const auto ranks = ctx->Attrs().Get<std::vector<int>>("ranks");
    std::vector<framework::DDim> shapes = RestoreShapes(shape_concat, ranks);
    ctx->SetReaderDims("Out", shapes);

    if (ctx->IsRuntime()) {
      const auto lod_levels = ctx->Attrs().Get<std::vector<int>>("lod_levels");
      PADDLE_ENFORCE_EQ(
          lod_levels.size(), shapes.size(),
          "The number of 'lod_levels'(%d) doesn't match the number "
          "of 'shapes'(%d).",
          lod_levels.size(), shapes.size());
      framework::VarDesc* reader =
          boost::get<framework::VarDesc*>(ctx->GetOutputVarPtrs("Out")[0]);
      reader->SetLoDLevels(lod_levels);
    }
  }
};

// general infershape for decorated readers
class CreateDecoratedReaderInferShape : public framework::InferShapeBase {
 public:
  void operator()(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("UnderlyingReader"),
                   "Input(UnderlyingReader) should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "The output decorated reader should not be null.");
    ctx->SetReaderDims("Out", ctx->GetReaderDims("UnderlyingReader"));

    if (ctx->IsRuntime()) {
      framework::VarDesc* in_reader = boost::get<framework::VarDesc*>(
          ctx->GetInputVarPtrs("UnderlyingReader")[0]);
      framework::VarDesc* out_reader =
          boost::get<framework::VarDesc*>(ctx->GetOutputVarPtrs("Out")[0]);
      out_reader->SetLoDLevels(in_reader->GetLoDLevels());
    }
  }
};

// general var type inference for file readers
class CreateFileReaderInferVarType : public framework::VarTypeInference {
 public:
  void operator()(const framework::OpDesc& op_desc,
                  framework::BlockDesc* block) const override {
    std::string reader_name = op_desc.Output("Out")[0];
    framework::VarDesc* reader = block->FindVarRecursive(reader_name);
    reader->SetType(framework::proto::VarType::READER);
  }
};

// general var type inference for decorated readers
class CreateDecoratedReaderInferVarType : public framework::VarTypeInference {
 public:
  void operator()(const framework::OpDesc& op_desc,
                  framework::BlockDesc* block) const override {
    std::string in_reader_name = op_desc.Input("UnderlyingReader")[0];
    framework::VarDesc* in_reader = block->FindVarRecursive(in_reader_name);
    std::string out_reader_name = op_desc.Output("Out")[0];
    framework::VarDesc* out_reader = block->FindVarRecursive(out_reader_name);
    out_reader->SetType(framework::proto::VarType::READER);
    out_reader->SetDataTypes(in_reader->GetDataTypes());
  }
};

template <typename T>
class CreateRandomDataGeneratorOp : public framework::OperatorBase {
 public:
  using framework::OperatorBase::OperatorBase;

 private:
  void RunImpl(const framework::Scope& scope,
               const platform::Place& dev_place) const override {
    const auto& shape_concat = Attr<std::vector<int>>("shape_concat");
    const auto& ranks = Attr<std::vector<int>>("ranks");
    PADDLE_ENFORCE(!shape_concat.empty() && !ranks.empty());
    PADDLE_ENFORCE_EQ(std::accumulate(ranks.begin(), ranks.end(), 0),
                      int(shape_concat.size()),
                      "The sum of all ranks should be equal to the "
                      "length of shape_concat.");
    std::vector<framework::DDim> shapes = RestoreShapes(shape_concat, ranks);
    auto* out = scope.FindVar(Output("Out"))
                    ->template GetMutable<framework::ReaderHolder>();
    out->Reset(new framework::RandomDataGenerator<T>(shapes, Attr<float>("min"),
                                                     Attr<float>("max")));
  }
};

class CreateRandomDataGeneratorOpMaker
    : public framework::OpProtoAndCheckerMaker {
 public:
  CreateRandomDataGeneratorOpMaker(OpProto* op_proto, OpAttrChecker* op_checker)
      : OpProtoAndCheckerMaker(op_proto, op_checker) {
    AddOutput("Out", "(ReaderHolder) The created random reader.");
    AddAttr<std::vector<int>>("shape_concat",
                              "The concat of all data's shapes.");
    AddAttr<std::vector<int>>(
        "ranks",
        "The ranks of each data. "
        "e.g. shape_concat = [2,3,4,5,6] and ranks = [3,2] "
        "mean the reader will generate two pieces of data each time, "
        "whose shapes are [2,3,4] and [5,6] respectively.");
    AddAttr<std::vector<int>>("lod_levels", "The LoD levels of each data.");
    AddAttr<float>("min", "The lower bound of the reader's uniform distribution.");
    AddAttr<float>("max", "The upper bound of the reader's uniform distribution.");
    AddComment(R"DOC(
CreateRandomDataGenerator Operator

This Op creates a random reader.
The reader generates random data instead of really reading from files.
The generated data follow a uniform distribution between 'min' and 'max'.
)DOC");
  }
};

class CreateShuffleReaderOp : public framework::OperatorBase {
 public:
  using framework::OperatorBase::OperatorBase;

 private:
  void RunImpl(const framework::Scope& scope,
               const platform::Place& dev_place) const override {
    const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader"))
                                        ->Get<framework::ReaderHolder>();
    auto* out = scope.FindVar(Output("Out"))
                    ->template GetMutable<framework::ReaderHolder>();
    out->Reset(new framework::ShuffleReader(underlying_reader.Get(),
                                            Attr<int>("buffer_size")));
  }
};

class CreateShuffleReaderOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  CreateShuffleReaderOpMaker(OpProto* op_proto, OpAttrChecker* op_checker)
      : OpProtoAndCheckerMaker(op_proto, op_checker) {
    AddInput(
        "UnderlyingReader",
        "(ReaderHolder) The underlying reader for creating a shuffle reader.");
    AddOutput("Out", "(ReaderHolder) The created shuffle reader.");
    AddAttr<int>("buffer_size", "The shuffle buffer size.").GreaterThan(0);
    AddComment(R"DOC(
CreateShuffleReader Operator

A shuffle reader takes another reader as its 'underlying reader'
and yields the underlying reader's outputs in a shuffled order.
)DOC");
  }
};

class CreateBatchReaderOp : public framework::OperatorBase {
 public:
  using framework::OperatorBase::OperatorBase;

 private:
  void RunImpl(const framework::Scope& scope,
               const platform::Place& dev_place) const override {
    const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader"))
                                        ->Get<framework::ReaderHolder>();
    auto* out = scope.FindVar(Output("Out"))
                    ->template GetMutable<framework::ReaderHolder>();
    out->Reset(new framework::BatchReader(underlying_reader.Get(),
                                          Attr<int>("batch_size")));
  }
};

class CreateBatchReaderOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  CreateBatchReaderOpMaker(OpProto* op_proto, OpAttrChecker* op_checker)
      : OpProtoAndCheckerMaker(op_proto, op_checker) {
    AddInput(
        "UnderlyingReader",
        "(ReaderHolder) The underlying reader for creating a batch reader.");
    AddOutput("Out", "(ReaderHolder) The created batch reader.");
    AddAttr<int>("batch_size",
                 "How many instances the batch reader yields each time.")
        .GreaterThan(0);
    AddComment(R"DOC(
CreateBatchReader Operator

A batch reader takes another reader as its 'underlying reader',
gathers the underlying reader's outputs and then yields them in batches.
)DOC");
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(create_random_data_generator,
                  ops::CreateRandomDataGeneratorOp<float>,
                  ops::CreateFileReaderInferShape,
                  ops::CreateRandomDataGeneratorOpMaker,
                  paddle::framework::EmptyGradOpMaker,
                  ops::CreateFileReaderInferVarType);
REGISTER_OPERATOR(create_shuffle_reader, ops::CreateShuffleReaderOp,
                  ops::CreateDecoratedReaderInferShape,
                  ops::CreateShuffleReaderOpMaker,
                  paddle::framework::EmptyGradOpMaker,
                  ops::CreateDecoratedReaderInferVarType);
REGISTER_OPERATOR(create_batch_reader, ops::CreateBatchReaderOp,
                  ops::CreateDecoratedReaderInferShape,
                  ops::CreateBatchReaderOpMaker,
                  paddle::framework::EmptyGradOpMaker,
                  ops::CreateDecoratedReaderInferVarType);
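To make RestoreShapes concrete: it splits the flat shape_concat attribute into one shape per tensor, taking ranks[i] entries at a time. A standalone sketch, with std::vector<int> standing in for framework::DDim, using the example from the 'ranks' attribute documentation above:

#include <cassert>
#include <vector>

// Splits a flat shape list into per-tensor shapes; ranks[i] gives how many
// entries of shape_concat belong to tensor i.
std::vector<std::vector<int>> RestoreShapes(const std::vector<int>& shape_concat,
                                            const std::vector<int>& ranks) {
  std::vector<std::vector<int>> res;
  int offset = 0;
  for (int len : ranks) {
    res.emplace_back(shape_concat.begin() + offset,
                     shape_concat.begin() + offset + len);
    offset += len;
  }
  return res;
}

int main() {
  auto shapes = RestoreShapes({2, 3, 4, 5, 6}, {3, 2});
  assert((shapes[0] == std::vector<int>{2, 3, 4}));
  assert((shapes[1] == std::vector<int>{5, 6}));
  return 0;
}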
@@ -1 +1,3 @@
-grpc_library(sendrecvop_grpc SRCS sendrecvop_utils.cc grpc_client.cc grpc_server.cc PROTO send_recv.proto DEPS lod_tensor selected_rows)
+if(WITH_DISTRIBUTE)
+  grpc_library(sendrecvop_grpc SRCS sendrecvop_utils.cc grpc_client.cc grpc_server.cc PROTO send_recv.proto DEPS lod_tensor selected_rows)
+endif()
Some files were not shown because too many files have changed in this diff