commit 8d6f827a0d
@@ -0,0 +1,87 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <condition_variable>
#include <deque>
#include <mutex>

namespace paddle {
namespace framework {

template <typename T>
class Channel {
 public:
  explicit Channel(std::size_t capacity) : capacity_(capacity) {}

  void Send(T* channel_element) {
    std::unique_lock<std::mutex> lock(mu_);

    // A bounded channel blocks until there is room for one more element;
    // an unbounded channel (capacity 0) never blocks on send.
    if (IsBounded()) {
      full_cond_var_.wait(lock, [this]() { return !IsCapacityFull(); });
    }
    channel_.push_back(std::move(*channel_element));

    lock.unlock();
    empty_cond_var_.notify_one();
  }

  T Receive() {
    std::unique_lock<std::mutex> lock(mu_);
    empty_cond_var_.wait(lock, [this]() { return !channel_.empty(); });

    // Take ownership of the front element; the channel stores T by value.
    T channel_element = std::move(channel_.front());
    channel_.pop_front();

    NotifyAllSenders(&lock);
    return channel_element;
  }

  size_t Size() {
    std::unique_lock<std::mutex> lock(mu_);
    return channel_.size();
  }

  void Clear() {
    std::unique_lock<std::mutex> lock(mu_);
    channel_.clear();

    NotifyAllSenders(&lock);
  }

 private:
  std::size_t capacity_;
  std::mutex mu_;
  std::condition_variable empty_cond_var_;
  std::condition_variable full_cond_var_;
  std::deque<T> channel_;

  void NotifyAllSenders(std::unique_lock<std::mutex>* lock) {
    if (IsBounded()) {
      lock->unlock();
      full_cond_var_.notify_one();
    }
  }

  bool IsBounded() const { return capacity_ > 0; }

  bool IsCapacityFull() const { return channel_.size() >= capacity_; }
};

}  // namespace framework
}  // namespace paddle
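
A minimal usage sketch for the channel above. The header path and the surrounding main are illustrative only (this view hides file names), not part of the commit:

#include <thread>
#include "paddle/framework/channel.h"  // assumed path for the header above

int main() {
  paddle::framework::Channel<int> ch(2);  // bounded: Send blocks once 2 elements are queued

  std::thread producer([&ch]() {
    for (int i = 0; i < 4; ++i) {
      ch.Send(&i);  // moves *element into the queue
    }
  });

  for (int i = 0; i < 4; ++i) {
    int v = ch.Receive();  // blocks until a producer has sent something
    (void)v;
  }
  producer.join();
}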

@@ -0,0 +1,56 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/framework/feed_fetch_method.h"
#include "glog/logging.h"
#include "paddle/framework/variable.h"

namespace paddle {
namespace framework {

void SetFeedVariable(Scope* scope, const LoDTensor& input,
                     const std::string& var_name, size_t index) {
  // If no variable named var_name exists in the scope, a new one is created.
  VLOG(3) << "SetFeedVariable name=" << var_name << " index=" << index;
  Variable* g_feed_value = scope->Var(var_name);
  auto& feed_inputs =
      *(g_feed_value->GetMutable<std::vector<paddle::framework::LoDTensor>>());
  if (index >= feed_inputs.size()) {
    feed_inputs.resize(index + 1);
  }
  // Share data with the input tensor rather than copying it.
  feed_inputs[index].ShareDataWith(input);
  // Propagate the input's LoD information.
  feed_inputs[index].set_lod(input.lod());
}

LoDTensor& GetFetchVariable(const Scope& scope, const std::string& var_name,
                            size_t index) {
  // Since we want to fetch a LoDTensor from a variable, the variable must
  // already exist.
  Variable* g_fetch_value = scope.FindVar(var_name);
  PADDLE_ENFORCE(g_fetch_value->IsType<FeedFetchList>(),
                 "Only %s can be invoked by GetFetchVariable",
                 typeid(FeedFetchList).name());
  auto& fetch_outputs = *g_fetch_value->GetMutable<FeedFetchList>();
  // Check the bound before indexing into the list.
  PADDLE_ENFORCE_LT(index, fetch_outputs.size());
  auto& tensor = fetch_outputs[index];
  VLOG(3) << "Fetch " << var_name << " with index " << index
          << " shape= " << tensor.dims();
  return tensor;
}

}  // namespace framework
}  // namespace paddle
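
A hedged sketch of the feed/fetch round trip these helpers enable; the executor step in the middle is elided, the header path is assumed, and the variable names are illustrative:

#include "paddle/framework/feed_fetch_method.h"  // assumed header path

void FeedFetchExample(paddle::framework::Scope* scope,
                      const paddle::framework::LoDTensor& input) {
  // Feed `input` into slot 0 of the variable named "feed".
  paddle::framework::SetFeedVariable(scope, input, "feed", 0);
  // ... an executor would run the program here, filling "fetch" ...
  auto& output = paddle::framework::GetFetchVariable(*scope, "fetch", 0);
  (void)output;
}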

@@ -1,24 +1,93 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/framework/threadpool.h"

namespace paddle {
namespace framework {

std::unique_ptr<ThreadPool> ThreadPool::threadpool_(nullptr);
std::once_flag ThreadPool::init_flag_;

ThreadPool* ThreadPool::GetInstance() {
  std::call_once(init_flag_, &ThreadPool::Init);
  return threadpool_.get();
}

void ThreadPool::Init() {
  if (threadpool_.get() == nullptr) {
    // TODO(Yancey1989): specify the max threads number
    int num_threads = std::thread::hardware_concurrency();
    // hardware_concurrency() may return 0 when it cannot be determined;
    // fail loudly rather than creating an empty pool.
    PADDLE_ENFORCE_GT(num_threads, 0);
    threadpool_.reset(new ThreadPool(num_threads));
  }
}

ThreadPool::ThreadPool(int num_threads)
    : total_threads_(num_threads), idle_threads_(num_threads), running_(true) {
  threads_.resize(num_threads);
  for (auto& thread : threads_) {
    // TODO(Yancey1989): bind each worker thread to a specific CPU core
    thread.reset(new std::thread(std::bind(&ThreadPool::TaskLoop, this)));
  }
}

ThreadPool::~ThreadPool() {
  {
    // Notify all worker threads to stop. Hold the lock so the change to
    // running_ cannot race with the predicate check in TaskLoop's wait.
    std::unique_lock<std::mutex> lock(mutex_);
    running_ = false;
    scheduled_.notify_all();
  }

  for (auto& t : threads_) {
    t->join();
    t.reset(nullptr);
  }
}

void ThreadPool::Wait() {
  std::unique_lock<std::mutex> lock(mutex_);
  completed_.wait(lock, [=] { return Done(); });
}

void ThreadPool::TaskLoop() {
  while (running_) {
    std::unique_lock<std::mutex> lock(mutex_);
    scheduled_.wait(lock, [=] { return !tasks_.empty() || !running_; });

    if (!running_) {
      break;
    }
    // pop a task from the task queue
    auto task = std::move(tasks_.front());
    tasks_.pop();

    --idle_threads_;
    lock.unlock();

    // run the task
    task();

    {
      std::unique_lock<std::mutex> lock(mutex_);
      ++idle_threads_;
      if (Done()) {
        completed_.notify_all();
      }
    }
  }
}

}  // namespace framework
}  // namespace paddle
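
A hedged usage sketch of the singleton above. The task-submission method lives in threadpool.h, which this diff does not show, so the Run call below is an assumption about that interface:

// Illustrative only: Run(...) is assumed from the (unshown) header.
#include "paddle/framework/threadpool.h"

void PoolExample() {
  auto* pool = paddle::framework::ThreadPool::GetInstance();
  for (int i = 0; i < 8; ++i) {
    pool->Run([i]() { /* process shard i */ });
  }
  pool->Wait();  // blocks until Done(): all scheduled tasks have finished
}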
@@ -0,0 +1,24 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/lstmp_op.h"

namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
    lstmp, ops::LSTMPKernel<paddle::platform::CUDADeviceContext, float>,
    ops::LSTMPKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
    lstmp_grad,
    ops::LSTMPGradKernel<paddle::platform::CUDADeviceContext, float>,
    ops::LSTMPGradKernel<paddle::platform::CUDADeviceContext, double>);
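
For contrast, a sketch of the matching CPU-side registration that conventionally lives in lstmp_op.cc; that file is not part of this view, so this is an assumption about its contents:

// Illustrative only: the CPU counterpart of the CUDA registrations above.
REGISTER_OP_CPU_KERNEL(
    lstmp, ops::LSTMPKernel<paddle::platform::CPUDeviceContext, float>,
    ops::LSTMPKernel<paddle::platform::CPUDeviceContext, double>);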