Add Size() and Capacity() to the GPU queue.

pull/4472/head
anthonyaje 5 years ago
parent cc23f1d819
commit e2b346d5af

@ -52,6 +52,7 @@ BlockQueueStatus_T GpuQueue::Push(const std::vector<DataItemGpu> &data) {
CHECK_CUDA_RET_WITH_ERROR(cudaEventCreate(&(*(node_info_[tail_].event_))), "Cuda Create Event Failed");
node_info_[tail_].data_ = data;
tail_ = (tail_ + 1) % (capacity_);
++size_;
return SUCCESS;
}
@ -69,6 +70,7 @@ BlockQueueStatus_T GpuQueue::Front(void **addr, size_t *len) const {
// Removes the front element of the ring buffer by advancing the head index
// (with wrap-around at capacity_) and shrinking the logical size.
// NOTE(review): there is no emptiness check here — size_ is unsigned, so a
// Pop() on an empty queue would wrap size_ to a huge value; presumably the
// caller (BlockingQueue) guards with IsEmpty() under its mutex — confirm.
// Always returns SUCCESS.
BlockQueueStatus_T GpuQueue::Pop() {
head_ = (head_ + 1) % (capacity_);
--size_;
return SUCCESS;
}

@ -44,13 +44,15 @@ class GpuQueue {
void RegisterRelease(const std::function<void(void *)> &func) { host_release_ = func; }
inline bool IsEmpty() const { return head_ == tail_; }
inline bool IsFull() const { return head_ == ((tail_ + 1) % (capacity_)); }
// True when the queue currently holds no elements.
inline bool IsEmpty() const { return size_ == 0; }
// True when the queue has reached its fixed capacity.
inline bool IsFull() const { return size_ == capacity_; }
BlockQueueStatus_T Push(const std::vector<DataItemGpu> &data);
BlockQueueStatus_T Front(void **ptr, size_t *len) const;
BlockQueueStatus_T Pop();
bool Destroy();
size_t Size() { return size_; }
size_t Capacity() { return capacity_; }
private:
struct NodeInfo {
@ -63,6 +65,7 @@ class GpuQueue {
size_t tail_;
std::vector<size_t> shape_;
size_t len_;
size_t size_;
size_t capacity_;
cudaStream_t stream_;
std::unique_ptr<NodeInfo[]> node_info_;
@ -83,6 +86,8 @@ class BlockingQueue {
BlockQueueStatus_T Front(void **ptr, size_t *len);
BlockQueueStatus_T Pop();
bool Destroy();
size_t Size() { return queue_->Size(); }
// Fixed capacity of the underlying GpuQueue.
// NOTE(review): read without mutex_ — safe only if capacity_ is set once
// before concurrent use and never changed afterwards; confirm against Create().
size_t Capacity() { return queue_->Capacity(); }
private:
std::mutex mutex_;

@ -187,5 +187,39 @@ bool GpuBufferMgr::CloseNotify() {
}
// Signals the semaphore to acknowledge a close request — presumably pairs
// with a wait inside CloseNotify(); confirm against the sema usage there.
void GpuBufferMgr::CloseConfirm() { sema.Signal(); }
size_t GpuBufferMgr::Size(unsigned int handle) {
if (handle == HandleMgr::INVALID_HANDLE) {
MS_LOG(ERROR) << "handle is invalid";
return 0;
}
return handle_queue_map_.at(handle)->Size();
}
// Returns the current number of elements in the queue registered under the
// key "<device_id>_<channel_name>", or 0 (with an error log) when no such
// queue exists. Uses a single find() instead of count() followed by at(),
// avoiding a second map lookup.
size_t GpuBufferMgr::Size(unsigned int device_id, const std::string &channel_name) {
  std::string name = std::to_string(device_id) + std::string("_") + channel_name;
  auto iter = name_queue_map_.find(name);
  if (iter == name_queue_map_.end()) {
    MS_LOG(ERROR) << "Queue not exist " << name;
    return 0;
  }
  return iter->second->Size();
}
size_t GpuBufferMgr::Capacity(unsigned int handle) {
if (handle == HandleMgr::INVALID_HANDLE) {
MS_LOG(ERROR) << "handle is invalid";
return 0;
}
return handle_queue_map_.at(handle)->Capacity();
}
// Returns the capacity of the queue registered under the key
// "<device_id>_<channel_name>", or 0 (with an error log) when no such queue
// exists. Uses a single find() instead of count() followed by at(),
// avoiding a second map lookup.
size_t GpuBufferMgr::Capacity(unsigned int device_id, const std::string &channel_name) {
  std::string name = std::to_string(device_id) + std::string("_") + channel_name;
  auto iter = name_queue_map_.find(name);
  if (iter == name_queue_map_.end()) {
    MS_LOG(ERROR) << "Queue not exist " << name;
    return 0;
  }
  return iter->second->Capacity();
}
} // namespace device
} // namespace mindspore

@ -111,6 +111,14 @@ class GpuBufferMgr {
// call for dataset send thread
EXPORT void CloseConfirm();
EXPORT size_t Size(unsigned int handle);
EXPORT size_t Size(unsigned int device_id, const std::string &channel_name);
EXPORT size_t Capacity(unsigned int handle);
EXPORT size_t Capacity(unsigned int device_id, const std::string &channel_name);
private:
void set_device() const;

Loading…
Cancel
Save