@@ -10,43 +10,45 @@ See the License for the specific language governing permissions and
 limitations under the License. */
#include "paddle/fluid/platform/device_context.h"
|
|
|
|
|
#include <unordered_set>
|
|
|
|
|
#include "paddle/fluid/memory/memory.h"
|
|
|
|
|
|
|
|
|
|
 namespace paddle {
 namespace platform {

 DeviceContextPool* DeviceContextPool::pool = nullptr;
-const platform::DeviceContext* DeviceContextPool::Get(
-    const platform::Place& place) {
+platform::DeviceContext* DeviceContextPool::Get(const platform::Place& place) {
   auto it = device_contexts_.find(place);
   if (it == device_contexts_.end()) {
     PADDLE_THROW(
         "'Place' is not supported, Please re-compile with WITH_GPU "
         "option");
   }
-  return it->second;
+  return it->second.get();
 }
 DeviceContextPool::DeviceContextPool(
     const std::vector<platform::Place>& places) {
   PADDLE_ENFORCE_GT(places.size(), 0);
-  for (size_t i = 0; i < places.size(); i++) {
-    if (platform::is_cpu_place(places[i])) {
+  using PtrType = std::unique_ptr<DeviceContext>;
+  std::unordered_set<Place, PlaceHash> set;
+  for (auto& p : places) {
+    set.insert(p);
+  }
+
+  for (auto& p : set) {
+    if (platform::is_cpu_place(p)) {
 #ifdef PADDLE_WITH_MKLDNN
-      device_contexts_.emplace(places[i],
-                               new platform::MKLDNNDeviceContext(
-                                   boost::get<platform::CPUPlace>(places[i])));
+      device_contexts_.emplace(
+          p, PtrType(new MKLDNNDeviceContext(boost::get<CPUPlace>(p))));
 #else
-      device_contexts_.emplace(places[i],
-                               new platform::CPUDeviceContext(
-                                   boost::get<platform::CPUPlace>(places[i])));
+      device_contexts_.emplace(
+          p, PtrType(new CPUDeviceContext(boost::get<CPUPlace>(p))));
 #endif
-    } else if (platform::is_gpu_place(places[i])) {
+    } else if (platform::is_gpu_place(p)) {
 #ifdef PADDLE_WITH_CUDA
-      device_contexts_.emplace(places[i],
-                               new platform::CUDADeviceContext(
-                                   boost::get<platform::CUDAPlace>(places[i])));
+      device_contexts_.emplace(
+          p, PtrType(new CUDADeviceContext(boost::get<CUDAPlace>(p))));
 #else
       PADDLE_THROW(
           "'CUDAPlace' is not supported, Please re-compile with WITH_GPU "
@@ -159,6 +161,7 @@ CUDADeviceContext::~CUDADeviceContext() {
 Place CUDADeviceContext::GetPlace() const { return place_; }
 void CUDADeviceContext::Wait() const {
+  std::lock_guard<std::mutex> guard(mutex_);
   PADDLE_ENFORCE(cudaStreamSynchronize(stream_));
   PADDLE_ENFORCE(cudaGetLastError());
 }
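Note: the sketch below is a minimal, self-contained illustration of the ownership pattern this diff introduces: the pool de-duplicates the requested places, owns one context per place through std::unique_ptr, and Get() hands back a non-owning raw pointer via .get(). The Place, DeviceContext, and Pool types here are simplified stand-ins rather than Paddle's real classes, and error handling uses plain asserts/exceptions instead of PADDLE_ENFORCE/PADDLE_THROW.

// Minimal sketch of a unique_ptr-owning, place-keyed context pool
// (simplified stand-ins for Paddle's types; compile with -std=c++14 or
// later so std::hash works for the enum class key).
#include <cassert>
#include <memory>
#include <stdexcept>
#include <unordered_map>
#include <unordered_set>
#include <vector>

enum class Place { kCPU, kGPU };  // stand-in for platform::Place

struct DeviceContext {
  explicit DeviceContext(Place p) : place(p) {}
  virtual ~DeviceContext() = default;
  Place place;
};

class Pool {
 public:
  explicit Pool(const std::vector<Place>& places) {
    assert(!places.empty());  // mirrors PADDLE_ENFORCE_GT(places.size(), 0)
    // De-duplicate so each place gets exactly one context.
    std::unordered_set<Place> set(places.begin(), places.end());
    using PtrType = std::unique_ptr<DeviceContext>;
    for (auto p : set) {
      contexts_.emplace(p, PtrType(new DeviceContext(p)));
    }
  }

  // Non-const raw pointer; the pool keeps ownership through the unique_ptr.
  DeviceContext* Get(Place place) {
    auto it = contexts_.find(place);
    if (it == contexts_.end()) {
      throw std::runtime_error("place is not supported");
    }
    return it->second.get();
  }

 private:
  std::unordered_map<Place, std::unique_ptr<DeviceContext>> contexts_;
};

int main() {
  Pool pool({Place::kCPU, Place::kCPU, Place::kGPU});  // duplicates collapse
  DeviceContext* cpu_ctx = pool.Get(Place::kCPU);
  assert(cpu_ctx->place == Place::kCPU);
  return 0;
}

Keeping the contexts behind std::unique_ptr means they are destroyed exactly once when the pool goes away, while callers only ever see borrowed pointers, which is why the diff replaces "return it->second;" with "return it->second.get();".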