From e2ba13373aeb4b345dc5909510d686235609983e Mon Sep 17 00:00:00 2001
From: qijun
Date: Tue, 25 Jul 2017 15:39:49 +0800
Subject: [PATCH] enable operator gpu unittest

---
 paddle/framework/tensor.h   |  2 ++
 paddle/pybind/pybind.cc     | 26 +++++++++++++++++++-------
 paddle/pybind/tensor_bind.h | 29 +++++++++++++++++++++++------
 3 files changed, 44 insertions(+), 13 deletions(-)

diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h
index a36f375d2e..69019c7adc 100644
--- a/paddle/framework/tensor.h
+++ b/paddle/framework/tensor.h
@@ -137,6 +137,8 @@ class Tensor {
 
   const DDim& dims() const { return dims_; }
 
+  paddle::platform::Place place() const { return holder_->place(); }
+
  private:
   // Placeholder hides type T, so it doesn't appear as a template
   // parameter of Variable.
diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc
index d48a948d21..4b1bbc2cf2 100644
--- a/paddle/pybind/pybind.cc
+++ b/paddle/pybind/pybind.cc
@@ -20,6 +20,7 @@ limitations under the License. */
 #include "paddle/framework/op_registry.h"
 #include "paddle/framework/operator.h"
 #include "paddle/framework/scope.h"
+#include "paddle/platform/place.h"
 #include "paddle/pybind/tensor_bind.h"
 #include "pybind11/numpy.h"
 #include "pybind11/pybind11.h"
@@ -62,12 +63,12 @@ PYBIND11_PLUGIN(core) {
           self.Resize(pd::make_ddim(dim));
         })
       .def("alloc_float",
-           [](pd::Tensor& self) {
-             self.mutable_data<float>(paddle::platform::CPUPlace());
+           [](pd::Tensor& self, paddle::platform::Place& place) {
+             self.mutable_data<float>(place);
            })
       .def("alloc_int",
-           [](pd::Tensor& self) {
-             self.mutable_data<int>(paddle::platform::CPUPlace());
+           [](pd::Tensor& self, paddle::platform::Place& place) {
+             self.mutable_data<int>(place);
            })
       .def("set", paddle::pybind::PyTensorSetFromArray<float>)
       .def("set", paddle::pybind::PyTensorSetFromArray<int>)
@@ -122,9 +123,20 @@ All parameter, weight, gradient are variables in Paddle.
       .def("temp", pd::OperatorBase::TMP_VAR_NAME);
 
   py::class_<paddle::platform::DeviceContext>(m, "DeviceContext")
-      .def_static("cpu_context", []() -> paddle::platform::DeviceContext* {
-        return new paddle::platform::CPUDeviceContext();
-      });
+      .def_static(
+          "create",
+          [](paddle::platform::Place& place)
+              -> paddle::platform::DeviceContext* {
+            if (paddle::platform::is_gpu_place(place)) {
+              return new paddle::platform::GPUDeviceContext(place);
+            } else if (paddle::platform::is_cpu_place(place)) {
+              return new paddle::platform::CPUDeviceContext();
+            }
+            // NOTE(review): Place is a variant of CPUPlace/GPUPlace, so this
+            // is unreachable; the explicit return avoids falling off the end
+            // of a value-returning lambda (undefined behavior).
+            return nullptr;
+          });
+
+  py::class_<paddle::platform::GPUPlace>(m, "GPUPlace")
+      .def(py::init<int>())
+      .def(py::init<>());
+
+  py::class_<paddle::platform::CPUPlace>(m, "CPUPlace").def(py::init<>());
 
   py::class_<pd::OperatorBase, std::shared_ptr<pd::OperatorBase>> operator_base(
       m, "Operator");
diff --git a/paddle/pybind/tensor_bind.h b/paddle/pybind/tensor_bind.h
index 995e102bf9..0caece6e95 100644
--- a/paddle/pybind/tensor_bind.h
+++ b/paddle/pybind/tensor_bind.h
@@ -13,9 +13,10 @@ limitations under the License. */
 
 #pragma once
 
-#include <paddle/framework/tensor.h>
-#include <pybind11/numpy.h>
-#include <pybind11/pybind11.h>
+#include "paddle/framework/tensor.h"
+#include "paddle/memory/memcpy.h"
+#include "pybind11/numpy.h"
+#include "pybind11/pybind11.h"
 
 namespace py = pybind11;
 
@@ -56,7 +57,6 @@ struct CastToPyBufferImpl {
       strides[i - 1] = sizeof(CUR_TYPE) * prod;
       prod *= dims_outside[i - 1];
     }
-
     return py::buffer_info(
         tensor.mutable_data<CUR_TYPE>(tensor.holder_->place()),
         sizeof(CUR_TYPE),
@@ -87,8 +87,25 @@ void PyTensorSetFromArray(
   }
 
   self.Resize(framework::make_ddim(dims));
-  auto *dst = self.mutable_data<T>(paddle::platform::CPUPlace());
-  std::memcpy(dst, array.data(), sizeof(T) * array.size());
+  auto *dst = self.mutable_data<T>(self.place());
+
+  if (paddle::platform::is_cpu_place(self.place())) {
+    auto place = boost::get<paddle::platform::CPUPlace>(self.place());
+    paddle::memory::Copy(
+        place, dst, place, array.data(), sizeof(T) * array.size());
+  } else if (paddle::platform::is_gpu_place(self.place())) {
+#ifdef PADDLE_ONLY_CPU
+    PADDLE_THROW("'GPUPlace' is not supported in CPU only device.");
+#else
+    auto place = boost::get<paddle::platform::GPUPlace>(self.place());
+    paddle::memory::Copy(
+        place,
+        dst,
+        paddle::platform::CPUPlace(),
+        array.data(),
+        sizeof(T) * array.size());
+#endif
} } } // namespace pybind