@@ -68,7 +68,7 @@ limitations under the License. */
 #include "paddle/fluid/pybind/ir.h"
 #include "paddle/fluid/pybind/pybind_boost_headers.h"
 
-#ifndef _WIN32
+#ifdef PADDLE_WITH_NCCL
 #include "paddle/fluid/pybind/nccl_wrapper_py.h"
 #endif
 #include "paddle/fluid/framework/data_type.h"
@@ -78,7 +78,7 @@ limitations under the License. */
 #include "paddle/fluid/pybind/tensor_py.h"
 #include "paddle/fluid/string/to_string.h"
 #ifdef PADDLE_WITH_CUDA
-#ifndef _WIN32
+#ifdef PADDLE_WITH_NCCL
 #include "paddle/fluid/operators/nccl/nccl_gpu_common.h"
 #endif
 #include "paddle/fluid/platform/cuda_profiler.h"
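
Both include hunks make the same substitution: the old guard inferred NCCL availability from the platform (CUDA builds that are not Windows), while the new one keys off a dedicated PADDLE_WITH_NCCL feature macro, so the NCCL-only headers compile in exactly when the build enables NCCL. A minimal, self-contained sketch of that guard pattern, assuming PADDLE_WITH_NCCL is supplied as a compiler definition by the build system and that the real NCCL header is available whenever it is set:

// Sketch only, not Paddle code: feature-macro guard for optional NCCL support.
// Assumes the build passes -DPADDLE_WITH_NCCL when NCCL is enabled.
#ifdef PADDLE_WITH_NCCL
#include <nccl.h>  // real NCCL header; present only in NCCL-enabled builds
#endif

#include <cstdio>

int main() {
#ifdef PADDLE_WITH_NCCL
  int version = 0;
  ncclGetVersion(&version);  // NCCL runtime API (available since NCCL 2.3)
  std::printf("built with NCCL, version %d\n", version);
#else
  std::printf("built without NCCL support\n");
#endif
  return 0;
}

Compiled without the macro, the NCCL include and calls disappear entirely, which is what lets the same source build on configurations that have CUDA but no NCCL.
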
@@ -926,7 +926,7 @@ All parameter, weight, gradient are variables in Paddle.
       .def("get_lod_tensor_array",
            [](Variable &self) { return self.GetMutable<LoDTensorArray>(); },
            py::return_value_policy::reference)
-#if (defined(PADDLE_WITH_CUDA) && !defined(_WIN32))
+#if (defined(PADDLE_WITH_NCCL))
       .def("get_communicator",
            [](Variable &self) -> platform::Communicator * {
              return self.GetMutable<platform::Communicator>();
@@ -1174,7 +1174,7 @@ All parameter, weight, gradient are variables in Paddle.
 #endif
       });;
   // clang-format on
-#if (defined(PADDLE_WITH_CUDA) && !defined(_WIN32))
+#if defined(PADDLE_WITH_NCCL)
   py::class_<platform::Communicator>(m, "Communicator").def(py::init<>());
 #endif
   py::class_<platform::CUDAPlace>(m, "CUDAPlace", R"DOC(
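
The binding hunks apply the same macro to the Python-facing surface: the Variable.get_communicator method and the Communicator class are registered only in NCCL-enabled builds and simply do not exist otherwise. A minimal pybind11 sketch of that conditional-registration pattern; the Communicator stand-in and the module name demo are placeholders for illustration, not Paddle's actual types:

// Sketch only: register a class with pybind11 only when a feature macro is set,
// mirroring how the Communicator binding above is guarded by PADDLE_WITH_NCCL.
#include <pybind11/pybind11.h>

namespace py = pybind11;

#ifdef PADDLE_WITH_NCCL
struct Communicator {};  // placeholder for platform::Communicator
#endif

PYBIND11_MODULE(demo, m) {
#ifdef PADDLE_WITH_NCCL
  // Present only in NCCL-enabled builds; Python callers should probe with
  // hasattr(demo, "Communicator") before relying on it.
  py::class_<Communicator>(m, "Communicator").def(py::init<>());
#endif
}

One consequence of this design is that the Python API changes shape with the build configuration, so downstream code that touches Communicator needs a capability check rather than a version check.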