@@ -15,6 +15,7 @@
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/inference/api/paddle_inference_api.h"
+#include "paddle/fluid/memory/memcpy.h"
 #include "paddle/fluid/platform/enforce.h"
 
 namespace paddle {
@@ -73,6 +74,61 @@ T *ZeroCopyTensor::data(PaddlePlace *place, int *size) const {
   return res;
 }
 
+template <typename T>
+void ZeroCopyTensor::copy_from_cpu(const T *data) {
+  EAGER_GET_TENSOR;
+  PADDLE_ENFORCE_GE(
+      tensor->numel(), 0,
"You should call ZeroCopyTensor::Reshape(const std::vector<int> &shape)"
|
|
|
|
|
"function before copy data from cpu.");
|
|
|
|
|
+  size_t ele_size = tensor->numel() * sizeof(T);
+
+  if (place_ == PaddlePlace::kCPU) {
+    auto *t_data = tensor->mutable_data<T>(platform::CPUPlace());
+    std::memcpy(static_cast<void *>(t_data), data, ele_size);
+  } else {
+#ifdef PADDLE_WITH_CUDA
+    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
+    platform::CUDAPlace gpu_place(device_);
+    auto *t_data = tensor->mutable_data<T>(gpu_place);
+    auto *dev_ctx =
+        static_cast<const platform::CUDADeviceContext *>(pool.Get(gpu_place));
+
+    memory::Copy(gpu_place, static_cast<void *>(t_data), platform::CPUPlace(),
+                 data, ele_size, dev_ctx->stream());
+#else
PADDLE_THROW("Not compile with CUDA, should not reach here.");
|
|
|
|
|
+#endif
+  }
+}
+
+template <typename T>
+void ZeroCopyTensor::copy_to_cpu(T *data) {
+  EAGER_GET_TENSOR;
+  auto ele_num = tensor->numel();
+  auto *t_data = tensor->data<T>();
+  auto t_place = tensor->place();
+
+  if (platform::is_cpu_place(t_place)) {
+    std::memcpy(static_cast<void *>(data), t_data, ele_num * sizeof(T));
+  } else {
+#ifdef PADDLE_WITH_CUDA
+    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
+    auto gpu_place = boost::get<platform::CUDAPlace>(t_place);
+    auto *dev_ctx =
+        static_cast<const platform::CUDADeviceContext *>(pool.Get(gpu_place));
+    memory::Copy(platform::CPUPlace(), static_cast<void *>(data), gpu_place,
+                 t_data, ele_num * sizeof(T), dev_ctx->stream());
+#else
PADDLE_THROW("Not compile with CUDA, should not reach here.");
|
|
|
|
|
+#endif
+  }
+}
+template void ZeroCopyTensor::copy_from_cpu<float>(const float *data);
+template void ZeroCopyTensor::copy_from_cpu<int64_t>(const int64_t *data);
+template void ZeroCopyTensor::copy_to_cpu<float>(float *data);
+template void ZeroCopyTensor::copy_to_cpu<int64_t>(int64_t *data);
+
 template float *ZeroCopyTensor::data<float>(PaddlePlace *place,
                                             int *size) const;
 template int64_t *ZeroCopyTensor::data<int64_t>(PaddlePlace *place,
@@ -92,10 +148,10 @@ void *ZeroCopyTensor::FindTensor() const {
   return tensor;
 }
 
-std::vector<int64_t> ZeroCopyTensor::shape() const {
+std::vector<int> ZeroCopyTensor::shape() const {
   EAGER_GET_TENSOR;
   PADDLE_ENFORCE(tensor_, "not found tensor called %s in the scope", name_);
-  return framework::vectorize(tensor->dims());
+  return framework::vectorize2int(tensor->dims());
 }
 
 void ZeroCopyTensor::SetLoD(const std::vector<std::vector<size_t>> &x) {
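
For context, a minimal usage sketch of the new copy_from_cpu/copy_to_cpu path follows. It is not part of the diff: the predictor-side calls (GetInputTensor, GetOutputTensor, ZeroCopyRun) and the tensor names "x"/"y" are assumptions based on the existing zero-copy interface in paddle_inference_api.h, while Reshape, copy_from_cpu, shape and copy_to_cpu are the methods touched above.

// Usage sketch (assumed API outside this diff; tensor names are placeholders).
#include <functional>
#include <numeric>
#include <vector>

#include "paddle/fluid/inference/api/paddle_inference_api.h"

void RunZeroCopy(paddle::PaddlePredictor *predictor,  // assumed: created with the
                                                      // zero-copy feed/fetch path enabled
                 const std::vector<float> &input,
                 const std::vector<int> &input_shape) {
  // Feed: Reshape() must come first, since copy_from_cpu() enforces numel() > 0.
  auto in = predictor->GetInputTensor("x");
  in->Reshape(input_shape);
  in->copy_from_cpu(input.data());

  predictor->ZeroCopyRun();

  // Fetch: size the host buffer from shape() (now std::vector<int>), then copy back.
  auto out = predictor->GetOutputTensor("y");
  auto out_shape = out->shape();
  int numel = std::accumulate(out_shape.begin(), out_shape.end(), 1,
                              std::multiplies<int>());
  std::vector<float> result(numel);
  out->copy_to_cpu(result.data());
}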