commit c85a323672

@@ -0,0 +1,95 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. */

#pragma once
#include <paddle/framework/tensor.h>
#include <pybind11/numpy.h>
#include <pybind11/pybind11.h>

namespace py = pybind11;

namespace paddle {

namespace pybind {

namespace details {

template <bool less, size_t I, typename... ARGS>
struct CastToPyBufferImpl;

template <size_t I, typename... ARGS>
struct CastToPyBufferImpl<false, I, ARGS...> {
  py::buffer_info operator()(framework::Tensor &tensor) {
    PADDLE_THROW("This type of tensor cannot be exposed to Python");
    return py::buffer_info();
  }
};

template <size_t I, typename... ARGS>
struct CastToPyBufferImpl<true, I, ARGS...> {
  using CUR_TYPE = typename std::tuple_element<I, std::tuple<ARGS...>>::type;
  py::buffer_info operator()(framework::Tensor &tensor) {
    PADDLE_ENFORCE(paddle::platform::is_cpu_place(tensor.holder_->place()),
                   "Only CPU tensor can cast to numpy array");

    if (std::type_index(typeid(CUR_TYPE)) == tensor.holder_->type()) {
      auto dim_vec = framework::vectorize(tensor.dims());
      std::vector<size_t> dims_outside;
      std::vector<size_t> strides;
      dims_outside.resize(dim_vec.size());
      strides.resize(dim_vec.size());

      size_t prod = 1;
      for (size_t i = dim_vec.size(); i != 0; --i) {
        dims_outside[i - 1] = (size_t)dim_vec[i - 1];
        strides[i - 1] = sizeof(CUR_TYPE) * prod;
        prod *= dims_outside[i - 1];
      }

      return py::buffer_info(
          tensor.mutable_data<CUR_TYPE>(tensor.holder_->place()),
          sizeof(CUR_TYPE),
          py::format_descriptor<CUR_TYPE>::format(),
          (size_t)framework::arity(tensor.dims()),
          dims_outside,
          strides);
    } else {
      constexpr bool less = I + 1 < std::tuple_size<std::tuple<ARGS...>>::value;
      return CastToPyBufferImpl<less, I + 1, ARGS...>()(tensor);
    }
  }
};
}  // namespace details
inline py::buffer_info CastToPyBuffer(framework::Tensor &tensor) {
  auto buffer_info = details::CastToPyBufferImpl<true, 0, float, int>()(tensor);
  return buffer_info;
}

template <typename T>
void PyTensorSetFromArray(
    framework::Tensor &self,
    py::array_t<T, py::array::c_style | py::array::forcecast> array) {
  std::vector<int> dims;
  dims.reserve(array.ndim());
  for (size_t i = 0; i < array.ndim(); ++i) {
    dims.push_back((int)array.shape()[i]);
  }

  self.set_dims(framework::make_ddim(dims));
  auto *dst = self.mutable_data<T>(paddle::platform::CPUPlace());
  std::memcpy(dst, array.data(), sizeof(T) * array.size());
}

}  // namespace pybind
}  // namespace paddle
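For context, here is a minimal sketch (not part of this diff) of how the two helpers above could be registered with pybind11 so that numpy.array(tensor) and tensor.set(array) behave the way the new test_tensor.py below expects. The module name, the lambda wiring, and the assumption that the remaining Tensor methods are bound elsewhere are illustrative guesses, not the commit's actual binding code.

// Hypothetical binding sketch: only CastToPyBuffer and PyTensorSetFromArray
// come from this commit; everything else is assumed for illustration.
// The header added in this diff (path not shown above) would also be included.
#include <paddle/framework/tensor.h>
#include <pybind11/pybind11.h>

namespace py = pybind11;

PYBIND11_MODULE(core, m) {  // assumed module name
  py::class_<paddle::framework::Tensor>(m, "Tensor", py::buffer_protocol())
      // Expose the tensor via the Python buffer protocol so that
      // numpy.array(tensor) sees the correct dtype, shape and strides.
      .def_buffer([](paddle::framework::Tensor &t) -> py::buffer_info {
        return paddle::pybind::CastToPyBuffer(t);
      })
      // Copy a C-contiguous float numpy array back into the tensor,
      // matching tensor.set(tensor_array) in test_tensor.py.
      .def("set", &paddle::pybind::PyTensorSetFromArray<float>);
  // set_dims, alloc_float, alloc_int, Scope.create_var and Variable.get_tensor
  // are assumed to be bound elsewhere.
}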
@@ -1,2 +1,3 @@
 add_python_test(test_framework test_protobuf.py test_scope.py
-    test_default_scope_funcs.py test_op_creation_methods.py)
+    test_default_scope_funcs.py test_op_creation_methods.py
+    test_tensor.py)
@@ -0,0 +1,45 @@
import paddle.v2.framework.core as core
import unittest
import numpy


class TestScope(unittest.TestCase):
    def test_int_tensor(self):
        scope = core.Scope(None)
        var = scope.create_var("test_tensor")
        tensor = var.get_tensor()

        tensor.set_dims([1000, 784])
        tensor.alloc_int()

        tensor_array = numpy.array(tensor)
        self.assertEqual((1000, 784), tensor_array.shape)
        tensor_array[3, 9] = 1
        tensor_array[19, 11] = 2
        tensor.set(tensor_array)

        tensor_array_2 = numpy.array(tensor)
        self.assertEqual(1.0, tensor_array_2[3, 9])
        self.assertEqual(2.0, tensor_array_2[19, 11])

    def test_float_tensor(self):
        scope = core.Scope(None)
        var = scope.create_var("test_tensor")
        tensor = var.get_tensor()

        tensor.set_dims([1000, 784])
        tensor.alloc_float()

        tensor_array = numpy.array(tensor)
        self.assertEqual((1000, 784), tensor_array.shape)
        tensor_array[3, 9] = 1.0
        tensor_array[19, 11] = 2.0
        tensor.set(tensor_array)

        tensor_array_2 = numpy.array(tensor)
        self.assertAlmostEqual(1.0, tensor_array_2[3, 9])
        self.assertAlmostEqual(2.0, tensor_array_2[19, 11])


if __name__ == '__main__':
    unittest.main()
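As a quick sanity check of the stride loop in CastToPyBufferImpl, the standalone snippet below (not part of the commit) reproduces the computation for the [1000, 784] float tensors used in the test above: strides are accumulated from the innermost dimension outward, yielding the C-contiguous byte strides {3136, 4} that numpy expects for a float32 array of that shape.

// Standalone illustration of the stride computation; dims mirror test_tensor.py.
#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  std::vector<size_t> dims = {1000, 784};
  std::vector<size_t> strides(dims.size());
  size_t prod = 1;
  // Walk dimensions from innermost to outermost, exactly like the loop
  // in CastToPyBufferImpl<true, I, ARGS...>::operator().
  for (size_t i = dims.size(); i != 0; --i) {
    strides[i - 1] = sizeof(float) * prod;
    prod *= dims[i - 1];
  }
  std::printf("strides = {%zu, %zu}\n", strides[0], strides[1]);  // {3136, 4}
  return 0;
}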