From 2be3a74779935777924d4bb061f98de4ea8272a1 Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Mon, 7 Nov 2016 15:06:56 -0800 Subject: [PATCH 1/6] Modified API to use FLAGS_use_gpu as useGpu default value --- paddle/api/Matrix.cpp | 11 +++++++++++ paddle/api/Paddle.swig | 9 ++++++++- paddle/api/PaddleAPI.h | 31 +++++++++++++++++++++++-------- paddle/api/Util.cpp | 2 ++ paddle/api/Vector.cpp | 24 +++++++++++++++++++++++- 5 files changed, 67 insertions(+), 10 deletions(-) diff --git a/paddle/api/Matrix.cpp b/paddle/api/Matrix.cpp index 6a79f83495..f1ff957c6b 100644 --- a/paddle/api/Matrix.cpp +++ b/paddle/api/Matrix.cpp @@ -52,6 +52,17 @@ Matrix* Matrix::createDense(const std::vector& data, size_t height, return m; } +Matrix* Matrix::createDenseFromNumpy(float* data, int dim1, int dim2, + bool copy, bool useGpu) { + if (useGpu) { + /// Gpu mode only supports copy=True + CHECK(copy); + return Matrix::createGpuDenseFromNumpy(data, dim1, dim2); + } else { + return Matrix::createCpuDenseFromNumpy(data, dim1, dim2, copy); + } +} + Matrix* Matrix::createCpuDenseFromNumpy(float* data, int dim1, int dim2, bool copy) { auto m = new Matrix(); diff --git a/paddle/api/Paddle.swig b/paddle/api/Paddle.swig index a09f24ce1c..eaee182b52 100644 --- a/paddle/api/Paddle.swig +++ b/paddle/api/Paddle.swig @@ -133,14 +133,21 @@ namespace std { %newobject Matrix::createZero; %newobject Matrix::createSparse; %newobject Matrix::createDense; +%newobject Matrix::createDenseFromNumpy; +%newobject Matrix::createCpuDenseFromNumpy; +%newobject Matrix::createGpuDenseFromNumpy; %newobject Vector::createZero; %newobject Vector::create; +%newobject Vector::createVectorFromNumpy; %newobject Vector::createCpuVectorFromNumpy; %newobject Vector::createGpuVectorFromNumpy; %newobject IVector::createZero; %newobject IVector::create; +%newobject IVector::createVectorFromNumpy; +%newobject IVector::createCpuVectorFromNumpy; +%newobject IVector::createGpuVectorFromNumpy; %newobject Trainer::createByCommandLine; 
-%newobject Trainer::getNetworkOutput; +%newobject Trainer::getForwardOutput; %newobject Trainer::getLayerOutput; %newobject Arguments::getSlotValue; %newobject Arguments::getSlotIds; diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index cf790f2f8e..0825260fa1 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -42,6 +42,9 @@ using namespace paddle::enumeration_wrapper; // NOLINT */ void initPaddle(int argc, char** argv); +/// Return FLAGS_use_gpu +bool isUseGpu(); + /// Return true if this py_paddle is compiled in GPU Version bool isGpuVersion(); @@ -101,7 +104,8 @@ public: /** * Create A Matrix with height,width, which is filled by zero. */ - static Matrix* createZero(size_t height, size_t width, bool useGpu = false); + static Matrix* createZero(size_t height, size_t width, + bool useGpu = isUseGpu()); /** * Create Sparse Matrix. @@ -114,7 +118,7 @@ public: */ static Matrix* createSparse(size_t height, size_t width, size_t nnz, bool isNonVal = true, bool trans = false, - bool useGpu = false); + bool useGpu = isUseGpu()); /** * Create Dense Matrix. @@ -123,7 +127,11 @@ public: * @note the value will be copy into a new matrix. */ static Matrix* createDense(const std::vector& data, size_t height, - size_t width, bool useGpu = false); + size_t width, bool useGpu = isUseGpu()); + + static Matrix* createDenseFromNumpy(float* data, int dim1, int dim2, + bool copy = true, + bool useGpu = isUseGpu()); /** * Create Cpu Dense Matrix from numpy matrix, dtype=float32 @@ -221,15 +229,18 @@ public: ~Vector(); /// Create Vector filled with zero. - static Vector* createZero(size_t sz, bool useGpu = false); + static Vector* createZero(size_t sz, bool useGpu = isUseGpu()); /** * Create Vector from list of float. * * It will create a new vector, and copy data into it. 
*/ - static Vector* create(const std::vector& data, bool useGpu = false); + static Vector* create(const std::vector& data, + bool useGpu = isUseGpu()); + static Vector* createVectorFromNumpy(float* data, int dim, bool copy = true, + bool useGpu = isUseGpu()); /** * Create Cpu Vector from numpy array, which dtype=float32 * @@ -279,13 +290,17 @@ class IVector { public: /// Create IVector filled with zero - static IVector* createZero(size_t sz, bool useGpu = false); + static IVector* createZero(size_t sz, bool useGpu = isUseGpu()); /** * Create IVector from list of int. * It will create a new vector, and copy data into it. */ - static IVector* create(const std::vector& data, bool useGpu = false); + static IVector* create(const std::vector& data, + bool useGpu = isUseGpu()); + + static IVector* createVectorFromNumpy(int* data, int dim, bool copy = true, + bool useGpu = isUseGpu()); /** * Create Cpu IVector from numpy array, which dtype=int32 @@ -297,7 +312,7 @@ public: /** * Create Gpu IVector from numpy array, which dtype=int32 */ - static IVector* createGpuVectorFromNumy(int* data, int dim); + static IVector* createGpuVectorFromNumpy(int* data, int dim); /// Cast to numpy array inplace. 
void toNumpyArrayInplace(int** view_data, int* dim1) throw(UnsupportError); diff --git a/paddle/api/Util.cpp b/paddle/api/Util.cpp index 8a6741078f..f953b322ce 100644 --- a/paddle/api/Util.cpp +++ b/paddle/api/Util.cpp @@ -41,6 +41,8 @@ IntWithFloatArray::IntWithFloatArray(const float* v, const int* i, size_t l, bool f) : valBuf(v), idxBuf(i), length(l), needFree(f) {} +bool isUseGpu() {return FLAGS_use_gpu;} + bool isGpuVersion() { #ifdef PADDLE_ONLY_CPU return false; diff --git a/paddle/api/Vector.cpp b/paddle/api/Vector.cpp index 1affc1a5fe..5abafad9d1 100644 --- a/paddle/api/Vector.cpp +++ b/paddle/api/Vector.cpp @@ -39,6 +39,17 @@ IVector* IVector::create(const std::vector& data, bool useGpu) { return v; } +IVector* IVector::createVectorFromNumpy(int* data, int dim, bool copy, + bool useGpu) { + if (useGpu) { + /// if use gpu only copy=true is supported + CHECK(copy); + return IVector::createGpuVectorFromNumpy(data, dim); + } else { + return IVector::createCpuVectorFromNumpy(data, dim, copy); + } +} + IVector* IVector::createCpuVectorFromNumpy(int* data, int dim, bool copy) { auto v = new IVector(); if (copy) { @@ -50,7 +61,7 @@ IVector* IVector::createCpuVectorFromNumpy(int* data, int dim, bool copy) { return v; } -IVector* IVector::createGpuVectorFromNumy(int* data, int dim) { +IVector* IVector::createGpuVectorFromNumpy(int* data, int dim) { auto v = new IVector(); v->m->vec = paddle::IVector::create(dim, true); v->m->vec->copyFrom(data, dim); @@ -188,6 +199,17 @@ Vector* Vector::createByPaddleVectorPtr(void* ptr) { } } +Vector* Vector::createVectorFromNumpy(float* data, int dim, bool copy, + bool useGpu) { + if (useGpu) { + /// if use gpu only copy=True is supported + CHECK(copy); + return Vector::createGpuVectorFromNumpy(data, dim); + } else { + return Vector::createCpuVectorFromNumpy(data, dim, copy); + } +} + Vector* Vector::createCpuVectorFromNumpy(float* data, int dim, bool copy) { CHECK_GT(dim, 0); auto retVec = new Vector(); From 
f22573bdafa4554482fa51459e2763b12bea3190 Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Tue, 8 Nov 2016 10:21:55 -0800 Subject: [PATCH 2/6] changed to isUsingGpu() in PaddleAPI.h and throw exceptions instead of CHECK --- paddle/api/Matrix.cpp | 8 ++++++-- paddle/api/PaddleAPI.h | 25 ++++++++++++++----------- paddle/api/Util.cpp | 2 +- paddle/api/Vector.cpp | 16 ++++++++++++---- 4 files changed, 33 insertions(+), 18 deletions(-) diff --git a/paddle/api/Matrix.cpp b/paddle/api/Matrix.cpp index f1ff957c6b..6201ce926f 100644 --- a/paddle/api/Matrix.cpp +++ b/paddle/api/Matrix.cpp @@ -53,10 +53,14 @@ Matrix* Matrix::createDense(const std::vector& data, size_t height, } Matrix* Matrix::createDenseFromNumpy(float* data, int dim1, int dim2, - bool copy, bool useGpu) { + bool copy, bool useGpu) + throw (UnsupportError) { if (useGpu) { /// Gpu mode only supports copy=True - CHECK(copy); + if (!copy) { + UnsupportError e; + throw e; + } return Matrix::createGpuDenseFromNumpy(data, dim1, dim2); } else { return Matrix::createCpuDenseFromNumpy(data, dim1, dim2, copy); diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index 0825260fa1..386de6d597 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -43,7 +43,7 @@ using namespace paddle::enumeration_wrapper; // NOLINT void initPaddle(int argc, char** argv); /// Return FLAGS_use_gpu -bool isUseGpu(); +bool isUsingGpu(); /// Return true if this py_paddle is compiled in GPU Version bool isGpuVersion(); @@ -105,7 +105,7 @@ public: * Create A Matrix with height,width, which is filled by zero. */ static Matrix* createZero(size_t height, size_t width, - bool useGpu = isUseGpu()); + bool useGpu = isUsingGpu()); /** * Create Sparse Matrix. @@ -118,7 +118,7 @@ public: */ static Matrix* createSparse(size_t height, size_t width, size_t nnz, bool isNonVal = true, bool trans = false, - bool useGpu = isUseGpu()); + bool useGpu = isUsingGpu()); /** * Create Dense Matrix. 
@@ -127,11 +127,12 @@ public: * @note the value will be copy into a new matrix. */ static Matrix* createDense(const std::vector& data, size_t height, - size_t width, bool useGpu = isUseGpu()); + size_t width, bool useGpu = isUsingGpu()); static Matrix* createDenseFromNumpy(float* data, int dim1, int dim2, bool copy = true, - bool useGpu = isUseGpu()); + bool useGpu = isUsingGpu()) + throw (UnsupportError) ; /** * Create Cpu Dense Matrix from numpy matrix, dtype=float32 @@ -229,7 +230,7 @@ public: ~Vector(); /// Create Vector filled with zero. - static Vector* createZero(size_t sz, bool useGpu = isUseGpu()); + static Vector* createZero(size_t sz, bool useGpu = isUsingGpu()); /** * Create Vector from list of float. @@ -237,10 +238,11 @@ public: * It will create a new vector, and copy data into it. */ static Vector* create(const std::vector& data, - bool useGpu = isUseGpu()); + bool useGpu = isUsingGpu()); static Vector* createVectorFromNumpy(float* data, int dim, bool copy = true, - bool useGpu = isUseGpu()); + bool useGpu = isUsingGpu()) + throw (UnsupportError) ; /** * Create Cpu Vector from numpy array, which dtype=float32 * @@ -290,17 +292,18 @@ class IVector { public: /// Create IVector filled with zero - static IVector* createZero(size_t sz, bool useGpu = isUseGpu()); + static IVector* createZero(size_t sz, bool useGpu = isUsingGpu()); /** * Create IVector from list of int. * It will create a new vector, and copy data into it. 
*/ static IVector* create(const std::vector& data, - bool useGpu = isUseGpu()); + bool useGpu = isUsingGpu()); static IVector* createVectorFromNumpy(int* data, int dim, bool copy = true, - bool useGpu = isUseGpu()); + bool useGpu = isUsingGpu()) + throw (UnsupportError) ; /** * Create Cpu IVector from numpy array, which dtype=int32 diff --git a/paddle/api/Util.cpp b/paddle/api/Util.cpp index f953b322ce..f72c06aad3 100644 --- a/paddle/api/Util.cpp +++ b/paddle/api/Util.cpp @@ -41,7 +41,7 @@ IntWithFloatArray::IntWithFloatArray(const float* v, const int* i, size_t l, bool f) : valBuf(v), idxBuf(i), length(l), needFree(f) {} -bool isUseGpu() {return FLAGS_use_gpu;} +bool isUsingGpu() {return FLAGS_use_gpu;} bool isGpuVersion() { #ifdef PADDLE_ONLY_CPU diff --git a/paddle/api/Vector.cpp b/paddle/api/Vector.cpp index 5abafad9d1..787cf1c973 100644 --- a/paddle/api/Vector.cpp +++ b/paddle/api/Vector.cpp @@ -40,10 +40,14 @@ IVector* IVector::create(const std::vector& data, bool useGpu) { } IVector* IVector::createVectorFromNumpy(int* data, int dim, bool copy, - bool useGpu) { + bool useGpu) + throw (UnsupportError) { if (useGpu) { /// if use gpu only copy=true is supported - CHECK(copy); + if (!copy) { + UnsupportError e; + throw e; + } return IVector::createGpuVectorFromNumpy(data, dim); } else { return IVector::createCpuVectorFromNumpy(data, dim, copy); @@ -200,10 +204,14 @@ Vector* Vector::createByPaddleVectorPtr(void* ptr) { } Vector* Vector::createVectorFromNumpy(float* data, int dim, bool copy, - bool useGpu) { + bool useGpu) + throw (UnsupportError) { if (useGpu) { /// if use gpu only copy=True is supported - CHECK(copy); + if (!copy) { + UnsupportError e; + throw e; + } return Vector::createGpuVectorFromNumpy(data, dim); } else { return Vector::createCpuVectorFromNumpy(data, dim, copy); From 70fecee080ac91781c6c68bdc9cbe9fdaa0cbe48 Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Tue, 8 Nov 2016 14:02:21 -0800 Subject: [PATCH 3/6] add unittest for Matrix and Vector 
in API --- paddle/api/PaddleAPI.h | 3 +++ paddle/api/Vector.cpp | 15 +++++++++++ paddle/api/test/testMatrix.py | 14 +++++++--- paddle/api/test/testVector.py | 49 ++++++++++++++++++++++++++++++----- 4 files changed, 71 insertions(+), 10 deletions(-) diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index 386de6d597..5df7320136 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -272,6 +272,9 @@ public: /// Return is GPU vector or not. bool isGpu() const; + /// Return a list of float, the memory is alloced and copied. + FloatArray getData() const; + /// __len__ in python size_t getSize() const; diff --git a/paddle/api/Vector.cpp b/paddle/api/Vector.cpp index 787cf1c973..3da7a5c476 100644 --- a/paddle/api/Vector.cpp +++ b/paddle/api/Vector.cpp @@ -267,6 +267,21 @@ void Vector::copyFromNumpyArray(float* data, int dim) { m->vec->copyFrom(data, dim); } +FloatArray Vector::getData() const { + if (this->isGpu()) { + float* src = m->vec->getData(); + size_t len = m->vec->getSize(); + float* dest = new float[len]; + hl_memcpy_device2host(dest, src, len * sizeof(float)); + FloatArray ret_val(dest, len); + ret_val.needFree = true; + return ret_val; + } else { + FloatArray ret_val(m->vec->getData(), m->vec->getSize()); + return ret_val; + } +} + bool Vector::isGpu() const { return std::dynamic_pointer_cast(m->vec) != nullptr; } diff --git a/paddle/api/test/testMatrix.py b/paddle/api/test/testMatrix.py index 11035a9281..6d0d42f340 100644 --- a/paddle/api/test/testMatrix.py +++ b/paddle/api/test/testMatrix.py @@ -42,7 +42,7 @@ class TestMatrix(unittest.TestCase): self.assertEqual(m.getSparseRowCols(2), []) def test_sparse_value(self): - m = swig_paddle.Matrix.createSparse(3, 3, 6, False) + m = swig_paddle.Matrix.createSparse(3, 3, 6, False, False, False) self.assertIsNotNone(m) m.sparseCopyFrom([0, 2, 3, 3], [0, 1, 2], [7.3, 4.2, 3.2]) @@ -66,7 +66,7 @@ class TestMatrix(unittest.TestCase): self.assertIsNotNone(m) self.assertTrue(abs(m.get(1, 1) - 0.5) < 
1e-5) - def test_numpy(self): + def test_numpyCpu(self): numpy_mat = np.matrix([[1, 2], [3, 4], [5, 6]], dtype="float32") m = swig_paddle.Matrix.createCpuDenseFromNumpy(numpy_mat) self.assertEqual( @@ -100,8 +100,16 @@ class TestMatrix(unittest.TestCase): for a, e in zip(gpu_m.getData(), [1.0, 3.23, 3.0, 4.0, 5.0, 6.0]): self.assertAlmostEqual(a, e) + + def test_numpy(self): + numpy_mat = np.matrix([[1, 2], [3, 4], [5, 6]], dtype="float32") + m = swig_paddle.Matrix.createDenseFromNumpy(numpy_mat) + self.assertEqual((int(m.getHeight()), int(m.getWidth())), numpy_mat.shape) + self.assertEqual(m.isGpu(), swig_paddle.isUsingGpu()) + for a, e in zip(m.getData(), [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]): + self.assertAlmostEqual(a, e) if __name__ == "__main__": - swig_paddle.initPaddle("--use_gpu=0") + swig_paddle.initPaddle("--use_gpu=1" if swig_paddle.isGpuVersion() else "--use_gpu=0") unittest.main() diff --git a/paddle/api/test/testVector.py b/paddle/api/test/testVector.py index 5226df79ee..4903951414 100644 --- a/paddle/api/test/testVector.py +++ b/paddle/api/test/testVector.py @@ -20,20 +20,28 @@ import unittest class TestIVector(unittest.TestCase): def test_createZero(self): - m = swig_paddle.IVector.createZero(10) + m = swig_paddle.IVector.createZero(10, False) self.assertIsNotNone(m) for i in xrange(10): self.assertEqual(m[i], 0) m[i] = i self.assertEqual(m[i], i) + + m = swig_paddle.IVector.createZero(10) + self.assertEqual(m.isGpu(), swig_paddle.isUsingGpu()) + self.assertEqual(m.getData(), [0]*10) def test_create(self): - m = swig_paddle.IVector.create(range(10)) + m = swig_paddle.IVector.create(range(10), False) self.assertIsNotNone(m) for i in xrange(10): self.assertEqual(m[i], i) + + m = swig_paddle.IVector.create(range(10)) + self.assertEqual(m.isGpu(), swig_paddle.isUsingGpu()) + self.assertEqual(m.getData(), range(10)) - def test_numpy(self): + def test_cpu_numpy(self): vec = np.array([1, 3, 4, 65, 78, 1, 4], dtype="int32") iv = 
swig_paddle.IVector.createCpuVectorFromNumpy(vec) self.assertEqual(vec.shape[0], int(iv.__len__())) @@ -61,25 +69,43 @@ class TestIVector(unittest.TestCase): expect_vec = range(0, 10) expect_vec[4] = 7 self.assertEqual(vec.getData(), expect_vec) + + def test_numpy(self): + vec = np.array([1, 3, 4, 65, 78, 1, 4], dtype="int32") + iv = swig_paddle.IVector.createVectorFromNumpy(vec) + self.assertEqual(iv.isGpu(), swig_paddle.isUsingGpu()) + self.assertEqual(iv.getData(), list(vec)) class TestVector(unittest.TestCase): def testCreateZero(self): - v = swig_paddle.Vector.createZero(10) + v = swig_paddle.Vector.createZero(10, False) self.assertIsNotNone(v) for i in xrange(len(v)): self.assertTrue(util.doubleEqual(v[i], 0)) v[i] = i self.assertTrue(util.doubleEqual(v[i], i)) + + v = swig_paddle.Vector.createZero(10) + self.assertEqual(v.isGpu(), swig_paddle.isUsingGpu()) + self.assertEqual(v.getData(), [0]*10) def testCreate(self): - v = swig_paddle.Vector.create([x / 100.0 for x in xrange(100)]) + v = swig_paddle.Vector.create([x / 100.0 for x in xrange(100)], False) self.assertIsNotNone(v) for i in xrange(len(v)): self.assertTrue(util.doubleEqual(v[i], i / 100.0)) self.assertEqual(100, len(v)) + + v = swig_paddle.Vector.create([x / 100.0 for x in xrange(100)]) + self.assertEqual(v.isGpu(), swig_paddle.isUsingGpu()) + self.assertEqual(100, len(v)) + vdata = v.getData() + for i in xrange(len(v)): + self.assertTrue(util.doubleEqual(vdata[i], i / 100.0)) + - def testNumpy(self): + def testCpuNumpy(self): numpy_arr = np.array([1.2, 2.3, 3.4, 4.5], dtype="float32") vec = swig_paddle.Vector.createCpuVectorFromNumpy(numpy_arr) assert isinstance(vec, swig_paddle.Vector) @@ -102,9 +128,18 @@ class TestVector(unittest.TestCase): for i in xrange(1, len(numpy_3)): util.doubleEqual(numpy_3[i], vec[i]) + + def testNumpy(self): + numpy_arr = np.array([1.2, 2.3, 3.4, 4.5], dtype="float32") + vec = swig_paddle.Vector.createVectorFromNumpy(numpy_arr) + self.assertEqual(vec.isGpu(), 
swig_paddle.isUsingGpu()) + vecData = vec.getData() + for n, v in zip(numpy_arr, vecData): + self.assertTrue(util.doubleEqual(n, v)) + def testCopyFromNumpy(self): - vec = swig_paddle.Vector.createZero(1) + vec = swig_paddle.Vector.createZero(1, False) arr = np.array([1.3, 3.2, 2.4], dtype="float32") vec.copyFromNumpyArray(arr) for i in xrange(len(vec)): From 91e6dcb68f19b339c526a90672122dad54789fe5 Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Tue, 8 Nov 2016 15:32:21 -0800 Subject: [PATCH 4/6] fixed a bug in Paddle::Vector::createCpuVectorFromNumpy --- paddle/api/Vector.cpp | 2 +- paddle/api/test/testVector.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/paddle/api/Vector.cpp b/paddle/api/Vector.cpp index 3da7a5c476..b8b1b2d2f1 100644 --- a/paddle/api/Vector.cpp +++ b/paddle/api/Vector.cpp @@ -223,7 +223,7 @@ Vector* Vector::createCpuVectorFromNumpy(float* data, int dim, bool copy) { auto retVec = new Vector(); if (copy) { retVec->m->vec = paddle::Vector::create((size_t)dim, false); - return retVec; + retVec->m->vec->copyFrom(data, dim); } else { retVec->m->vec = paddle::Vector::create(data, (size_t)dim, false); } diff --git a/paddle/api/test/testVector.py b/paddle/api/test/testVector.py index 4903951414..5ca4d90dee 100644 --- a/paddle/api/test/testVector.py +++ b/paddle/api/test/testVector.py @@ -150,3 +150,4 @@ if __name__ == '__main__': swig_paddle.initPaddle("--use_gpu=1" if swig_paddle.isGpuVersion() else "--use_gpu=0") unittest.main() + From b207535198d218e49508752cdc8ae9d4221d78d4 Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Fri, 11 Nov 2016 11:21:20 -0800 Subject: [PATCH 5/6] Add setUseGpu in PaddleAPI.h and handle UnsupportedError in swig with meaningful message displayed --- paddle/api/Matrix.cpp | 6 ++---- paddle/api/Paddle.swig | 14 ++++++++++++++ paddle/api/PaddleAPI.h | 19 ++++++++++++------- paddle/api/Util.cpp | 2 ++ paddle/api/Vector.cpp | 12 ++++-------- paddle/api/test/testMatrix.py | 8 ++++++-- 
paddle/api/test/testVector.py | 10 ++++++---- 7 files changed, 46 insertions(+), 25 deletions(-) diff --git a/paddle/api/Matrix.cpp b/paddle/api/Matrix.cpp index 6201ce926f..0c8d2935a0 100644 --- a/paddle/api/Matrix.cpp +++ b/paddle/api/Matrix.cpp @@ -53,13 +53,11 @@ Matrix* Matrix::createDense(const std::vector& data, size_t height, } Matrix* Matrix::createDenseFromNumpy(float* data, int dim1, int dim2, - bool copy, bool useGpu) - throw (UnsupportError) { + bool copy, bool useGpu) { if (useGpu) { /// Gpu mode only supports copy=True if (!copy) { - UnsupportError e; - throw e; + throw UnsupportError("Gpu mode only supports copy=True"); } return Matrix::createGpuDenseFromNumpy(data, dim1, dim2); } else { diff --git a/paddle/api/Paddle.swig b/paddle/api/Paddle.swig index eaee182b52..e723a669f3 100644 --- a/paddle/api/Paddle.swig +++ b/paddle/api/Paddle.swig @@ -4,6 +4,20 @@ #define SWIG_FILE_WITH_INIT #include "api/PaddleAPI.h" %} + +%include "exception.i" +%exception{ + try{ + $action + } + catch(UnsupportError &ex ){ + SWIG_exception(SWIG_RuntimeError, ex.what()); + } + catch( ... ){ + SWIG_fail; + } +} + %include "std_vector.i" %include "std_pair.i" #ifdef SWIGPYTHON diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index 5df7320136..807519e739 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -18,6 +18,7 @@ limitations under the License. */ #include #include #include +#include #include #include "paddle/utils/GlobalConstants.h" #include "paddle/utils/TypeDefs.h" @@ -45,6 +46,9 @@ void initPaddle(int argc, char** argv); /// Return FLAGS_use_gpu bool isUsingGpu(); +/// Set the Flags_use_gpu to the given parameter +void setUseGpu(bool useGpu); + /// Return true if this py_paddle is compiled in GPU Version bool isGpuVersion(); @@ -55,7 +59,11 @@ class IOError {}; class RangeError {}; /// Not support Error, such as access GPU memory directly, etc. 
-class UnsupportError {}; +class UnsupportError : public std::runtime_error { +public: + UnsupportError() : std::runtime_error(" ") {}; + UnsupportError(const std::string& message) : std::runtime_error(message) {}; +}; /// This type will map to python's list of float. struct FloatArray { @@ -131,8 +139,7 @@ public: static Matrix* createDenseFromNumpy(float* data, int dim1, int dim2, bool copy = true, - bool useGpu = isUsingGpu()) - throw (UnsupportError) ; + bool useGpu = isUsingGpu()); /** * Create Cpu Dense Matrix from numpy matrix, dtype=float32 @@ -241,8 +248,7 @@ public: bool useGpu = isUsingGpu()); static Vector* createVectorFromNumpy(float* data, int dim, bool copy = true, - bool useGpu = isUsingGpu()) - throw (UnsupportError) ; + bool useGpu = isUsingGpu()); /** * Create Cpu Vector from numpy array, which dtype=float32 * @@ -305,8 +311,7 @@ public: bool useGpu = isUsingGpu()); static IVector* createVectorFromNumpy(int* data, int dim, bool copy = true, - bool useGpu = isUsingGpu()) - throw (UnsupportError) ; + bool useGpu = isUsingGpu()); /** * Create Cpu IVector from numpy array, which dtype=int32 diff --git a/paddle/api/Util.cpp b/paddle/api/Util.cpp index f72c06aad3..a8932351a6 100644 --- a/paddle/api/Util.cpp +++ b/paddle/api/Util.cpp @@ -43,6 +43,8 @@ IntWithFloatArray::IntWithFloatArray(const float* v, const int* i, size_t l, bool isUsingGpu() {return FLAGS_use_gpu;} +void setUseGpu(bool useGpu) {FLAGS_use_gpu = useGpu;} + bool isGpuVersion() { #ifdef PADDLE_ONLY_CPU return false; diff --git a/paddle/api/Vector.cpp b/paddle/api/Vector.cpp index b8b1b2d2f1..547be27ed5 100644 --- a/paddle/api/Vector.cpp +++ b/paddle/api/Vector.cpp @@ -40,13 +40,11 @@ IVector* IVector::create(const std::vector& data, bool useGpu) { } IVector* IVector::createVectorFromNumpy(int* data, int dim, bool copy, - bool useGpu) - throw (UnsupportError) { + bool useGpu) { if (useGpu) { /// if use gpu only copy=true is supported if (!copy) { - UnsupportError e; - throw e; + throw 
UnsupportError("Gpu mode only supports copy=True"); } return IVector::createGpuVectorFromNumpy(data, dim); } else { @@ -204,13 +202,11 @@ Vector* Vector::createByPaddleVectorPtr(void* ptr) { } Vector* Vector::createVectorFromNumpy(float* data, int dim, bool copy, - bool useGpu) - throw (UnsupportError) { + bool useGpu) { if (useGpu) { /// if use gpu only copy=True is supported if (!copy) { - UnsupportError e; - throw e; + throw UnsupportError("Gpu mode only supports copy=True"); } return Vector::createGpuVectorFromNumpy(data, dim); } else { diff --git a/paddle/api/test/testMatrix.py b/paddle/api/test/testMatrix.py index 6d0d42f340..87cedd607c 100644 --- a/paddle/api/test/testMatrix.py +++ b/paddle/api/test/testMatrix.py @@ -111,5 +111,9 @@ class TestMatrix(unittest.TestCase): if __name__ == "__main__": - swig_paddle.initPaddle("--use_gpu=1" if swig_paddle.isGpuVersion() else "--use_gpu=0") - unittest.main() + swig_paddle.initPaddle("--use_gpu=0") + suite = unittest.TestLoader().loadTestsFromTestCase(TestMatrix) + unittest.TextTestRunner().run(suite) + if swig_paddle.isGpuVersion(): + swig_paddle.setUseGpu(True) + unittest.main() diff --git a/paddle/api/test/testVector.py b/paddle/api/test/testVector.py index 5ca4d90dee..48aaa1d73d 100644 --- a/paddle/api/test/testVector.py +++ b/paddle/api/test/testVector.py @@ -147,7 +147,9 @@ class TestVector(unittest.TestCase): if __name__ == '__main__': - swig_paddle.initPaddle("--use_gpu=1" - if swig_paddle.isGpuVersion() else "--use_gpu=0") - unittest.main() - + swig_paddle.initPaddle("--use_gpu=0") + suite = unittest.TestLoader().loadTestsFromTestCase(TestVector) + unittest.TextTestRunner().run(suite) + if swig_paddle.isGpuVersion(): + swig_paddle.setUseGpu(True) + unittest.main() \ No newline at end of file From 4c86285a996a9be52c63eeef9d917cf18b78302b Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Fri, 11 Nov 2016 13:44:16 -0800 Subject: [PATCH 6/6] modified Paddle.swig to specially handle UnsupportError only --- 
paddle/api/Matrix.cpp | 3 ++- paddle/api/Paddle.swig | 15 ++++----------- paddle/api/PaddleAPI.h | 9 ++++++--- paddle/api/Vector.cpp | 4 ++-- 4 files changed, 14 insertions(+), 17 deletions(-) diff --git a/paddle/api/Matrix.cpp b/paddle/api/Matrix.cpp index 0c8d2935a0..e5493a381a 100644 --- a/paddle/api/Matrix.cpp +++ b/paddle/api/Matrix.cpp @@ -53,7 +53,8 @@ Matrix* Matrix::createDense(const std::vector& data, size_t height, } Matrix* Matrix::createDenseFromNumpy(float* data, int dim1, int dim2, - bool copy, bool useGpu) { + bool copy, bool useGpu) + throw (UnsupportError) { if (useGpu) { /// Gpu mode only supports copy=True if (!copy) { diff --git a/paddle/api/Paddle.swig b/paddle/api/Paddle.swig index e723a669f3..6a0fbc537d 100644 --- a/paddle/api/Paddle.swig +++ b/paddle/api/Paddle.swig @@ -6,17 +6,10 @@ %} %include "exception.i" -%exception{ - try{ - $action - } - catch(UnsupportError &ex ){ - SWIG_exception(SWIG_RuntimeError, ex.what()); - } - catch( ... ){ - SWIG_fail; - } -} +%typemap(throws) UnsupportError %{ + SWIG_exception(SWIG_RuntimeError, $1.what()); + SWIG_fail; +%} %include "std_vector.i" %include "std_pair.i" diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index 807519e739..5688ece44d 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -139,7 +139,8 @@ public: static Matrix* createDenseFromNumpy(float* data, int dim1, int dim2, bool copy = true, - bool useGpu = isUsingGpu()); + bool useGpu = isUsingGpu()) + throw (UnsupportError); /** * Create Cpu Dense Matrix from numpy matrix, dtype=float32 @@ -248,7 +249,8 @@ public: bool useGpu = isUsingGpu()); static Vector* createVectorFromNumpy(float* data, int dim, bool copy = true, - bool useGpu = isUsingGpu()); + bool useGpu = isUsingGpu()) + throw (UnsupportError); /** * Create Cpu Vector from numpy array, which dtype=float32 * @@ -311,7 +313,8 @@ public: bool useGpu = isUsingGpu()); static IVector* createVectorFromNumpy(int* data, int dim, bool copy = true, - bool useGpu = 
isUsingGpu()); + bool useGpu = isUsingGpu()) + throw (UnsupportError); /** * Create Cpu IVector from numpy array, which dtype=int32 diff --git a/paddle/api/Vector.cpp b/paddle/api/Vector.cpp index 547be27ed5..d44cdefc35 100644 --- a/paddle/api/Vector.cpp +++ b/paddle/api/Vector.cpp @@ -40,7 +40,7 @@ IVector* IVector::create(const std::vector& data, bool useGpu) { } IVector* IVector::createVectorFromNumpy(int* data, int dim, bool copy, - bool useGpu) { + bool useGpu) throw (UnsupportError){ if (useGpu) { /// if use gpu only copy=true is supported if (!copy) { @@ -202,7 +202,7 @@ Vector* Vector::createByPaddleVectorPtr(void* ptr) { } Vector* Vector::createVectorFromNumpy(float* data, int dim, bool copy, - bool useGpu) { + bool useGpu) throw (UnsupportError){ if (useGpu) { /// if use gpu only copy=True is supported if (!copy) {