From 3b2e8fc8f3d7e81adbef3cd47e848a32ff4fc7f0 Mon Sep 17 00:00:00 2001
From: liaogang
Date: Mon, 7 Aug 2017 14:11:28 +0800
Subject: [PATCH 1/3] Accelerate CI process under android environment

---
 paddle/scripts/docker/build_android.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/paddle/scripts/docker/build_android.sh b/paddle/scripts/docker/build_android.sh
index 56d290be4a..5584e29e2a 100644
--- a/paddle/scripts/docker/build_android.sh
+++ b/paddle/scripts/docker/build_android.sh
@@ -20,4 +20,4 @@ cmake -DCMAKE_SYSTEM_NAME=Android \
       -DWITH_SWIG_PY=OFF \
       ..
 make -j `nproc`
-make install
+make install -j `nproc`

From 36ac89b9c4ba2662eea633d9bd1d8e492b6b1b72 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=AD=A6=E6=AF=85?=
Date: Mon, 7 Aug 2017 19:31:10 +0800
Subject: [PATCH 2/3] Put everything in paddle python wheel package enable pip
 install paddle (#3102)

* put everything in paddle wheel
* update
* update
* fix unitest
* with platform specs
---
 paddle/CMakeLists.txt             |  2 --
 paddle/api/CMakeLists.txt         | 10 ++--------
 paddle/scripts/CMakeLists.txt     | 14 ++++++--------
 paddle/scripts/submit_local.sh.in |  0
 paddle/setup.py.in                | 32 -------------------------------
 python/CMakeLists.txt             |  2 +-
 python/setup.py.in                | 29 +++++++++++++++++++++-------
 7 files changed, 31 insertions(+), 58 deletions(-)
 mode change 100644 => 100755 paddle/scripts/submit_local.sh.in
 delete mode 100644 paddle/setup.py.in

diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt
index f8a88cf317..cf61a243e9 100644
--- a/paddle/CMakeLists.txt
+++ b/paddle/CMakeLists.txt
@@ -22,7 +22,5 @@ if(WITH_C_API)
 endif()
 
 if(WITH_SWIG_PY)
-  configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in
-    ${CMAKE_CURRENT_SOURCE_DIR}/setup.py)
   add_subdirectory(api)
 endif()
diff --git a/paddle/api/CMakeLists.txt b/paddle/api/CMakeLists.txt
index 84da89a142..7a1e8b8b26 100644
--- a/paddle/api/CMakeLists.txt
+++ b/paddle/api/CMakeLists.txt
@@ -82,9 +82,7 @@ SWIG_LINK_LIBRARIES(swig_paddle
 add_custom_command(OUTPUT ${PROJ_ROOT}/paddle/py_paddle/_swig_paddle.so
     COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/swig_paddle.py ${PROJ_ROOT}/paddle/py_paddle
     COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/_swig_paddle.so ${PROJ_ROOT}/paddle/py_paddle
-    COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel
-    COMMAND ${CMAKE_COMMAND} -E touch dist/.timestamp
-    COMMAND rm -rf py_paddle.egg-info build
+    COMMAND ${CMAKE_COMMAND} -E touch .timestamp
     WORKING_DIRECTORY ${PROJ_ROOT}/paddle
     DEPENDS _swig_paddle
 )
@@ -92,10 +90,6 @@ add_custom_command(OUTPUT ${PROJ_ROOT}/paddle/py_paddle/_swig_paddle.so
 # TODO(yuyang18) : make wheel name calculated by cmake
 add_custom_target(python_api_wheel ALL DEPENDS
     ${PROJ_ROOT}/paddle/py_paddle/_swig_paddle.so)
-install(DIRECTORY ${CMAKE_SOURCE_DIR}/paddle/dist/
-    DESTINATION opt/paddle/share/wheels
-)
-
 if(WITH_TESTING)
   IF(NOT PY_PIP_FOUND)
     SET(PIP_SOURCES_DIR ${PYTHON_SOURCES_DIR}/pip)
@@ -108,7 +102,7 @@ if(WITH_TESTING)
       BUILD_COMMAND    ""
       INSTALL_COMMAND  env ${py_env} ${PYTHON_EXECUTABLE} setup.py install
       BUILD_IN_SOURCE  1
-      DEPENDS python setuptools python_api_wheel
+      #DEPENDS python setuptools python_api_wheel
     )
   ENDIF()
   add_subdirectory(test)
diff --git a/paddle/scripts/CMakeLists.txt b/paddle/scripts/CMakeLists.txt
index 66a46e1883..a52f06fe49 100644
--- a/paddle/scripts/CMakeLists.txt
+++ b/paddle/scripts/CMakeLists.txt
@@ -1,17 +1,15 @@
 configure_file(submit_local.sh.in
-    submit_local.sh
+    paddle
     @ONLY)
 
-install(FILES ${CMAKE_CURRENT_BINARY_DIR}/submit_local.sh DESTINATION bin
+install(FILES ${CMAKE_CURRENT_BINARY_DIR}/paddle DESTINATION bin
     PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ
-    GROUP_EXECUTE GROUP_READ WORLD_EXECUTE WORLD_READ
-    RENAME paddle)
+    GROUP_EXECUTE GROUP_READ WORLD_EXECUTE WORLD_READ)
 
 configure_file(tools/usage_stat/usage.sh
-    usage.sh
+    paddle_usage
     @ONLY)
 
-install(FILES ${CMAKE_CURRENT_BINARY_DIR}/usage.sh DESTINATION opt/paddle/bin
+install(FILES ${CMAKE_CURRENT_BINARY_DIR}/paddle_usage DESTINATION opt/paddle/bin
     PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ
-    GROUP_EXECUTE GROUP_READ WORLD_EXECUTE WORLD_READ
-    RENAME paddle_usage)
+    GROUP_EXECUTE GROUP_READ WORLD_EXECUTE WORLD_READ)
diff --git a/paddle/scripts/submit_local.sh.in b/paddle/scripts/submit_local.sh.in
old mode 100644
new mode 100755
diff --git a/paddle/setup.py.in b/paddle/setup.py.in
deleted file mode 100644
index af107e7672..0000000000
--- a/paddle/setup.py.in
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-from setuptools import setup, Extension
-
-setup(name="py_paddle",
-      version="${PADDLE_VERSION}",
-      packages=['py_paddle'],
-      include_package_data=True,
-      package_data={'py_paddle':['*.py','_swig_paddle.so']},
-      install_requires = [
-        'nltk>=3.2.2',
-        # We use `numpy.flip` in `test_image.py`.
-        # `numpy.flip` is introduced in `1.12.0`
-        'numpy>=1.12.0', # The numpy is required.
-        'protobuf==${PROTOBUF_VERSION}' # The paddle protobuf version
-      ],
-      url='http://www.paddlepaddle.org/',
-      license='Apache 2.0',
-)
diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt
index 0171f9d8cc..b5030da8e7 100644
--- a/python/CMakeLists.txt
+++ b/python/CMakeLists.txt
@@ -39,7 +39,7 @@ add_custom_command(OUTPUT ${OUTPUT_DIR}/.timestamp
     DEPENDS gen_proto_py copy_paddle_pybind framework_py_proto ${PY_FILES} ${external_project_dependencies} ${COPY_PADDLE_MASTER})
 
 add_custom_target(paddle_python ALL DEPENDS
-    ${OUTPUT_DIR}/.timestamp)
+    ${OUTPUT_DIR}/.timestamp paddle_pserver_main paddle_trainer paddle_merge_model python_api_wheel)
 
 set(PADDLE_PYTHON_PACKAGE_DIR ${CMAKE_CURRENT_BINARY_DIR}/dist/)
 
diff --git a/python/setup.py.in b/python/setup.py.in
index 7808238aa6..38f0a503be 100644
--- a/python/setup.py.in
+++ b/python/setup.py.in
@@ -1,4 +1,8 @@
-from setuptools import setup
+from setuptools import setup, Distribution
+
+class BinaryDistribution(Distribution):
+    def has_ext_modules(foo):
+        return True
 
 packages=['paddle',
           'paddle.proto',
@@ -11,7 +15,8 @@ packages=['paddle',
           'paddle.v2.master',
           'paddle.v2.plot',
           'paddle.v2.framework',
-          'paddle.v2.framework.proto']
+          'paddle.v2.framework.proto',
+          'py_paddle']
 
 setup_requires=["requests",
                 "numpy>=1.12",
@@ -21,23 +26,33 @@ setup_requires=["requests",
                 "rarfile",
                 "scipy>=0.19.0",
                 "Pillow",
-                "nltk"]
+                "nltk>=3.2.2"]
 
 if '${CMAKE_SYSTEM_PROCESSOR}' not in ['arm', 'armv7-a', 'aarch64']:
     setup_requires+=["opencv-python"]
 
-setup(name='paddle',
+setup(name='paddlepaddle',
       version='${PADDLE_VERSION}',
       description='Parallel Distributed Deep Learning',
       install_requires=setup_requires,
       packages=packages,
-      package_data={'paddle.v2.master': ['libpaddle_master.so'],
-                    'paddle.v2.framework': ['core.so']
+      package_data={
+        'paddle.v2.master': ['libpaddle_master.so'],
+        'paddle.v2.framework': ['core.so'],
+        'py_paddle':['*.py','_swig_paddle.so']
       },
       package_dir={
           '': '${CMAKE_CURRENT_SOURCE_DIR}',
           # The paddle.v2.framework.proto will be generated while compiling.
           # So that package points to other directory.
-          'paddle.v2.framework.proto': '${PROJ_BINARY_ROOT}/paddle/framework'
+          'paddle.v2.framework.proto': '${PROJ_BINARY_ROOT}/paddle/framework',
+          'py_paddle': '${PROJ_ROOT}/paddle/py_paddle'
       },
+      scripts=['${PROJ_BINARY_ROOT}/paddle/scripts/paddle'],
+      distclass=BinaryDistribution,
+      data_files=[('/usr/local/opt/paddle/bin',
+                   ['${PROJ_BINARY_ROOT}/paddle/scripts/paddle_usage',
+                    '${PROJ_BINARY_ROOT}/paddle/trainer/paddle_trainer',
+                    '${PROJ_BINARY_ROOT}/paddle/trainer/paddle_merge_model',
+                    '${PROJ_BINARY_ROOT}/paddle/pserver/paddle_pserver_main'])]
 )

From 493396d81cbcd1e29b5ea6c3aa11cfa20496b773 Mon Sep 17 00:00:00 2001
From: Qiao Longfei
Date: Mon, 7 Aug 2017 19:50:37 +0800
Subject: [PATCH 3/3] add support_gpu (#3304)

* add support_gpu
* fix allclose
* fix name error and symplify code
---
 paddle/framework/op_registry.h                   | 6 ++++++
 paddle/framework/pybind.cc                       | 2 ++
 python/paddle/v2/framework/tests/op_test_util.py | 6 ++++--
 3 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h
index 6c26183818..b2813da83d 100644
--- a/paddle/framework/op_registry.h
+++ b/paddle/framework/op_registry.h
@@ -260,6 +260,12 @@ class OpRegistry {
     return CreateOp(op_desc.type(), inputs, outputs, attrs);
   }
 
+  static bool SupportGPU(const std::string& op_type) {
+    OperatorWithKernel::OpKernelKey key;
+    key.place_ = platform::GPUPlace();
+    return OperatorWithKernel::AllOpKernels().at(op_type).count(key) != 0;
+  }
+
   static std::shared_ptr<OperatorBase> CreateGradOp(const OperatorBase& op) {
     PADDLE_ENFORCE(!op.IsNetOp(),
                    "Use framework::Backward to get backward ops");
diff --git a/paddle/framework/pybind.cc b/paddle/framework/pybind.cc
index cbb86c4195..d4ac8fda54 100644
--- a/paddle/framework/pybind.cc
+++ b/paddle/framework/pybind.cc
@@ -200,6 +200,8 @@ All parameter, weight, gradient are variables in Paddle.
         return OpRegistry::CreateOp(desc);
       });
 
+  operator_base.def_static("support_gpu", &OpRegistry::SupportGPU);
+
   operator_base.def("backward",
                     [](const OperatorBase &forwardOp,
                        const std::unordered_set<std::string> &no_grad_vars) {
diff --git a/python/paddle/v2/framework/tests/op_test_util.py b/python/paddle/v2/framework/tests/op_test_util.py
index e6bc7d8a9b..636828064f 100644
--- a/python/paddle/v2/framework/tests/op_test_util.py
+++ b/python/paddle/v2/framework/tests/op_test_util.py
@@ -28,7 +28,7 @@ class OpTestMeta(type):
             kwargs = dict()
             places = []
             places.append(core.CPUPlace())
-            if core.is_compile_gpu():
+            if core.is_compile_gpu() and core.Operator.support_gpu(self.type):
                 places.append(core.GPUPlace(0))
 
             for place in places:
@@ -66,7 +66,9 @@ class OpTestMeta(type):
             for out_name in func.all_output_args:
                 actual = numpy.array(scope.find_var(out_name).get_tensor())
                 expect = self.outputs[out_name]
-                numpy.isclose(actual, expect)
+                self.assertTrue(
+                    numpy.allclose(actual, expect),
+                    "output name: " + out_name + "has diff")
 
         obj.test_all = test_all
         return obj