Enhance CUDA code & unit test for argsort_op

analysis/code-clean
Yibing Liu 7 years ago
commit 9386ac0a40

@ -4,7 +4,6 @@
[![Build Status](https://travis-ci.org/PaddlePaddle/Paddle.svg?branch=develop)](https://travis-ci.org/PaddlePaddle/Paddle)
[![Documentation Status](https://img.shields.io/badge/docs-latest-brightgreen.svg?style=flat)](http://www.paddlepaddle.org/docs/develop/documentation/en/getstarted/index_en.html)
[![Documentation Status](https://img.shields.io/badge/中文文档-最新-brightgreen.svg)](http://www.paddlepaddle.org/docs/develop/documentation/zh/getstarted/index_cn.html)
[![Coverage Status](https://coveralls.io/repos/github/PaddlePaddle/Paddle/badge.svg?branch=develop)](https://coveralls.io/github/PaddlePaddle/Paddle?branch=develop)
[![Release](https://img.shields.io/github/release/PaddlePaddle/Paddle.svg)](https://github.com/PaddlePaddle/Paddle/releases)
[![License](https://img.shields.io/badge/license-Apache%202-blue.svg)](LICENSE)

@ -122,5 +122,9 @@ def parse_args():
type=str,
default="",
help='Directory that contains all the training recordio files.')
parser.add_argument(
'--use_inference_transpiler',
action='store_true',
help='If set, uses inference transpiler to optimize the program.')
args = parser.parse_args()
return args

@ -131,6 +131,11 @@ def train(avg_loss, infer_prog, optimizer, train_reader, test_reader, batch_acc,
exe = fluid.Executor(place)
exe.run(startup_prog)
# Use inference_transpiler to speedup
if args.use_inference_transpiler:
t = fluid.InferenceTranspiler()
t.transpile(infer_prog, place)
if not args.use_reader_op:
feed_var_list = [
var for var in train_prog.global_block().vars.itervalues()
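For context, a minimal sketch of the code path the new --use_inference_transpiler flag enables: the inference program is rewritten in place by the transpiler before evaluation. The toy network and shapes below are illustrative, not taken from the benchmark script:

    import paddle.fluid as fluid

    # Toy inference network; layer sizes are illustrative only.
    image = fluid.layers.data(name='image', shape=[784], dtype='float32')
    prediction = fluid.layers.fc(input=image, size=10, act='softmax')

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    infer_prog = fluid.default_main_program().clone(for_test=True)

    # Same call pattern as the hunk above: rewrite (e.g. fuse ops in) the
    # inference program in place before running it for evaluation.
    t = fluid.InferenceTranspiler()
    t.transpile(infer_prog, place)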

@ -26,13 +26,15 @@ function(fetch_include_recursively root_dir)
endforeach()
endfunction()
# download library
message(STATUS "Download Anakin library from ${ANAKIN_LIBRARY_URL}")
execute_process(COMMAND bash -c "mkdir -p ${ANAKIN_INSTALL_DIR}")
execute_process(COMMAND bash -c "rm -rf ${ANAKIN_INSTALL_DIR}/*")
execute_process(COMMAND bash -c "cd ${ANAKIN_INSTALL_DIR}; wget -q ${ANAKIN_LIBRARY_URL}")
execute_process(COMMAND bash -c "mkdir -p ${ANAKIN_INSTALL_DIR}")
execute_process(COMMAND bash -c "cd ${ANAKIN_INSTALL_DIR}; tar xzf anakin_release_simple.tar.gz")
if (NOT EXISTS "${ANAKIN_INSTALL_DIR}")
# download library
message(STATUS "Download Anakin library from ${ANAKIN_LIBRARY_URL}")
execute_process(COMMAND bash -c "mkdir -p ${ANAKIN_INSTALL_DIR}")
execute_process(COMMAND bash -c "rm -rf ${ANAKIN_INSTALL_DIR}/*")
execute_process(COMMAND bash -c "cd ${ANAKIN_INSTALL_DIR}; wget -q ${ANAKIN_LIBRARY_URL}")
execute_process(COMMAND bash -c "mkdir -p ${ANAKIN_INSTALL_DIR}")
execute_process(COMMAND bash -c "cd ${ANAKIN_INSTALL_DIR}; tar xzf anakin_release_simple.tar.gz")
endif()
if (WITH_ANAKIN)
message(STATUS "Anakin for inference is enabled")

@ -40,12 +40,12 @@ ExternalProject_Add(
# NOTE(wuyi):
# this package is generated by following steps:
# 1. git clone -b v1.8.x https://github.com/grpc/grpc.git
# 2. submodule update --init
# 2. git submodule update --init
# 3. keep only zlib, cares, protobuf, boringssl under "third_party",
# checkout and clean other dirs under third_party
# 4. remove .git, and package the directory.
URL "http://paddlepaddledeps.bj.bcebos.com/grpc-v1.8.x.tar.gz"
URL_MD5 "c9c58ee7d0e8929a63155af6a2ecdbd0"
URL "http://paddlepaddledeps.bj.bcebos.com/grpc-v1.10.x.tar.gz"
URL_MD5 "1f268a2aff6759839dccd256adcc91cf"
PREFIX ${GRPC_SOURCES_DIR}
UPDATE_COMMAND ""
CONFIGURE_COMMAND ""

@ -114,7 +114,12 @@ INCLUDE_DIRECTORIES(${CBLAS_INC_DIR})
SET(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/cblas_dummy.c)
FILE(WRITE ${dummyfile} "const char *dummy_cblas = \"${dummyfile}\";")
ADD_LIBRARY(cblas STATIC ${dummyfile})
TARGET_LINK_LIBRARIES(cblas ${CBLAS_LIBRARIES})
IF("${CBLAS_PROVIDER}" STREQUAL "MKLML")
TARGET_LINK_LIBRARIES(cblas dynload_mklml)
ELSE()
TARGET_LINK_LIBRARIES(cblas ${CBLAS_LIBRARIES})
ENDIF("${CBLAS_PROVIDER}" STREQUAL "MKLML")
IF(NOT ${CBLAS_FOUND})
ADD_DEPENDENCIES(cblas extern_openblas)

@ -96,6 +96,20 @@ if(NOT APPLE AND NOT ANDROID)
set(CMAKE_CXX_LINK_EXECUTABLE "${CMAKE_CXX_LINK_EXECUTABLE} -pthread -ldl -lrt")
endif(NOT APPLE AND NOT ANDROID)
set_property(GLOBAL PROPERTY FLUID_MODULES "")
# find all fluid modules is used for paddle fluid static library
# for building inference libs
function(find_fluid_modules TARGET_NAME)
get_filename_component(__target_path ${TARGET_NAME} ABSOLUTE)
string(REGEX REPLACE "^${PADDLE_SOURCE_DIR}/" "" __target_path ${__target_path})
string(FIND "${__target_path}" "fluid" pos)
if(pos GREATER 1)
get_property(fluid_modules GLOBAL PROPERTY FLUID_MODULES)
set(fluid_modules ${fluid_modules} ${TARGET_NAME})
set_property(GLOBAL PROPERTY FLUID_MODULES "${fluid_modules}")
endif()
endfunction(find_fluid_modules)
function(merge_static_libs TARGET_NAME)
set(libs ${ARGN})
list(REMOVE_DUPLICATES libs)
@ -195,6 +209,15 @@ function(cc_library TARGET_NAME)
list(REMOVE_ITEM cc_library_DEPS warpctc)
add_dependencies(${TARGET_NAME} warpctc)
endif()
# Only deps libmklml.so, not link
if("${cc_library_DEPS};" MATCHES "mklml;")
list(REMOVE_ITEM cc_library_DEPS mklml)
if(NOT "${TARGET_NAME}" MATCHES "dynload_mklml")
list(APPEND cc_library_DEPS dynload_mklml)
endif()
add_dependencies(${TARGET_NAME} mklml)
target_link_libraries(${TARGET_NAME} "-L${MKLML_LIB_DIR} -liomp5 -Wl,--as-needed")
endif()
target_link_libraries(${TARGET_NAME} ${cc_library_DEPS})
add_dependencies(${TARGET_NAME} ${cc_library_DEPS})
endif()
@ -241,6 +264,7 @@ function(cc_test TARGET_NAME)
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
if (${cc_test_SERIAL})
set_property(TEST ${TARGET_NAME} PROPERTY SERIAL 1)
set_property(TEST ${TARGET_NAME} PROPERTY ENVIRONMENT FLAGS_init_allocated_mem=true)
endif()
endif()
endfunction(cc_test)
@ -305,6 +329,7 @@ function(nv_test TARGET_NAME)
add_test(${TARGET_NAME} ${TARGET_NAME})
if (nv_test_SERIAL)
set_property(TEST ${TARGET_NAME} PROPERTY SERIAL 1)
set_property(TEST ${TARGET_NAME} PROPERTY ENVIRONMENT FLAGS_init_allocated_mem=true)
endif()
endif()
endfunction(nv_test)
@ -552,7 +577,7 @@ function(py_test TARGET_NAME)
set(multiValueArgs SRCS DEPS ARGS ENVS)
cmake_parse_arguments(py_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
add_test(NAME ${TARGET_NAME}
COMMAND env PYTHONPATH=${PADDLE_BINARY_DIR}/python ${py_test_ENVS}
COMMAND env FLAGS_init_allocated_mem=true PYTHONPATH=${PADDLE_BINARY_DIR}/python ${py_test_ENVS}
${PYTHON_EXECUTABLE} -u ${py_test_SRCS} ${py_test_ARGS}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
endif()
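The FLAGS_init_allocated_mem environment variable that cc_test, nv_test, and py_test now export makes the allocator fill freshly allocated memory with junk values, so tests that read uninitialized buffers fail loudly instead of passing by accident (treat this description as a summary of the flag's intent; the exact fill pattern is an implementation detail). A hedged sketch of setting the same flag when running a Python test by hand:

    import os

    # The flag must be in the environment before paddle.fluid sets up its
    # allocators, so export it ahead of the import.
    os.environ['FLAGS_init_allocated_mem'] = 'true'

    import paddle.fluid as fluid  # noqa: E402

    # ...then run the unit test module as usual (e.g. via unittest.main()).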

@ -12,19 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
set_property(GLOBAL PROPERTY FLUID_MODULES "")
# find all fluid modules is used for paddle fluid static library
function(find_fluid_modules TARGET_NAME)
get_filename_component(__target_path ${TARGET_NAME} ABSOLUTE)
string(REGEX REPLACE "^${PADDLE_SOURCE_DIR}/" "" __target_path ${__target_path})
string(FIND "${__target_path}" "fluid" pos)
if(pos GREATER 1)
get_property(fluid_modules GLOBAL PROPERTY FLUID_MODULES)
set(fluid_modules ${fluid_modules} ${TARGET_NAME})
set_property(GLOBAL PROPERTY FLUID_MODULES "${fluid_modules}")
endif()
endfunction(find_fluid_modules)
# make package for paddle fluid shared and static library
function(copy TARGET)
set(options "")
@ -149,21 +136,33 @@ copy(memory_lib
DSTS ${dst_dir}/${module} ${dst_dir}/${module}/detail
)
set(module "inference")
copy(inference_lib DEPS paddle_fluid_shared paddle_fluid
SRCS ${src_dir}/${module}/*.h ${PADDLE_BINARY_DIR}/paddle/fluid/inference/libpaddle_fluid.*
DSTS ${dst_dir}/${module} ${dst_dir}/${module}
)
set(inference_deps paddle_fluid_shared paddle_fluid)
if(WITH_CONTRIB)
set(contrib_dst_dir "${FLUID_INSTALL_DIR}/contrib/inference")
copy(contrib_inference_lib DEPS paddle_inference_api
message(STATUS "installing contrib")
set(contrib_dst_dir "${FLUID_INSTALL_DIR}/contrib/inference")
if (WITH_ANAKIN)
copy(contrib_anakin_inference_lib DEPS paddle_inference_api inference_anakin_api
SRCS
${PADDLE_BINARY_DIR}/paddle/contrib/inference/libinference_anakin_api* # compiled anakin api
${PADDLE_BINARY_DIR}/third_party/install/anakin/*.tar.gz # anakin release
DSTS ${contrib_dst_dir}/anakin ${contrib_dst_dir}/anakin)
list(APPEND inference_deps contrib_anakin_inference_lib)
endif()
copy(contrib_inference_lib DEPS paddle_inference_api paddle_inference_api_shared
SRCS ${PADDLE_SOURCE_DIR}/paddle/contrib/inference/paddle_inference_api.h
${PADDLE_BINARY_DIR}/paddle/contrib/inference/libpaddle_inference_api.*
DSTS ${contrib_dst_dir} ${contrib_dst_dir}
)
${PADDLE_BINARY_DIR}/paddle/contrib/inference/libpaddle_inference_api*
DSTS ${contrib_dst_dir} ${contrib_dst_dir})
list(APPEND inference_deps contrib_inference_lib)
endif()
set(module "inference")
copy(inference_lib DEPS ${inference_deps}
SRCS ${src_dir}/${module}/*.h ${PADDLE_BINARY_DIR}/paddle/fluid/inference/libpaddle_fluid.*
DSTS ${dst_dir}/${module} ${dst_dir}/${module}
)
set(module "platform")
copy(platform_lib DEPS profiler_py_proto
SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/dynload/*.h ${src_dir}/${module}/details/*.h

@ -0,0 +1,53 @@
========
About Us
========

What is PaddlePaddle
--------------------

- PaddlePaddle is a deep learning framework developed and open-sourced by Baidu. It lets developers and enterprises turn their AI ideas into reality safely and quickly.
- The project team brings together top deep learning scientists from around the world and is committed to giving developers and enterprises the best deep learning R&D experience.
- The framework is easy to learn, easy to use, secure, and efficient, making it the deep learning tool best suited to developers and enterprises in China.

PaddlePaddle's Technical Strengths
----------------------------------

- A new-generation deep learning framework: PaddlePaddle is built on a "deep learning programming language". Without sacrificing performance, it greatly improves the framework's expressive power and can describe any model that may arise.
- Friendly to large-scale computation: honed by a wide range of large-scale computing workloads inside Baidu, PaddlePaddle performs very well in distributed computing. Its EDL technology saves a large amount of computing resources, and it also supports training large-scale sparse models.
- Visualized deep learning: with Visual DL, developers can conveniently monitor overall training trends, data sample quality and intermediate results, parameter distributions and how they change, and the model structure, which makes development easier.

An Education Ecosystem Built on PaddlePaddle
--------------------------------------------

- Deep learning courses: Baidu and top education and training institutions in the Chinese market have jointly developed high-quality deep learning courses and textbooks that help developers master deep learning from scratch.
- Hands-on deep learning practice: for users whose goal is research and study, PaddlePaddle provides an online development environment that needs no installation, together with algorithm, compute, and data support.
- Offline training: rich, high-quality offline education activities such as young-teacher training, offline boot camps, salons, and other forms of training and exchange.

AI Services Built on PaddlePaddle
---------------------------------

- EasyDL: helps enterprises with no algorithm background quickly complete a deep learning task; only a small amount of data is needed to obtain a high-quality model.
- AI Market: provides standardized trading mechanisms for AI capabilities and products, helping enterprises quickly find what they need and carry out their AI business effectively.
- Deep learning competitions: PaddlePaddle brings together top deep learning developers; enterprises can publish their business problems and quickly find the best solutions through competitions.

If you have any questions about PaddlePaddle, you can reach us through the following channels
----------------------------------------------------------------------------------------------

- Learning/usage questions: you can give us feedback in the `PaddlePaddle开源社区 <https://github.com/PaddlePaddle/Paddle/issues>`_ and the `PaddlePaddle中文社区 <http://ai.baidu.com/forum/topic/list/168>`_.
- Suggestions for the PaddlePaddle framework: send email to Paddle-better@baidu.com.

We look forward to building a world-class deep learning framework with you and advancing AI technology together!

The PaddlePaddle Team

@ -0,0 +1,16 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
=============
fluid.average
=============
.. _api_fluid_average_WeightedAverage:
WeightedAverage
---------------
.. autoclass:: paddle.fluid.average.WeightedAverage
:members:
:noindex:
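Since this page only lists the class, a quick, hedged usage sketch (the numbers are made up; assumes WeightedAverage exposes add(value, weight) and eval() in this release):

    import paddle.fluid as fluid

    avg = fluid.average.WeightedAverage()
    # Accumulate per-batch values weighted by batch size.
    avg.add(value=0.25, weight=32)
    avg.add(value=0.15, weight=64)
    print(avg.eval())  # weighted mean of everything added so far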

@ -0,0 +1,23 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
==============
fluid.backward
==============
.. _api_fluid_backward_append_backward:
append_backward
---------------
.. autofunction:: paddle.fluid.backward.append_backward
:noindex:
.. _api_fluid_backward_calc_gradient:
calc_gradient
-------------
.. autofunction:: paddle.fluid.backward.calc_gradient
:noindex:
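A minimal, hedged sketch of append_backward on a toy regression network (shapes and layer choices are illustrative):

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    pred = fluid.layers.fc(input=x, size=1)
    loss = fluid.layers.mean(
        fluid.layers.square_error_cost(input=pred, label=y))

    # Appends gradient ops to the default main program and returns
    # (parameter, gradient) pairs for an optimizer to consume.
    param_grads = fluid.backward.append_backward(loss)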

@ -1,9 +1,11 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
====
clip
====
==========
fluid.clip
==========
.. _api_fluid_clip_ErrorClipByValue:
ErrorClipByValue
----------------
@ -12,6 +14,8 @@ ErrorClipByValue
:members:
:noindex:
.. _api_fluid_clip_GradientClipByValue:
GradientClipByValue
-------------------
@ -19,6 +23,8 @@ GradientClipByValue
:members:
:noindex:
.. _api_fluid_clip_GradientClipByNorm:
GradientClipByNorm
------------------
@ -26,6 +32,8 @@ GradientClipByNorm
:members:
:noindex:
.. _api_fluid_clip_GradientClipByGlobalNorm:
GradientClipByGlobalNorm
------------------------
@ -33,15 +41,3 @@ GradientClipByGlobalNorm
:members:
:noindex:
append_gradient_clip_ops
------------------------
.. autofunction:: paddle.fluid.clip.append_gradient_clip_ops
:noindex:
error_clip_callback
-------------------
.. autofunction:: paddle.fluid.clip.error_clip_callback
:noindex:
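A short, hedged example of the clipping classes documented above, applied before the optimizer step (the network and clip_norm value are illustrative; assumes fluid.clip.set_gradient_clip is the registration entry point in this release):

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    loss = fluid.layers.mean(
        fluid.layers.square_error_cost(
            input=fluid.layers.fc(input=x, size=1), label=y))

    # Clip all parameter gradients by their global norm before the update.
    fluid.clip.set_gradient_clip(
        fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0))
    fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)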

@ -1,10 +0,0 @@
==================================
Data Reader Interface and DataSets
==================================
.. toctree::
:maxdepth: 1
data/data_reader.rst
data/image.rst
data/dataset.rst

@ -1,9 +1,11 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
===========
data_feeder
===========
=================
fluid.data_feeder
=================
.. _api_fluid_data_feeder_DataFeeder:
DataFeeder
----------
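A hedged sketch of feeding an in-memory mini-batch through DataFeeder (variable names, shapes, and the random data are illustrative):

    import numpy
    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[4], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='int64')

    place = fluid.CPUPlace()
    feeder = fluid.DataFeeder(feed_list=[x, y], place=place)

    # Each element of the mini-batch is one (x, y) sample.
    minibatch = [(numpy.random.random(4).astype('float32'),
                  numpy.array([1], dtype='int64')) for _ in range(8)]
    feed_dict = feeder.feed(minibatch)  # ready for Executor.run(feed=...)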

@ -1,7 +0,0 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
=========
evaluator
=========

@ -1,9 +1,11 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
========
executor
========
==============
fluid.executor
==============
.. _api_fluid_executor_Executor:
Executor
--------
@ -12,24 +14,32 @@ Executor
:members:
:noindex:
.. _api_fluid_executor_global_scope:
global_scope
------------
.. autofunction:: paddle.fluid.executor.global_scope
:noindex:
.. _api_fluid_executor_scope_guard:
scope_guard
-----------
.. autofunction:: paddle.fluid.executor.scope_guard
:noindex:
switch_scope
------------
.. _api_fluid_executor__switch_scope:
_switch_scope
-------------
.. autofunction:: paddle.fluid.executor.switch_scope
.. autofunction:: paddle.fluid.executor._switch_scope
:noindex:
.. _api_fluid_executor_fetch_var:
fetch_var
---------
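A small, hedged sketch of the scope helpers listed here (assumes fluid.core.Scope and fluid.scope_guard are exposed at these paths in this release; the work done inside the scope is illustrative):

    import paddle.fluid as fluid

    # Run a piece of work in a fresh scope so its variables do not land in
    # the default global scope returned by fluid.global_scope().
    new_scope = fluid.core.Scope()
    with fluid.scope_guard(new_scope):
        exe = fluid.Executor(fluid.CPUPlace())
        exe.run(fluid.default_startup_program())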

File diff suppressed because it is too large.

@ -29,19 +29,27 @@ def parse_arg():
class DocGenerator(object):
def __init__(self, module_name, stream=sys.stdout):
def __init__(self, module_name=None, stream=sys.stdout):
if module_name == "":
module_name = None
self.stream = stream
self.module_name = module_name
if not hasattr(fluid, module_name):
raise ValueError("Cannot find fluid.{0}".format(module_name))
if module_name is None:
self.module_name = "fluid"
else:
self.module = getattr(fluid, module_name)
self.module_name = "fluid." + module_name
if module_name is None:
self.module = fluid
else:
if not hasattr(fluid, module_name):
raise ValueError("Cannot find fluid.{0}".format(module_name))
else:
self.module = getattr(fluid, module_name)
self.stream.write('''.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
''')
self._print_header_(module_name, dot='=', is_title=True)
self._print_header_(self.module_name, dot='=', is_title=True)
def print_submodule(self, submodule_name):
submodule = getattr(self.module, submodule_name)
@ -60,25 +68,29 @@ class DocGenerator(object):
self._print_header_(name, dot='=', is_title=False)
def print_item(self, name):
item = getattr(self.module, name)
item = getattr(self.module, name, None)
if item is None:
return
if isinstance(item, types.TypeType):
self.print_class(name)
elif isinstance(item, types.FunctionType):
self.print_method(name)
else:
raise RuntimeError("Unsupported item {0}".format(name))
pass
def print_class(self, name):
self._print_ref_(name)
self._print_header_(name, dot='-', is_title=False)
self.stream.write('''.. autoclass:: paddle.fluid.{0}.{1}
self.stream.write('''.. autoclass:: paddle.{0}.{1}
:members:
:noindex:
'''.format(self.module_name, name))
def print_method(self, name):
self._print_ref_(name)
self._print_header_(name, dot='-', is_title=False)
self.stream.write('''.. autofunction:: paddle.fluid.{0}.{1}
self.stream.write('''.. autofunction:: paddle.{0}.{1}
:noindex:
'''.format(self.module_name, name))
@ -94,6 +106,10 @@ class DocGenerator(object):
self.stream.write('\n')
self.stream.write('\n')
def _print_ref_(self, name):
self.stream.write(".. _api_{0}_{1}:\n\n".format("_".join(
self.module_name.split(".")), name))
def main():
args = parse_arg()
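To make the new cross-reference labels concrete, a standalone sketch that mirrors the _print_ref_ logic above (make_ref is a hypothetical name used only for illustration; the real script writes the label straight to its output stream):

    # Mirrors _print_ref_: dots in the module name become underscores,
    # so "fluid.layers" + "fc" yields the label ".. _api_fluid_layers_fc:".
    def make_ref(module_name, item_name):
        return ".. _api_{0}_{1}:\n\n".format(
            "_".join(module_name.split(".")), item_name)

    print(make_ref("fluid.layers", "fc"))    # .. _api_fluid_layers_fc:
    print(make_ref("fluid", "ParamAttr"))    # .. _api_fluid_ParamAttr:

This is also why `python gen_doc.py "" > fluid.rst` works in the updated gen_doc.sh: an empty module name is treated as the top-level fluid module, with module_name set to plain "fluid".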

@ -1,7 +1,9 @@
#!/bin/bash
python gen_doc.py layers --submodules control_flow device io nn ops tensor detection learning_rate_scheduler metric > layers.rst
python gen_doc.py layers --submodules control_flow device io nn ops tensor learning_rate_scheduler detection metric_op tensor > layers.rst
for module in data_feeder clip metrics executor initializer io nets optimizer param_attr profiler regularizer transpiler
for module in data_feeder clip metrics executor initializer io nets optimizer param_attr profiler regularizer transpiler recordio_writer backward average profiler
do
python gen_doc.py ${module} > ${module}.rst
done
python gen_doc.py "" > fluid.rst

@ -1,10 +1,11 @@
======================
Fluid
======================
=============
API Reference
=============
.. toctree::
:maxdepth: 1
fluid.rst
layers.rst
data_feeder.rst
executor.rst
@ -18,3 +19,8 @@ Fluid
regularizer.rst
io.rst
data.rst
transpiler.rst
recordio_writer.rst
backward.rst
average.rst
profiler.rst

@ -1,9 +1,11 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
===========
initializer
===========
=================
fluid.initializer
=================
.. _api_fluid_initializer_Constant:
Constant
--------
@ -12,6 +14,8 @@ Constant
:members:
:noindex:
.. _api_fluid_initializer_Uniform:
Uniform
-------
@ -19,6 +23,8 @@ Uniform
:members:
:noindex:
.. _api_fluid_initializer_Normal:
Normal
------
@ -26,6 +32,8 @@ Normal
:members:
:noindex:
.. _api_fluid_initializer_Xavier:
Xavier
------
@ -33,6 +41,8 @@ Xavier
:members:
:noindex:
.. _api_fluid_initializer_Bilinear:
Bilinear
--------
@ -40,18 +50,33 @@ Bilinear
:members:
:noindex:
.. _api_fluid_initializer_MSRA:
MSRA
----
.. autoclass:: paddle.fluid.initializer.MSRA
:members:
:noindex:
.. _api_fluid_initializer_force_init_on_cpu:
force_init_on_cpu
-----------------
.. autofunction:: paddle.fluid.initializer.force_init_on_cpu
:noindex:
.. _api_fluid_initializer_init_on_cpu:
init_on_cpu
-----------
.. autofunction:: paddle.fluid.initializer.init_on_cpu
:noindex:
.. _api_fluid_initializer_ConstantInitializer:
ConstantInitializer
-------------------
@ -59,6 +84,8 @@ ConstantInitializer
:members:
:noindex:
.. _api_fluid_initializer_UniformInitializer:
UniformInitializer
------------------
@ -66,6 +93,8 @@ UniformInitializer
:members:
:noindex:
.. _api_fluid_initializer_NormalInitializer:
NormalInitializer
-----------------
@ -73,6 +102,8 @@ NormalInitializer
:members:
:noindex:
.. _api_fluid_initializer_XavierInitializer:
XavierInitializer
-----------------
@ -80,6 +111,8 @@ XavierInitializer
:members:
:noindex:
.. _api_fluid_initializer_BilinearInitializer:
BilinearInitializer
-------------------
@ -87,3 +120,12 @@ BilinearInitializer
:members:
:noindex:
.. _api_fluid_initializer_MSRAInitializer:
MSRAInitializer
---------------
.. autoclass:: paddle.fluid.initializer.MSRAInitializer
:members:
:noindex:
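A hedged example of attaching the documented initializers to a layer's parameters via ParamAttr (layer sizes are illustrative):

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[32], dtype='float32')
    hidden = fluid.layers.fc(
        input=x,
        size=64,
        param_attr=fluid.ParamAttr(initializer=fluid.initializer.Xavier()),
        bias_attr=fluid.ParamAttr(
            initializer=fluid.initializer.Constant(value=0.0)))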

@ -1,9 +1,11 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
==
io
==
========
fluid.io
========
.. _api_fluid_io_save_vars:
save_vars
---------
@ -11,84 +13,112 @@ save_vars
.. autofunction:: paddle.fluid.io.save_vars
:noindex:
.. _api_fluid_io_save_params:
save_params
-----------
.. autofunction:: paddle.fluid.io.save_params
:noindex:
.. _api_fluid_io_save_persistables:
save_persistables
-----------------
.. autofunction:: paddle.fluid.io.save_persistables
:noindex:
.. _api_fluid_io_load_vars:
load_vars
---------
.. autofunction:: paddle.fluid.io.load_vars
:noindex:
.. _api_fluid_io_load_params:
load_params
-----------
.. autofunction:: paddle.fluid.io.load_params
:noindex:
.. _api_fluid_io_load_persistables:
load_persistables
-----------------
.. autofunction:: paddle.fluid.io.load_persistables
:noindex:
.. _api_fluid_io_save_inference_model:
save_inference_model
--------------------
.. autofunction:: paddle.fluid.io.save_inference_model
:noindex:
.. _api_fluid_io_load_inference_model:
load_inference_model
--------------------
.. autofunction:: paddle.fluid.io.load_inference_model
:noindex:
.. _api_fluid_io_get_inference_program:
get_inference_program
---------------------
.. autofunction:: paddle.fluid.io.get_inference_program
:noindex:
.. _api_fluid_io_save_checkpoint:
save_checkpoint
---------------
.. autofunction:: paddle.fluid.io.save_checkpoint
:noindex:
.. _api_fluid_io_load_checkpoint:
load_checkpoint
---------------
.. autofunction:: paddle.fluid.io.load_checkpoint
:noindex:
.. _api_fluid_io_clean_checkpoint:
clean_checkpoint
----------------
.. autofunction:: paddle.fluid.io.clean_checkpoint
:noindex:
.. _api_fluid_io_load_persist_vars_without_grad:
load_persist_vars_without_grad
------------------------------
.. autofunction:: paddle.fluid.io.load_persist_vars_without_grad
:noindex:
.. _api_fluid_io_save_persist_vars_without_grad:
save_persist_vars_without_grad
------------------------------
.. autofunction:: paddle.fluid.io.save_persist_vars_without_grad
:noindex:
.. _api_fluid_io_get_latest_checkpoint_serial:
get_latest_checkpoint_serial
----------------------------
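A brief, hedged sketch of the save/load inference model pair documented above (the directory name and toy network are illustrative):

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    pred = fluid.layers.fc(input=x, size=1)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())

    # Persist a pruned program plus its parameters for later inference.
    fluid.io.save_inference_model("./infer_model", ["x"], [pred], exe)

    # Later: reload the program, its feed names, and its fetch targets.
    infer_prog, feed_names, fetch_targets = fluid.io.load_inference_model(
        "./infer_model", exe)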

File diff suppressed because it is too large.

@ -1,9 +1,11 @@
.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
=======
metrics
=======
=============
fluid.metrics
=============
.. _api_fluid_metrics_MetricBase:
MetricBase
----------
@ -12,6 +14,8 @@ MetricBase
:members:
:noindex:
.. _api_fluid_metrics_CompositeMetric:
CompositeMetric
---------------
@ -19,6 +23,26 @@ CompositeMetric
:members:
:noindex:
.. _api_fluid_metrics_Precision:
Precision
---------
.. autoclass:: paddle.fluid.metrics.Precision
:members:
:noindex:
.. _api_fluid_metrics_Recall:
Recall
------
.. autoclass:: paddle.fluid.metrics.Recall
:members:
:noindex:
.. _api_fluid_metrics_Accuracy:
Accuracy
--------
@ -26,6 +50,8 @@ Accuracy
:members:
:noindex:
.. _api_fluid_metrics_ChunkEvaluator:
ChunkEvaluator
--------------
@ -33,6 +59,8 @@ ChunkEvaluator
:members:
:noindex:
.. _api_fluid_metrics_EditDistance:
EditDistance
------------
@ -40,6 +68,8 @@ EditDistance
:members:
:noindex:
.. _api_fluid_metrics_DetectionMAP:
DetectionMAP
------------
@ -47,6 +77,8 @@ DetectionMAP
:members:
:noindex:
.. _api_fluid_metrics_Auc:
Auc
---
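A short, hedged sketch of one documented metric in use (batch numbers are made up; assumes Accuracy exposes update(value, weight) and eval() in this release):

    import paddle.fluid as fluid

    acc = fluid.metrics.Accuracy()
    # Accumulate per-mini-batch accuracy weighted by batch size.
    acc.update(value=0.75, weight=128)
    acc.update(value=0.80, weight=128)
    print(acc.eval())  # running accuracy over all updates so far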

Some files were not shown because too many files have changed in this diff.
