make inference api work with Doxygen (#15195)

Yan Chunwei, committed via GitHub
parent c562be20d9
commit d09d6eadc0

@@ -35,8 +35,11 @@ using framework::proto::ProgramDesc;
 using framework::NaiveExecutor;
 using contrib::AnalysisConfig;

-/* This predictor is based on the original native predictor with IR and Analysis
- * support. It will optimize IR and Parameters in the runtime.
+/** \brief This predictor is based on the original native predictor with IR and
+ * Analysis support.
+ *
+ * It will optimize IR and Parameters in the runtime.
+ *
  * TODO(Superjomn) Replace the Navive predictor?
  */
 class AnalysisPredictor : public PaddlePredictor {
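
For orientation (not part of the diff): a minimal sketch of how this IR/Analysis-backed predictor is typically created and exercised through the public C++ API. The header path, model directory, input name, and shape are placeholder assumptions, and the exact CreatePaddlePredictor overload may differ between Paddle releases.

#include <vector>
#include "paddle/fluid/inference/api/paddle_inference_api.h"

int main() {
  // Sketch: configure the analysis-based predictor (placeholder model dir).
  paddle::contrib::AnalysisConfig config;
  config.SetModel("./mobilenet");
  config.DisableGpu();  // CPU inference for this sketch

  // For an AnalysisConfig this dispatches to the AnalysisPredictor above.
  auto predictor =
      paddle::CreatePaddlePredictor<paddle::contrib::AnalysisConfig>(config);

  // One input tensor; name and shape are hypothetical and model-dependent.
  paddle::PaddleTensor input;
  input.name = "image";
  input.shape = {1, 3, 224, 224};
  input.dtype = paddle::PaddleDType::FLOAT32;
  input.data.Resize(1 * 3 * 224 * 224 * sizeof(float));  // fill with real data

  std::vector<paddle::PaddleTensor> outputs;
  predictor->Run({input}, &outputs);  // feed/fetch ops handle I/O by default
  return 0;
}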

@@ -19,7 +19,6 @@ limitations under the License. */
 #include <memory>
 #include <string>
 #include <vector>
 #include "paddle/fluid/framework/ddim.h"
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/lod_tensor_array.h"

@@ -19,6 +19,8 @@
 #include <unordered_set>
 #include <vector>

+/*! \file */
+
 // Here we include some header files with relative paths, for that in deploy,
 // the abstract path of this header file will be changed.
 #include "paddle_api.h"  // NOLINT
@@ -41,49 +43,125 @@ struct AnalysisConfig {
   explicit AnalysisConfig(const std::string& prog_file,
                           const std::string& params_file);

-  // Model path related.
+  /** Set model with a directory.
+   */
   void SetModel(const std::string& model_dir) { model_dir_ = model_dir; }
+  /** Set model with two specific pathes for program and parameters.
+   */
   void SetModel(const std::string& prog_file_path,
                 const std::string& params_file_path);
+  /** Set program file path.
+   */
   void SetProgFile(const std::string& x) { prog_file_ = x; }
+  /** Set parameter composed file path.
+   */
   void SetParamsFile(const std::string& x) { params_file_ = x; }
+  /** Get the model directory path.
+   */
   const std::string& model_dir() const { return model_dir_; }
+  /** Get the program file path.
+   */
   const std::string& prog_file() const { return prog_file_; }
+  /** Get the composed parameters file.
+   */
   const std::string& params_file() const { return params_file_; }

   // GPU related.
+  /**
+   * \brief Turn on GPU.
+   * @param memory_pool_init_size_mb initial size of the GPU memory pool in MB.
+   * @param device_id the GPU card to use (default is 0).
+   */
   void EnableUseGpu(uint64_t memory_pool_init_size_mb, int device_id = 0);
+  /** Turn off the GPU.
+   */
   void DisableGpu();
+  /** A bool state telling whether the GPU is turned on.
+   */
   bool use_gpu() const { return use_gpu_; }
+  /** Get the GPU device id.
+   */
   int gpu_device_id() const { return device_id_; }
+  /** Get the initial size in MB of the GPU memory pool.
+   */
   int memory_pool_init_size_mb() const { return memory_pool_init_size_mb_; }
+  /** Get the proportion of the initial memory pool size compared to the device.
+   */
   float fraction_of_gpu_memory_for_pool() const;

-  // Determine whether to perform graph optimization.
+  /** \brief Control whether to perform IR graph optimization.
+   *
+   * If turned off, the AnalysisConfig will act just like a NativeConfig.
+   */
   void SwitchIrOptim(int x = true) { enable_ir_optim_ = x; }
+  /** A boolean state tell whether the ir graph optimization is actived.
+   */
   bool ir_optim() const { return enable_ir_optim_; }

+  /** \brief INTERNAL Determine whether to use the feed and fetch operators.
+   * Just for internal development, not stable yet.
+   * When ZeroCopyTensor is used, this should turned off.
+   */
   void SwitchUseFeedFetchOps(int x = true) { use_feed_fetch_ops_ = x; }
+  /** A boolean state telling whether to use the feed and fetch operators.
+   */
   bool use_feed_fetch_ops_enabled() const { return use_feed_fetch_ops_; }

+  /** \brief Control whether to specify the inputs' names.
+   *
+   * The PaddleTensor type has a `name` member, assign it with the corresponding
+   * variable name. This is used only when the input PaddleTensors passed to the
+   * `PaddlePredictor.Run(...)` cannot follow the order in the training phase.
+   */
   void SwitchSpecifyInputNames(bool x = true) { specify_input_name_ = x; }
+  /** A boolean state tell whether the input PaddleTensor names specified should
+   * be used to reorder the inputs in `PaddlePredictor.Run(...)`.
+   */
   bool specify_input_name() const { return specify_input_name_; }

+  /**
+   * \brief Turn on the TensorRT engine.
+   *
+   * The TensorRT engine will accelerate some subgraphes in the original Fluid
+   * computation graph. In some models such as TensorRT50, GoogleNet and so on,
+   * it gains significant performance acceleration.
+   *
+   * @param workspace_size the memory size(in byte) used for TensorRT workspace.
+   * @param max_batch_size the maximum batch size of this prediction task,
+   * better set as small as possible, or performance loss.
+   * @param min_subgrpah_size the minimum TensorRT subgraph size needed, if a
+   * subgraph is less than this, it will not transfer to TensorRT engine.
+   */
   void EnableTensorRtEngine(int workspace_size = 1 << 20,
                             int max_batch_size = 1, int min_subgraph_size = 3);
+  /** A boolean state telling whether the TensorRT engine is used.
+   */
   bool tensorrt_engine_enabled() const { return use_tensorrt_; }

+  /** Control whther to debug IR graph analysis phase.
+   */
   void SwitchIrDebug(int x = true) { ir_debug_ = x; }

+  /** Turn on MKLDNN.
+   */
   void EnableMKLDNN();
+  /** A boolean state telling whether to use the MKLDNN.
+   */
   bool mkldnn_enabled() const { return use_mkldnn_; }

-  // Set and get the number of cpu math library threads.
+  /** Set and get the number of cpu math library threads.
+   */
   void SetCpuMathLibraryNumThreads(int cpu_math_library_num_threads);
+  /** An int state telling how many threads are used in the CPU math library.
+   */
   int cpu_math_library_num_threads() const {
     return cpu_math_library_num_threads_;
   }

+  /** Transform the AnalysisConfig to NativeConfig.
+   */
   NativeConfig ToNativeConfig() const {
     NativeConfig config;
     config.model_dir = model_dir_;
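
As a hedged illustration of how the options documented in this hunk combine (the pool size, device id, thread count, and TensorRT parameters below are example values, not recommendations from the commit):

#include "paddle/fluid/inference/api/paddle_inference_api.h"

void ConfigureForGpu(paddle::contrib::AnalysisConfig *config) {
  config->SetModel("./model/__model__", "./model/params");  // program + params files

  config->EnableUseGpu(100 /*memory_pool_init_size_mb*/, 0 /*device_id*/);
  config->SwitchIrOptim(true);            // keep IR graph optimization on
  config->SwitchSpecifyInputNames(true);  // match inputs by PaddleTensor::name

  // Optionally offload eligible subgraphs to TensorRT.
  config->EnableTensorRtEngine(1 << 20 /*workspace_size*/,
                               1 /*max_batch_size*/,
                               3 /*min_subgraph_size*/);

  // CPU-only alternative:
  // config->DisableGpu();
  // config->SetCpuMathLibraryNumThreads(4);
}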
@@ -95,19 +173,30 @@ struct AnalysisConfig {
     config.specify_input_name = specify_input_name_;
     return config;
   }

+  /** Specify the operator type list to use MKLDNN acceleration.
+   * @param op_list the operator type list.
+   */
   void SetMKLDNNOp(std::unordered_set<std::string> op_list) {
     mkldnn_enabled_op_types_ = op_list;
   }

-  // Specify the memory buffer of program and parameter
+  /** Specify the memory buffer of program and parameter
+   * @param prog_buffer the memory buffer of program.
+   * @param prog_buffer_size the size of the data.
+   * @param params_buffer the memory buffer of the composed parameters file.
+   * @param params_buffer_size the size of the commposed parameters data.
+   */
   void SetModelBuffer(const char* prog_buffer, size_t prog_buffer_size,
-                      const char* program_buffer, size_t program_buffer_size);
+                      const char* params_buffer, size_t params_buffer_size);
+  /** A boolean state telling whether the model is set from the CPU memory.
+   */
   bool model_from_memory() const { return model_from_memory_; }

   friend class ::paddle::AnalysisPredictor;

-  // NOTE just for developer, not an official API, easily to be broken.
-  // Get a pass builder for customize the passes in IR analysis phase.
+  /** NOTE just for developer, not an official API, easily to be broken.
+   * Get a pass builder for customize the passes in IR analysis phase.
+   */
   PassStrategy* pass_builder() const;

  protected:
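
SetModelBuffer, documented in this hunk, benefits from a concrete sketch: loading the program and parameters from memory rather than disk. read_file is a hypothetical helper, and the buffers are kept alive until the predictor is created.

#include <fstream>
#include <sstream>
#include <string>
#include "paddle/fluid/inference/api/paddle_inference_api.h"

// Hypothetical helper: read a whole file into a string.
std::string read_file(const std::string &path) {
  std::ifstream in(path, std::ios::binary);
  std::ostringstream buf;
  buf << in.rdbuf();
  return buf.str();
}

void PredictFromMemory() {
  const std::string prog = read_file("./model/__model__");
  const std::string params = read_file("./model/params");

  paddle::contrib::AnalysisConfig config;
  config.SetModelBuffer(prog.data(), prog.size(), params.data(), params.size());
  // model_from_memory() now reports true.

  auto predictor =
      paddle::CreatePaddlePredictor<paddle::contrib::AnalysisConfig>(config);
}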

File diff suppressed because it is too large.

@@ -18,30 +18,39 @@
 #include <string>
 #include <vector>

+/*! \file */

+/*! \namespace paddle */
 namespace paddle {

-/*
- * This is a pass builder based on string. It is part of inference API.
+/** This is a pass builder based on string. It is part of inference API.
  */
 class PaddlePassBuilder {
  public:
   explicit PaddlePassBuilder(const std::vector<std::string> &passes)
       : passes_(passes) {}

+  /** Append a pass to the end of the passes. */
   void AppendPass(const std::string &pass_type);

+  /** Insert a pass to a specific position.
+   * @param idx the position to insert.
+   * @param pass_type the pass key.
+   */
   void InsertPass(size_t idx, const std::string &pass_type);

-  // Delete the `idx`-th pass.
+  /** Delete the `idx`-th pass. */
   void DeletePass(size_t idx);

-  // Delete all the passes that has type `pass_type`.
+  /** Delete all the passes that has type `pass_type`. */
   void DeletePass(const std::string &pass_type);

-  // Visualize the computation graph after each pass by generating a DOT
-  // language file, one can draw them with the Graphviz toolkit.
+  /** Visualize the computation graph after each pass by generating a DOT
+   * language file, one can draw them with the Graphviz toolkit.
+   */
   void TurnOnDebug();

-  // Human-readible information.
+  /** Human-readible information. */
   std::string DebugString();

   const std::vector<std::string> &AllPasses() const { return passes_; }
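
A hedged sketch of the developer-facing pass_builder() hook from the AnalysisConfig hunk together with the PaddlePassBuilder methods above; the pass names are examples only, and the available set depends on the build.

#include <iostream>
#include "paddle/fluid/inference/api/paddle_inference_api.h"

void CustomizePasses(paddle::contrib::AnalysisConfig *config) {
  // NOTE: developer-only surface, easily broken between versions.
  paddle::PassStrategy *passes = config->pass_builder();
  passes->DeletePass("conv_bn_fuse_pass");  // skip one fusion pass
  passes->AppendPass("fc_fuse_pass");       // run an extra pass at the end
  passes->TurnOnDebug();                    // emit a DOT graph after each pass
  std::cout << passes->DebugString() << std::endl;
}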
@@ -50,16 +59,16 @@ class PaddlePassBuilder {
   std::vector<std::string> passes_;
 };

-/*
- * Pass strategy to help control the IR passes.
+/**Pass strategy to help control the IR passes.
  */
 class PassStrategy : public PaddlePassBuilder {
  public:
   explicit PassStrategy(const std::vector<std::string> &passes)
       : PaddlePassBuilder(passes) {}

-  // The MKLDNN control exists in both CPU and GPU mode, because there can be
-  // still some CPU kernels running in CPU mode.
+  /** The MKLDNN control exists in both CPU and GPU mode, because there can be
+   * still some CPU kernels running in CPU mode.
+   */
   virtual void EnableMKLDNN() = 0;

   bool use_gpu() const { return use_gpu_; }
@@ -70,8 +79,7 @@ class PassStrategy : public PaddlePassBuilder {
   bool use_gpu_{false};
 };

-/*
- * The CPU passes controller, it is used in AnalysisPredictor with CPU mode.
+/** The CPU passes controller, it is used in AnalysisPredictor with CPU mode.
  */
 class CpuPassStrategy : public PassStrategy {
  public:
@@ -117,8 +125,7 @@ class CpuPassStrategy : public PassStrategy {
   CpuPassStrategy(const CpuPassStrategy &other) : PassStrategy(other.passes_) {}
 };

-/*
- * The GPU passes strategy, it is used in
+/** The GPU passes strategy, it is used in AnalysisPredictor with GPU mode.
  */
 class GpuPassStrategy : public PassStrategy {
  public:
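
In practice the CPU or GPU strategy is selected internally from the AnalysisConfig, so MKLDNN is usually switched on through the config rather than by instantiating a strategy directly; a brief sketch under that assumption, with placeholder model directory and operator names.

#include "paddle/fluid/inference/api/paddle_inference_api.h"

void ConfigureForMkldnn(paddle::contrib::AnalysisConfig *config) {
  config->SetModel("./mobilenet");  // placeholder model directory
  config->DisableGpu();             // the CPU pass strategy will be selected
  config->EnableMKLDNN();           // enables the MKLDNN-specific CPU passes
  config->SetMKLDNNOp({"conv2d", "pool2d"});  // optional operator whitelist
}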
