AnalysisConfig remove contrib namespace (#15540)

Branch: fix/ir_debug
Author: Yan Chunwei (committed by GitHub)
Parent: 7bc8481c62
Commit: 655179089f
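This commit promotes AnalysisConfig out of the experimental contrib namespace: paddle::contrib::AnalysisConfig becomes paddle::AnalysisConfig across the analysis passes, the predictor, demos, tests, and the Python bindings. A minimal before/after sketch of what callers see (the model path is a placeholder):

// Before #15540: the config lived in the experimental sub-namespace.
paddle::contrib::AnalysisConfig old_cfg("/path/to/model_dir");

// After #15540: the contrib qualifier is gone.
paddle::AnalysisConfig new_cfg("/path/to/model_dir");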

@@ -132,7 +132,7 @@ struct Argument {
DECL_ARGUMENT_FIELD(tensorrt_workspace_size, TensorRtWorkspaceSize, int);
DECL_ARGUMENT_FIELD(tensorrt_min_subgraph_size, TensorRtMinSubgraphSize, int);
DECL_ARGUMENT_FIELD(tensorrt_precision_mode, TensorRtPrecisionMode,
-                    contrib::AnalysisConfig::Precision);
+                    AnalysisConfig::Precision);
// Memory optimized related.
DECL_ARGUMENT_FIELD(enable_memory_optim, EnableMemoryOptim, bool);

@@ -32,7 +32,7 @@ limitations under the License. */
#ifdef _WIN32
#include <direct.h>
#include <io.h>
-#define GCC_ATTRIBUTE(attr__) ;
+#define GCC_ATTRIBUTE(attr__)
#define MKDIR(path) _mkdir(path)
#else
#include <unistd.h>
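Alongside the namespace sweep, this hunk fixes a latent Windows bug: the old stub expanded to a stray semicolon at every use site. An illustrative sketch (the Log declaration is hypothetical, not from the diff):

#define GCC_ATTRIBUTE_OLD(attr__) ;  // old stub: expands to ';'
#define GCC_ATTRIBUTE_NEW(attr__)    // fixed stub: expands to nothing

// void Log(const char *fmt, ...) GCC_ATTRIBUTE_OLD(format(printf, 1, 2)) {}
//   -> void Log(const char *fmt, ...) ; {}  // stray ';' orphans the body
// void Log(const char *fmt, ...) GCC_ATTRIBUTE_NEW(format(printf, 1, 2)) {}
//   -> void Log(const char *fmt, ...) {}    // compiles as intended on MSVC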

@@ -71,7 +71,7 @@ void IRPassManager::CreatePasses(Argument *argument,
new framework::ProgramDesc *(&argument->main_program()));
bool enable_int8 = argument->tensorrt_precision_mode() ==
-                   contrib::AnalysisConfig::Precision::kInt8;
+                   AnalysisConfig::Precision::kInt8;
pass->Set("enable_int8", new bool(enable_int8));
std::string model_opt_cache_dir =

@@ -22,7 +22,7 @@
namespace paddle {
-PassStrategy *contrib::AnalysisConfig::pass_builder() const {
+PassStrategy *AnalysisConfig::pass_builder() const {
if (!pass_builder_.get()) {
if (use_gpu_) {
LOG(INFO) << "Create GPU IR passes";
@@ -42,27 +42,27 @@ PassStrategy *contrib::AnalysisConfig::pass_builder() const {
return pass_builder_.get();
}
-contrib::AnalysisConfig::AnalysisConfig(const std::string &model_dir) {
+AnalysisConfig::AnalysisConfig(const std::string &model_dir) {
model_dir_ = model_dir;
Update();
}
-contrib::AnalysisConfig::AnalysisConfig(const std::string &prog_file,
-                                        const std::string &params_file) {
+AnalysisConfig::AnalysisConfig(const std::string &prog_file,
+                               const std::string &params_file) {
prog_file_ = prog_file;
params_file_ = params_file;
Update();
}
-void contrib::AnalysisConfig::SetModel(const std::string &prog_file_path,
-                                       const std::string &params_file_path) {
+void AnalysisConfig::SetModel(const std::string &prog_file_path,
+                              const std::string &params_file_path) {
prog_file_ = prog_file_path;
params_file_ = params_file_path;
Update();
}
-void contrib::AnalysisConfig::EnableUseGpu(uint64_t memory_pool_init_size_mb,
-                                           int device_id) {
+void AnalysisConfig::EnableUseGpu(uint64_t memory_pool_init_size_mb,
+                                  int device_id) {
#ifdef PADDLE_WITH_CUDA
use_gpu_ = true;
memory_pool_init_size_mb_ = memory_pool_init_size_mb;
@@ -74,13 +74,13 @@ void contrib::AnalysisConfig::EnableUseGpu(uint64_t memory_pool_init_size_mb,
Update();
}
-void contrib::AnalysisConfig::DisableGpu() {
+void AnalysisConfig::DisableGpu() {
use_gpu_ = false;
Update();
}
-contrib::AnalysisConfig::AnalysisConfig(const contrib::AnalysisConfig &other) {
+AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) {
#define CP_MEMBER(member__) member__ = other.member__;
// Model related.
@@ -130,7 +130,7 @@ contrib::AnalysisConfig::AnalysisConfig(const contrib::AnalysisConfig &other) {
Update();
}
-void contrib::AnalysisConfig::EnableMKLDNN() {
+void AnalysisConfig::EnableMKLDNN() {
#ifdef PADDLE_WITH_MKLDNN
pass_builder()->EnableMKLDNN();
use_mkldnn_ = true;
@@ -142,9 +142,9 @@ void contrib::AnalysisConfig::EnableMKLDNN() {
Update();
}
-void contrib::AnalysisConfig::EnableTensorRtEngine(
+void AnalysisConfig::EnableTensorRtEngine(
    int workspace_size, int max_batch_size, int min_subgraph_size,
-    contrib::AnalysisConfig::Precision precision_mode) {
+    AnalysisConfig::Precision precision_mode) {
#ifdef PADDLE_WITH_CUDA
if (!use_gpu()) {
LOG(ERROR) << "To use TensorRT engine, please call EnableGpu() first";
@@ -165,7 +165,7 @@ void contrib::AnalysisConfig::EnableTensorRtEngine(
}
// TODO(Superjomn) refactor this, buggy.
-void contrib::AnalysisConfig::Update() {
+void AnalysisConfig::Update() {
auto info = SerializeInfoCache();
if (info == serialized_info_cache_) return;
@@ -225,7 +225,7 @@ void contrib::AnalysisConfig::Update() {
}
}
-std::string contrib::AnalysisConfig::SerializeInfoCache() {
+std::string AnalysisConfig::SerializeInfoCache() {
std::stringstream ss;
ss << model_dir_;
ss << prog_file_;
@@ -260,14 +260,14 @@ std::string contrib::AnalysisConfig::SerializeInfoCache() {
return ss.str();
}
-void contrib::AnalysisConfig::SetCpuMathLibraryNumThreads(
+void AnalysisConfig::SetCpuMathLibraryNumThreads(
int cpu_math_library_num_threads) {
cpu_math_library_num_threads_ = cpu_math_library_num_threads;
Update();
}
-float contrib::AnalysisConfig::fraction_of_gpu_memory_for_pool() const {
+float AnalysisConfig::fraction_of_gpu_memory_for_pool() const {
#ifdef PADDLE_WITH_CUDA
// Get the GPU memory details and calculate the fraction of memory for the
// GPU memory pool.
@@ -282,8 +282,8 @@ float contrib::AnalysisConfig::fraction_of_gpu_memory_for_pool() const {
#endif
}
-void contrib::AnalysisConfig::EnableMemoryOptim(
-    bool static_optim, bool force_update_static_cache) {
+void AnalysisConfig::EnableMemoryOptim(bool static_optim,
+                                       bool force_update_static_cache) {
enable_memory_optim_ = true;
static_memory_optim_ = static_optim;
static_memory_optim_force_update_ = force_update_static_cache;
@@ -291,14 +291,14 @@ void contrib::AnalysisConfig::EnableMemoryOptim(
Update();
}
-bool contrib::AnalysisConfig::enable_memory_optim() const {
+bool AnalysisConfig::enable_memory_optim() const {
return enable_memory_optim_;
}
-void contrib::AnalysisConfig::SetModelBuffer(const char *prog_buffer,
-                                             size_t prog_buffer_size,
-                                             const char *param_buffer,
-                                             size_t param_buffer_size) {
+void AnalysisConfig::SetModelBuffer(const char *prog_buffer,
+                                    size_t prog_buffer_size,
+                                    const char *param_buffer,
+                                    size_t param_buffer_size) {
prog_file_ = std::string(prog_buffer, prog_buffer + prog_buffer_size);
params_file_ = std::string(param_buffer, param_buffer + param_buffer_size);
model_from_memory_ = true;
@@ -306,7 +306,7 @@ void contrib::AnalysisConfig::SetModelBuffer(const char *prog_buffer,
Update();
}
-NativeConfig contrib::AnalysisConfig::ToNativeConfig() const {
+NativeConfig AnalysisConfig::ToNativeConfig() const {
NativeConfig config;
config.model_dir = model_dir_;
config.prog_file = prog_file_;
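Taken together, the renamed definitions above leave the API surface unchanged apart from the namespace. A hedged usage sketch of that surface (paths, sizes, and the helper name are placeholders; the signatures follow the hunks above):

// Sketch only; assumes the inference API header is on the include path.
#include "paddle/fluid/inference/api/paddle_inference_api.h"

void ConfigureSketch() {  // hypothetical helper, not in the diff
  paddle::AnalysisConfig config("/path/to/model_dir");
  config.EnableUseGpu(/*memory_pool_init_size_mb=*/100, /*device_id=*/0);
  config.EnableTensorRtEngine(/*workspace_size=*/1 << 20, /*max_batch_size=*/1,
                              /*min_subgraph_size=*/3,
                              paddle::AnalysisConfig::Precision::kInt8);
  config.EnableMemoryOptim(/*static_optim=*/false,
                           /*force_update_static_cache=*/false);
  config.SetCpuMathLibraryNumThreads(4);
  paddle::NativeConfig native = config.ToNativeConfig();  // downgrade path
  (void)native;
}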

@@ -47,7 +47,6 @@ DECLARE_bool(profile);
namespace paddle {
-using contrib::AnalysisConfig;
using inference::Singleton;
#if PADDLE_WITH_TENSORRT
using inference::tensorrt::TRTInt8Calibrator;
@@ -731,10 +730,10 @@ std::string AnalysisPredictor::GetSeriazlizedProgram() const {
}
template <>
-std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<contrib::AnalysisConfig>(
-    const contrib::AnalysisConfig &config) {
-  return CreatePaddlePredictor<contrib::AnalysisConfig,
-                               PaddleEngineKind::kAnalysis>(config);
+std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<AnalysisConfig>(
+    const AnalysisConfig &config) {
+  return CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
+      config);
}
} // namespace paddle
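For reference, a minimal caller of the re-specialized factory might look like this sketch (the header path and model path are assumptions, not shown in the diff):

#include "paddle/fluid/inference/api/paddle_inference_api.h"

int main() {
  paddle::AnalysisConfig config("/path/to/model_dir");  // placeholder path
  config.DisableGpu();
  // Specialization defined in the hunk above.
  auto predictor = paddle::CreatePaddlePredictor<paddle::AnalysisConfig>(config);
  return predictor != nullptr ? 0 : 1;
}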

@@ -33,7 +33,6 @@ using inference::analysis::Argument;
using inference::analysis::Analyzer;
using framework::proto::ProgramDesc;
using framework::NaiveExecutor;
-using contrib::AnalysisConfig;
/** \brief This predictor is based on the original native predictor with IR and
* Analysis support.
@@ -123,7 +122,7 @@ class AnalysisPredictor : public PaddlePredictor {
#endif
private:
-  contrib::AnalysisConfig config_;
+  AnalysisConfig config_;
Argument argument_;
std::unique_ptr<NaiveExecutor> executor_;
platform::Place place_;

@@ -24,7 +24,6 @@
DEFINE_string(dirname, "", "dirname to tests.");
namespace paddle {
-using contrib::AnalysisConfig;
TEST(AnalysisPredictor, analysis_off) {
AnalysisConfig config;

@@ -295,7 +295,7 @@ TEST(inference_api_native, image_classification_gpu) {
#endif
TEST(PassBuilder, Delete) {
-  contrib::AnalysisConfig config;
+  AnalysisConfig config;
config.DisableGpu();
config.pass_builder()->DeletePass("attention_lstm_fuse_pass");
const auto& passes = config.pass_builder()->AllPasses();

@@ -36,7 +36,7 @@ namespace demo {
*/
void Main() {
std::unique_ptr<PaddlePredictor> predictor;
-  paddle::contrib::AnalysisConfig config;
+  paddle::AnalysisConfig config;
config.EnableUseGpu(100, 0);
config.SetModel(FLAGS_modeldir + "/__model__",
FLAGS_modeldir + "/__params__");

@@ -34,7 +34,6 @@ DEFINE_bool(use_gpu, false, "Whether use gpu.");
namespace paddle {
namespace demo {
-using contrib::AnalysisConfig;
/*
* Use the native and analysis fluid engine to inference the demo.
*/

@@ -29,11 +29,6 @@
namespace paddle {
class AnalysisPredictor;
-// ==
-//
-// -----------------------------------------------------------------------------------
-// NOTE: The following APIs are not mature yet, we are still working on them.
-namespace contrib {
// NOTE WIP, not stable yet.
struct AnalysisConfig {
@@ -260,5 +255,4 @@ struct AnalysisConfig {
mutable std::unique_ptr<PassStrategy> pass_builder_;
};
-}  // namespace contrib
} // namespace paddle
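With the contrib wrapper deleted from this header, out-of-tree code that still spells paddle::contrib::AnalysisConfig stops compiling. One conventional bridge, sketched here as an assumption rather than something this diff shows, is a deprecated alias:

namespace paddle {
struct AnalysisConfig;  // now declared directly in paddle
namespace contrib {
// Hypothetical alias kept only for source compatibility.
using AnalysisConfig = paddle::AnalysisConfig;
}  // namespace contrib
}  // namespace paddle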

@@ -221,7 +221,7 @@ class PaddlePredictor {
virtual std::string GetSeriazlizedProgram() const {
assert(false);  // Force raise error.
return "NotImplemented";
-  };
+  }
/** The common configs for all the predictors.
*/
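The "};" to "}" change above drops a redundant semicolon after an in-class member function body; the extra token parses as an empty member declaration, which pedantic compilers warn about. A small illustration (hypothetical class, not from the diff):

class Example {
 public:
  int Value() const { return 42; }   // in-class body: no trailing ';'
  // int Bad() const { return 0; };  // stray ';' is an empty declaration;
                                     // older -pedantic builds flag it
};                                   // the class itself does end with ';'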

@@ -13,16 +13,16 @@
// limitations under the License.
#pragma once
+#include <NvInfer.h>
+#include <cuda_runtime_api.h>
#include <atomic>
#include <memory>
-#include <mutex>
+#include <mutex>  // NOLINT
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
-#include <NvInfer.h>
-#include <cuda_runtime_api.h>
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/inference/tensorrt/engine.h"
#include "paddle/fluid/platform/place.h"

@@ -19,7 +19,6 @@ DEFINE_int32(max_turn_num, 9,
namespace paddle {
namespace inference {
-using contrib::AnalysisConfig;
constexpr int32_t kMaxTurnLen = 50;
@@ -165,7 +164,7 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data,
input_slots->push_back(std::move(response_mask_tensor));
}
-void SetConfig(contrib::AnalysisConfig *cfg) {
+void SetConfig(AnalysisConfig *cfg) {
cfg->SetModel(FLAGS_infer_model + "/__model__", FLAGS_infer_model + "/param");
cfg->SwitchSpecifyInputNames();
cfg->SwitchIrOptim(true);
@@ -187,7 +186,7 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
// Easy for profiling independently.
void profile(bool use_mkldnn = false) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
SetConfig(&cfg);
if (use_mkldnn) {
@@ -223,7 +222,7 @@ TEST(Analyzer_dam, profile_mkldnn) { profile(true /* use_mkldnn */); }
// Check the fuse status
TEST(Analyzer_dam, fuse_statis) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
SetConfig(&cfg);
int num_ops;
@@ -256,7 +255,7 @@ void compare(bool use_mkldnn = false) {
TEST(Analyzer_dam, compare_with_static_memory_optim) {
// The small dam will core in CI, but works in local.
if (FLAGS_max_turn_num == 9) {
-    contrib::AnalysisConfig cfg, cfg1;
+    AnalysisConfig cfg, cfg1;
DataRecord data(FLAGS_infer_data, FLAGS_batch_size);
std::vector<std::vector<PaddleTensor>> input_slots_all;
@@ -282,7 +281,7 @@ TEST(Analyzer_dam, compare_with_static_memory_optim) {
TEST(Analyzer_dam, compare_with_dynamic_memory_optim) {
// The small dam will core in CI, but works in local.
if (FLAGS_max_turn_num == 9) {
-    contrib::AnalysisConfig cfg, cfg1;
+    AnalysisConfig cfg, cfg1;
DataRecord data(FLAGS_infer_data, FLAGS_batch_size);
std::vector<std::vector<PaddleTensor>> input_slots_all;

@@ -18,8 +18,6 @@ namespace paddle {
namespace inference {
namespace analysis {
-using contrib::AnalysisConfig;
struct DataRecord {
std::vector<int64_t> data;
std::vector<size_t> lod;

@@ -16,7 +16,6 @@
namespace paddle {
namespace inference {
-using contrib::AnalysisConfig;
struct DataRecord {
std::vector<std::vector<int64_t>> query, title;
@@ -75,7 +74,7 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data,
}
}
-void SetConfig(contrib::AnalysisConfig *cfg) {
+void SetConfig(AnalysisConfig *cfg) {
cfg->SetModel(FLAGS_infer_model);
cfg->DisableGpu();
cfg->SwitchSpecifyInputNames();
@@ -95,7 +94,7 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
// Easy for profiling independently.
void profile(bool use_mkldnn = false) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
SetConfig(&cfg);
std::vector<PaddleTensor> outputs;
@@ -130,7 +129,7 @@ TEST(Analyzer_MM_DNN, profile_mkldnn) { profile(true /* use_mkldnn */); }
// Check the fuse status
TEST(Analyzer_MM_DNN, fuse_statis) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
SetConfig(&cfg);
int num_ops;
@@ -141,7 +140,7 @@ TEST(Analyzer_MM_DNN, fuse_statis) {
// Compare result of NativeConfig and AnalysisConfig
void compare(bool use_mkldnn = false) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
SetConfig(&cfg);
if (use_mkldnn) {

@@ -16,7 +16,6 @@
namespace paddle {
namespace inference {
-using contrib::AnalysisConfig;
struct DataRecord {
std::vector<std::vector<int64_t>> word, mention;
@@ -76,7 +75,7 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data) {
}
}
-void SetConfig(contrib::AnalysisConfig *cfg, bool memory_load = false) {
+void SetConfig(AnalysisConfig *cfg, bool memory_load = false) {
if (memory_load) {
std::string buffer_prog, buffer_param;
ReadBinaryFile(FLAGS_infer_model + "/__model__", &buffer_prog);
@@ -105,7 +104,7 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
// Easy for profiling independently.
void profile(bool memory_load = false) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
SetConfig(&cfg, memory_load);
std::vector<PaddleTensor> outputs;
@@ -136,7 +135,7 @@ TEST(Analyzer_Chinese_ner, profile_memory_load) {
// Check the fuse status
TEST(Analyzer_Chinese_ner, fuse_statis) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
SetConfig(&cfg);
int num_ops;
@@ -152,7 +151,7 @@ TEST(Analyzer_Chinese_ner, fuse_statis) {
// Compare result of NativeConfig and AnalysisConfig
TEST(Analyzer_Chinese_ner, compare) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
SetConfig(&cfg);
std::vector<std::vector<PaddleTensor>> input_slots_all;

@@ -16,7 +16,6 @@
namespace paddle {
namespace inference {
-using contrib::AnalysisConfig;
struct DataRecord {
std::vector<std::vector<int64_t>> query_basic, query_phrase, title_basic,
@@ -103,7 +102,7 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data,
}
}
-void SetConfig(contrib::AnalysisConfig *cfg) {
+void SetConfig(AnalysisConfig *cfg) {
cfg->SetModel(FLAGS_infer_model);
cfg->DisableGpu();
cfg->SwitchSpecifyInputNames();
@@ -123,7 +122,7 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
// Easy for profiling independently.
TEST(Analyzer_Pyramid_DNN, profile) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
SetConfig(&cfg);
std::vector<PaddleTensor> outputs;
@@ -147,7 +146,7 @@ TEST(Analyzer_Pyramid_DNN, profile) {
// Check the fuse status
TEST(Analyzer_Pyramid_DNN, fuse_statis) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
SetConfig(&cfg);
int num_ops;
@@ -158,7 +157,7 @@ TEST(Analyzer_Pyramid_DNN, fuse_statis) {
// Compare result of NativeConfig and AnalysisConfig
TEST(Analyzer_Pyramid_DNN, compare) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
SetConfig(&cfg);
std::vector<std::vector<PaddleTensor>> input_slots_all;

@@ -223,7 +223,7 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
// Easy for profiling independently.
TEST(Analyzer_rnn1, profile) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
SetConfig(&cfg);
cfg.DisableGpu();
cfg.SwitchIrDebug();
@@ -237,7 +237,7 @@ TEST(Analyzer_rnn1, profile) {
// Check the fuse status
TEST(Analyzer_rnn1, fuse_statis) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
SetConfig(&cfg);
int num_ops;
@@ -254,7 +254,7 @@ TEST(Analyzer_rnn1, fuse_statis) {
// Compare result of NativeConfig and AnalysisConfig
TEST(Analyzer_rnn1, compare) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
SetConfig(&cfg);
std::vector<std::vector<PaddleTensor>> input_slots_all;
@@ -276,7 +276,7 @@ TEST(Analyzer_rnn1, compare_determine) {
// Test Multi-Thread.
TEST(Analyzer_rnn1, multi_thread) {
-  contrib::AnalysisConfig cfg;
+  AnalysisConfig cfg;
SetConfig(&cfg);
std::vector<PaddleTensor> outputs;

@@ -20,7 +20,6 @@ limitations under the License. */
namespace paddle {
namespace inference {
namespace analysis {
-using contrib::AnalysisConfig;
struct Record {
std::vector<float> data;

@@ -58,9 +58,8 @@ std::ostream &operator<<(std::ostream &os, const NativeConfig &config) {
return os;
}
-std::ostream &operator<<(std::ostream &os,
-                         const contrib::AnalysisConfig &config) {
-  os << GenSpaces(num_spaces) << "contrib::AnalysisConfig {\n";
+std::ostream &operator<<(std::ostream &os, const AnalysisConfig &config) {
+  os << GenSpaces(num_spaces) << "AnalysisConfig {\n";
num_spaces++;
os << config.ToNativeConfig();
if (!config.model_from_memory()) {

@@ -65,7 +65,7 @@ float Random(float low, float high) {
void PrintConfig(const PaddlePredictor::Config *config, bool use_analysis) {
const auto *analysis_config =
-      reinterpret_cast<const contrib::AnalysisConfig *>(config);
+      reinterpret_cast<const AnalysisConfig *>(config);
if (use_analysis) {
LOG(INFO) << *analysis_config;
return;
@@ -109,9 +109,9 @@ void CompareResult(const std::vector<PaddleTensor> &outputs,
std::unique_ptr<PaddlePredictor> CreateTestPredictor(
const PaddlePredictor::Config *config, bool use_analysis = true) {
const auto *analysis_config =
-      reinterpret_cast<const contrib::AnalysisConfig *>(config);
+      reinterpret_cast<const AnalysisConfig *>(config);
if (use_analysis) {
-    return CreatePaddlePredictor<contrib::AnalysisConfig>(*analysis_config);
+    return CreatePaddlePredictor<AnalysisConfig>(*analysis_config);
}
auto native_config = analysis_config->ToNativeConfig();
return CreatePaddlePredictor<NativeConfig>(native_config);

@@ -42,9 +42,9 @@ void SetConfig(ConfigType* config, std::string model_dir, bool use_gpu,
}
template <>
-void SetConfig<contrib::AnalysisConfig>(contrib::AnalysisConfig* config,
-                                        std::string model_dir, bool use_gpu,
-                                        bool use_tensorrt, int batch_size) {
+void SetConfig<AnalysisConfig>(AnalysisConfig* config, std::string model_dir,
+                               bool use_gpu, bool use_tensorrt,
+                               int batch_size) {
if (!FLAGS_prog_filename.empty() && !FLAGS_param_filename.empty()) {
config->SetModel(model_dir + "/" + FLAGS_prog_filename,
model_dir + "/" + FLAGS_param_filename);
@@ -75,11 +75,11 @@ void profile(std::string model_dir, bool use_analysis, bool use_tensorrt) {
std::vector<PaddleTensor> outputs;
if (use_analysis || use_tensorrt) {
-    contrib::AnalysisConfig config;
+    AnalysisConfig config;
config.EnableUseGpu(100, 0);
config.pass_builder()->TurnOnDebug();
-    SetConfig<contrib::AnalysisConfig>(&config, model_dir, true, use_tensorrt,
-                                       FLAGS_batch_size);
+    SetConfig<AnalysisConfig>(&config, model_dir, true, use_tensorrt,
+                              FLAGS_batch_size);
TestPrediction(reinterpret_cast<PaddlePredictor::Config*>(&config),
inputs_all, &outputs, FLAGS_num_threads, true);
} else {
@@ -99,18 +99,18 @@ void compare(std::string model_dir, bool use_tensorrt) {
SetFakeImageInput(&inputs_all, model_dir, false, "__model__", "");
}
-  contrib::AnalysisConfig analysis_config;
-  SetConfig<contrib::AnalysisConfig>(&analysis_config, model_dir, true,
-                                     use_tensorrt, FLAGS_batch_size);
+  AnalysisConfig analysis_config;
+  SetConfig<AnalysisConfig>(&analysis_config, model_dir, true, use_tensorrt,
+                            FLAGS_batch_size);
CompareNativeAndAnalysis(
reinterpret_cast<const PaddlePredictor::Config*>(&analysis_config),
inputs_all);
}
void compare_continuous_input(std::string model_dir, bool use_tensorrt) {
-  contrib::AnalysisConfig analysis_config;
-  SetConfig<contrib::AnalysisConfig>(&analysis_config, model_dir, true,
-                                     use_tensorrt, FLAGS_batch_size);
+  AnalysisConfig analysis_config;
+  SetConfig<AnalysisConfig>(&analysis_config, model_dir, true, use_tensorrt,
+                            FLAGS_batch_size);
auto config =
reinterpret_cast<const PaddlePredictor::Config*>(&analysis_config);
auto native_pred = CreateTestPredictor(config, false);

@@ -33,7 +33,6 @@ using paddle::PaddlePredictor;
using paddle::NativeConfig;
using paddle::NativePaddlePredictor;
using paddle::AnalysisPredictor;
-using paddle::contrib::AnalysisConfig;
static void BindPaddleDType(py::module *m);
static void BindPaddleBuf(py::module *m);
