add note for use mkldnn

fix-develop-build.sh
tensor-tang 7 years ago
parent 01f0f16884
commit dd0b2036c6

@@ -77,7 +77,7 @@ bool AnalysisPredictor::Init(
   OptimizeInferenceProgram();
   ctx_ = executor_->Prepare(*inference_program_, 0);
-  if (config_.use_mkldnn) {
+  if (config_._use_mkldnn) {
     executor_->EnableMKLDNN(*inference_program_);
   }

@@ -106,7 +106,7 @@ bool NativePaddlePredictor::Init(
   }
   ctx_ = executor_->Prepare(*inference_program_, 0);
-  if (config_.use_mkldnn) {
+  if (config_._use_mkldnn) {
     executor_->EnableMKLDNN(*inference_program_);
   }
   executor_->CreateVariables(*inference_program_,
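
Both Init paths gate the same call, executor_->EnableMKLDNN(*inference_program_), on the renamed flag. The body of EnableMKLDNN is not part of this diff; the following is only a rough sketch of what such a pass could look like, written as a hypothetical free helper and assuming that ops which support MKL-DNN kernels expose a "use_mkldnn" attribute. The framework types and methods used here (ProgramDesc, MutableBlock, AllOps, HasAttr, SetAttr) are assumptions for illustration, not code taken from this commit.

#include "paddle/fluid/framework/program_desc.h"

// Sketch only: walk every op in every block and turn on its MKL-DNN
// attribute when the op declares one; ops without the attribute are
// left untouched.
void EnableMKLDNNForProgram(paddle::framework::ProgramDesc* program) {
#ifdef PADDLE_WITH_MKLDNN
  for (size_t block_id = 0; block_id < program->Size(); ++block_id) {
    auto* block = program->MutableBlock(block_id);
    for (auto* op : block->AllOps()) {
      if (op->HasAttr("use_mkldnn")) {
        op->SetAttr("use_mkldnn", true);
      }
    }
  }
#else
  LOG(WARNING) << "Compile with MKLDNN for EnableMKLDNN to take effect";
#endif
}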

@@ -121,8 +121,8 @@ struct NativeConfig : public PaddlePredictor::Config {
   bool use_gpu{false};
   int device{0};
   float fraction_of_gpu_memory{-1.f};  // Negative to notify initialization.
-  // MKLDNN related fields.
-  bool use_mkldnn{false};
+  // NOTE: Do not use this; it exists only for internal tests and will be discarded later.
+  bool _use_mkldnn{false};
   // Specify the variable's name of each input.
   bool specify_input_name{false};

@@ -72,7 +72,7 @@ void TestVisualPrediction(bool use_mkldnn) {
   cfg.param_file = FLAGS_infer_model + "/__params__";
   cfg.prog_file = FLAGS_infer_model + "/__model__";
   cfg.use_gpu = false;
-  cfg.use_mkldnn = use_mkldnn;
+  cfg._use_mkldnn = use_mkldnn;
   cfg.device = 0;
   cfg.enable_ir_optim = true;
   cfg.ir_passes.push_back("fc_gru_fuse_pass");
