|
|
|
@ -64,10 +64,12 @@ void PaddlePassBuilder::DeletePass(size_t idx) {
|
|
|
|
|
passes_.erase(std::begin(passes_) + idx);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// MKL-DNN is a CPU-only backend; on the GPU strategy this is a stub that
// just reports the unsupported request and changes no state.
// Fix: the original fragment was truncated (closing brace lost in
// diff-corruption); restored here.
void GpuPassStrategy::EnableMKLDNN() {
  LOG(ERROR) << "GPU not support MKLDNN yet";
}
|
// Queue `pass` at the end of the analysis-pass list.
void PaddlePassBuilder::AppendAnalysisPass(const std::string &pass) {
  analysis_passes_.emplace_back(pass);
}
|
|
|
|
|
|
|
|
|
|
// Remove every pass currently queued in this builder.
void PaddlePassBuilder::ClearPasses() {
  passes_.clear();
}
|
|
|
|
|
|
|
|
|
|
// The following passes works for Anakin sub-graph engine.
|
|
|
|
|
const std::vector<std::string> kAnakinSubgraphPasses({
|
|
|
|
|
"infer_clean_graph_pass", //
|
|
|
|
@ -102,12 +104,12 @@ GpuPassStrategy::GpuPassStrategy() : PassStrategy({}) {
|
|
|
|
|
use_gpu_ = true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// MKL-DNN quantization is CPU-only; on the GPU strategy this stub logs the
// unsupported request and changes no state.
// Fix: the original fragment was truncated (closing brace lost in
// diff-corruption); restored here.
void GpuPassStrategy::EnableMkldnnQuantizer() {
  LOG(ERROR) << "GPU not support MKL-DNN quantization";
}
|
// GPU strategy does not support MKL-DNN; report and return without
// touching the pass list.
// NOTE(review): this definition also appears earlier in the file — a
// diff-corruption artifact; one copy should be removed.
void GpuPassStrategy::EnableMKLDNN() {
  LOG(ERROR) << "GPU not support MKLDNN yet";
}
|
|
|
|
|
|
|
|
|
|
// Queue `pass` at the end of the analysis-pass list.
// Fix: the original fragment was truncated (closing brace lost in
// diff-corruption); restored here.
// NOTE(review): this definition also appears earlier in the file — a
// diff-corruption artifact; one copy should be removed.
void PaddlePassBuilder::AppendAnalysisPass(const std::string &pass) {
  analysis_passes_.push_back(pass);
}
|
// GPU strategy does not support MKL-DNN quantization; report and return
// without touching any state.
// NOTE(review): this definition also appears earlier in the file — a
// diff-corruption artifact; one copy should be removed.
void GpuPassStrategy::EnableMkldnnQuantizer() {
  LOG(ERROR) << "GPU not support MKL-DNN quantization";
}
|
|
|
|
|
|
|
|
|
|
CpuPassStrategy::CpuPassStrategy() : PassStrategy({}) {
|
|
|
|
@ -135,5 +137,39 @@ CpuPassStrategy::CpuPassStrategy() : PassStrategy({}) {
|
|
|
|
|
});
|
|
|
|
|
use_gpu_ = false;
|
|
|
|
|
}
|
|
|
|
|
// Empty the queued pass list.
// NOTE(review): this definition also appears earlier in the file — a
// diff-corruption artifact; one copy should be removed.
void PaddlePassBuilder::ClearPasses() { passes_.clear(); }
|
|
|
|
|
|
|
|
|
|
// Enable the MKL-DNN pass pipeline on the CPU strategy. Idempotent: the
// pass list is only extended the first time (guarded by use_mkldnn_).
// Without PADDLE_WITH_MKLDNN the flag is forced off and nothing changes.
// TODO(Superjomn) Consider the way to mix CPU with GPU.
void CpuPassStrategy::EnableMKLDNN() {
#ifdef PADDLE_WITH_MKLDNN
  if (!use_mkldnn_) {
    // The placement pass goes in front of everything already queued.
    passes_.insert(passes_.begin(), "mkldnn_placement_pass");

    // Fuse passes appended in this exact order.
    const std::vector<std::string> mkldnn_fuse_passes{
        "depthwise_conv_mkldnn_pass",    //
        "conv_bn_fuse_pass",             // Execute BN passes again to
        "conv_eltwiseadd_bn_fuse_pass",  // preserve correct pass order
        "conv_bias_mkldnn_fuse_pass",    //
        "conv3d_bias_mkldnn_fuse_pass",  //
        "conv_elementwise_add_mkldnn_fuse_pass",
        "conv_relu_mkldnn_fuse_pass"};
    for (const std::string &fuse_pass : mkldnn_fuse_passes) {
      passes_.push_back(fuse_pass);
    }
  }
  use_mkldnn_ = true;
#else
  use_mkldnn_ = false;
#endif
}
|
|
|
|
|
|
|
|
|
|
// Enable MKL-DNN quantization on the CPU strategy. The placement pass is
// appended only on the first call (guarded by use_mkldnn_quantizer_);
// without PADDLE_WITH_MKLDNN the flag is forced off.
void CpuPassStrategy::EnableMkldnnQuantizer() {
#ifdef PADDLE_WITH_MKLDNN
  const bool first_time = !use_mkldnn_quantizer_;
  if (first_time) {
    passes_.emplace_back("cpu_quantize_placement_pass");
  }
  use_mkldnn_quantizer_ = true;
#else
  use_mkldnn_quantizer_ = false;
#endif
}
|
|
|
|
|
|
|
|
|
|
} // namespace paddle
|
|
|
|
|