@@ -50,7 +50,7 @@ template <typename Target>
 bool PaddleInferenceAnakinPredictor<Target>::Init(
     const contrib::AnakinConfig &config) {
   if (!(graph_.load(config.model_file))) {
-    VLOG(30) << "fail to load graph from " << config.model_file;
+    VLOG(3) << "fail to load graph from " << config.model_file;
     return false;
   }
   auto inputs = graph_.get_ins();
@@ -76,15 +76,15 @@ bool PaddleInferenceAnakinPredictor<Target>::Run(
     std::vector<PaddleTensor> *output_data, int batch_size) {
   for (const auto &input : inputs) {
     if (input.dtype != PaddleDType::FLOAT32) {
-      VLOG(30) << "Only support float type inputs. " << input.name
-               << "'s type is not float";
+      VLOG(3) << "Only support float type inputs. " << input.name
+              << "'s type is not float";
       return false;
     }
     auto d_tensor_in_p = executor_p_->get_in(input.name);
     auto net_shape = d_tensor_in_p->shape();
     if (net_shape.size() != input.shape.size()) {
-      VLOG(30) << " input " << input.name
-               << "'s shape size should be equal to that of net";
+      VLOG(3) << " input " << input.name
+              << "'s shape size should be equal to that of net";
       return false;
     }
     int sum = 1;
@@ -105,15 +105,15 @@ bool PaddleInferenceAnakinPredictor<Target>::Run(
 
     if (input.lod.size() > 0) {
       if (input.lod.size() > 1) {
-        VLOG(30) << " input lod first dim should <=1, but you set "
-                 << input.lod.size();
+        VLOG(3) << " input lod first dim should <=1, but you set "
+                << input.lod.size();
         return false;
       }
       std::vector<int> offset(input.lod[0].begin(), input.lod[0].end());
       d_tensor_in_p->set_seq_offset(offset);
-      VLOG(30) << "offset.size(): " << offset.size();
+      VLOG(3) << "offset.size(): " << offset.size();
       for (int i = 0; i < offset.size(); i++) {
-        VLOG(30) << offset[i];
+        VLOG(3) << offset[i];
       }
     }
 
@@ -124,7 +124,7 @@ bool PaddleInferenceAnakinPredictor<Target>::Run(
     if (cudaMemcpy(d_data_p, static_cast<float *>(input.data.data()),
                    d_tensor_in_p->valid_size() * sizeof(float),
                    cudaMemcpyHostToDevice) != 0) {
-      VLOG(30) << "copy data from CPU to GPU error";
+      VLOG(3) << "copy data from CPU to GPU error";
       return false;
     }
   }
@@ -141,7 +141,7 @@ bool PaddleInferenceAnakinPredictor<Target>::Run(
 #endif
 
   if (output_data->empty()) {
-    VLOG(30) << "At least one output should be set with tensors' names.";
+    VLOG(3) << "At least one output should be set with tensors' names.";
     return false;
   }
   for (auto &output : *output_data) {
@@ -157,7 +157,7 @@ bool PaddleInferenceAnakinPredictor<Target>::Run(
     if (cudaMemcpy(output.data.data(), tensor->mutable_data(),
                    tensor->valid_size() * sizeof(float),
                    cudaMemcpyDeviceToHost) != 0) {
-      VLOG(30) << "copy data from GPU to CPU error";
+      VLOG(3) << "copy data from GPU to CPU error";
       return false;
     }
   }
@@ -181,14 +181,14 @@ anakin::Net<Target, anakin::saber::AK_FLOAT, anakin::Precision::FP32>
 template <typename Target>
 std::unique_ptr<PaddlePredictor>
 PaddleInferenceAnakinPredictor<Target>::Clone() {
-  VLOG(30) << "Anakin Predictor::clone";
+  VLOG(3) << "Anakin Predictor::clone";
   std::unique_ptr<PaddlePredictor> cls(
       new PaddleInferenceAnakinPredictor<Target>());
   // construct executer from other graph
   auto anakin_predictor_p =
       dynamic_cast<PaddleInferenceAnakinPredictor<Target> *>(cls.get());
   if (!anakin_predictor_p) {
-    VLOG(30) << "fail to call Init";
+    VLOG(3) << "fail to call Init";
     return nullptr;
   }
   anakin_predictor_p->get_executer().init(graph_);
@@ -206,10 +206,10 @@ template <>
 std::unique_ptr<PaddlePredictor>
 CreatePaddlePredictor<contrib::AnakinConfig, PaddleEngineKind::kAnakin>(
     const contrib::AnakinConfig &config) {
-  VLOG(30) << "Anakin Predictor create.";
+  VLOG(3) << "Anakin Predictor create.";
   if (config.target_type == contrib::AnakinConfig::NVGPU) {
 #ifdef PADDLE_WITH_CUDA
-    VLOG(30) << "Anakin Predictor create on [ NVIDIA GPU ].";
+    VLOG(3) << "Anakin Predictor create on [ NVIDIA GPU ].";
     std::unique_ptr<PaddlePredictor> x(
         new PaddleInferenceAnakinPredictor<anakin::NV>(config));
     return x;
@@ -218,12 +218,12 @@ CreatePaddlePredictor<contrib::AnakinConfig, PaddleEngineKind::kAnakin>(
     return nullptr;
 #endif
   } else if (config.target_type == contrib::AnakinConfig::X86) {
-    VLOG(30) << "Anakin Predictor create on [ Intel X86 ].";
+    VLOG(3) << "Anakin Predictor create on [ Intel X86 ].";
     std::unique_ptr<PaddlePredictor> x(
         new PaddleInferenceAnakinPredictor<anakin::X86>(config));
     return x;
   } else {
-    VLOG(30) << "Anakin Predictor create on unknown platform.";
+    VLOG(3) << "Anakin Predictor create on unknown platform.";
    return nullptr;
   }
 }
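
With glog, VLOG(n) messages print only when the runtime verbosity is at least
n (set via GLOG_v or --v, default 0), so the VLOG(3) calls restored above
surface under GLOG_v=3, whereas VLOG(30) was effectively silent at any usual
setting. A minimal standalone sketch of that behavior, assuming plain glog
outside of Paddle (vlog_demo.cc is illustrative only, not part of this patch):

    // vlog_demo.cc -- demonstrates glog verbosity thresholds.
    #include <glog/logging.h>

    int main(int argc, char **argv) {
      google::InitGoogleLogging(argv[0]);
      FLAGS_v = 3;  // same effect as running with GLOG_v=3
      VLOG(3) << "printed: level 3 <= verbosity 3";
      VLOG(30) << "suppressed: level 30 > verbosity 3";
      return 0;
    }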