// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <glog/logging.h>
#include <sys/time.h>
#include <algorithm>
#include <numeric>
#include <sstream>
#include <string>
#include <vector>
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/inference/api/timer.h"
#include "paddle/fluid/string/printf.h"

namespace paddle {
namespace inference {
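// Splits `str` on `sep` and stores the pieces in `pieces`; a trailing empty
// piece is dropped.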
static void split(const std::string &str, char sep,
                  std::vector<std::string> *pieces) {
  pieces->clear();
  if (str.empty()) {
    return;
  }
  size_t pos = 0;
  size_t next = str.find(sep, pos);
  while (next != std::string::npos) {
    pieces->push_back(str.substr(pos, next - pos));
    pos = next + 1;
    next = str.find(sep, pos);
  }
  if (!str.substr(pos).empty()) {
    pieces->push_back(str.substr(pos));
  }
}
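// Splits `str` on `sep` and appends each piece, parsed as float, to `fs`.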
static void split_to_float(const std::string &str, char sep,
                           std::vector<float> *fs) {
  std::vector<std::string> pieces;
  split(str, sep, &pieces);
  std::transform(pieces.begin(), pieces.end(), std::back_inserter(*fs),
                 [](const std::string &v) { return std::stof(v); });
}
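// Splits `str` on `sep` and appends each piece, parsed as int64_t, to `is`.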
static void split_to_int64(const std::string &str, char sep,
                           std::vector<int64_t> *is) {
  std::vector<std::string> pieces;
  split(str, sep, &pieces);
  // Use stoll so values outside the 32-bit int range parse correctly.
  std::transform(pieces.begin(), pieces.end(), std::back_inserter(*is),
                 [](const std::string &v) { return std::stoll(v); });
}
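// Joins the elements of `vec` into a single space-separated string.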
template <typename T>
std::string to_string(const std::vector<T> &vec) {
  std::stringstream ss;
  for (const auto &c : vec) {
    ss << c << " ";
  }
  return ss.str();
}
template <>
std::string to_string<std::vector<float>>(
    const std::vector<std::vector<float>> &vec);

template <>
std::string to_string<std::vector<std::vector<float>>>(
    const std::vector<std::vector<std::vector<float>>> &vec);
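// Returns the product of all elements of `v`, e.g. the element count implied
// by a shape vector.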
template <typename T>
int VecReduceToInt(const std::vector<T> &v) {
  return std::accumulate(v.begin(), v.end(), 1, [](T a, T b) { return a * b; });
}
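// Flattens the rows of `data` into the tensor's buffer, which is resized to
// hold the number of elements implied by tensor->shape.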
template <typename T>
static void TensorAssignData(PaddleTensor *tensor,
                             const std::vector<std::vector<T>> &data) {
  // Assign buffer
  int num_elems = VecReduceToInt(tensor->shape);
  tensor->data.Resize(sizeof(T) * num_elems);
  int c = 0;
  for (const auto &f : data) {
    for (T v : f) {
      static_cast<T *>(tensor->data.data())[c++] = v;
    }
  }
}
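// Flattens the rows of `data` into the zero-copy tensor's CPU buffer and
// returns the number of elements copied.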
template <typename T>
static int ZeroCopyTensorAssignData(ZeroCopyTensor *tensor,
                                    const std::vector<std::vector<T>> &data) {
  int size{0};
  auto *ptr = tensor->mutable_data<T>(PaddlePlace::kCPU);
  int c = 0;
  for (const auto &f : data) {
    for (T v : f) {
      ptr[c++] = v;
    }
    // Track how many elements were copied so the caller gets a meaningful size.
    size += static_cast<int>(f.size());
  }
  return size;
}
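// Returns a human-readable description of a PaddleTensor: name, dtype, shape,
// LoD, and data (printed as float).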
static std::string DescribeTensor(const PaddleTensor &tensor) {
  std::stringstream os;
  os << "Tensor [" << tensor.name << "]\n";
  os << " - type: ";
  switch (tensor.dtype) {
    case PaddleDType::FLOAT32:
      os << "float32";
      break;
    case PaddleDType::INT64:
      os << "int64";
      break;
    default:
      os << "unset";
  }
  os << '\n';

  os << " - shape: " << to_string(tensor.shape) << '\n';
  os << " - lod: ";
  for (auto &l : tensor.lod) {
    os << to_string(l) << "; ";
  }
  os << "\n";
  os << " - data: ";

  int dim = VecReduceToInt(tensor.shape);
  for (int i = 0; i < dim; i++) {
    os << static_cast<float *>(tensor.data.data())[i] << " ";
  }
  os << '\n';
  return os.str();
}
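// Logs the benchmark configuration and latency; when epoch > 1, also logs the
// average latency per sample.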
static void PrintTime(int batch_size, int repeat, int num_threads, int tid,
                      double latency, int epoch = 1) {
  LOG(INFO) << "====== batch_size: " << batch_size << ", repeat: " << repeat
            << ", threads: " << num_threads << ", thread id: " << tid
            << ", latency: " << latency << "ms ======";
  if (epoch > 1) {
    int samples = batch_size * epoch;
    LOG(INFO) << "====== sample number: " << samples
              << ", average latency of each sample: " << latency / samples
              << "ms ======";
  }
}
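// Returns a string summary of a LoDTensor: its LoD, shape, and the first
// (at most 20) data elements, read as type T.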
template <typename T>
std::string LoDTensorSummary(const framework::LoDTensor &tensor) {
  std::stringstream ss;
  ss << "\n---- tensor ---" << '\n';
  ss << "lod: [";
  for (const auto &level : tensor.lod()) {
    ss << "[ ";
    for (auto i : level) {
      ss << i << ", ";
    }
    ss << "]";
  }
  ss << "]\n";

  ss << "shape: [";
  int size = 1;
  for (int i = 0; i < tensor.dims().size(); i++) {
    int dim = tensor.dims()[i];
    ss << dim << ", ";
    size *= dim;
  }
  ss << "]\n";

  ss << "data: ";
  for (int i = 0; i < std::min(20, size); i++) {
    ss << tensor.data<T>()[i] << " ";
  }
  ss << "\n";

  return ss.str();
}
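// Returns true if the two LoDs have the same number of levels and the same
// length at each level.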
static bool CompareLoD(const framework::LoD &a, const framework::LoD &b) {
  if (a.size() != b.size()) {
    LOG(ERROR) << string::Sprintf("lod size not match %d != %d", a.size(),
                                  b.size());
    return false;
  }
  for (size_t i = 0; i < a.size(); i++) {
    auto &al = a[i];
    auto &bl = b[i];
    if (al.size() != bl.size()) {
      LOG(ERROR) << string::Sprintf("level size %d != %d", al.size(),
                                    bl.size());
      return false;
    }
  }
  return true;
}
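// Returns true if the two shape vectors are element-wise identical.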
static bool CompareShape(const std::vector<int64_t> &a,
                         const std::vector<int64_t> &b) {
  if (a.size() != b.size()) {
    LOG(ERROR) << string::Sprintf("shape size not match %d != %d", a.size(),
                                  b.size());
    return false;
  }
  for (size_t i = 0; i < a.size(); i++) {
    if (a[i] != b[i]) {
      LOG(ERROR) << string::Sprintf("shape %d-th element not match %d != %d", i,
                                    a[i], b[i]);
      return false;
    }
  }
  return true;
}
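// Returns true if the two tensors hold the same elements; float and int64
// data are compared with a 1e-3 absolute tolerance.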
static bool CompareTensorData(const framework::LoDTensor &a,
                              const framework::LoDTensor &b) {
  auto a_shape = framework::vectorize(a.dims());
  auto b_shape = framework::vectorize(b.dims());
  size_t a_size = std::accumulate(a_shape.begin(), a_shape.end(), 1,
                                  [](int a, int b) { return a * b; });
  size_t b_size = std::accumulate(b_shape.begin(), b_shape.end(), 1,
                                  [](int a, int b) { return a * b; });
  if (a_size != b_size) {
    LOG(ERROR) << string::Sprintf("tensor data size not match, %d != %d",
                                  a_size, b_size);
    // Bail out early so the element-wise loop below never reads out of bounds.
    return false;
  }

  for (size_t i = 0; i < a_size; i++) {
    if (a.type() == typeid(float)) {
      const auto *a_data = a.data<float>();
      const auto *b_data = b.data<float>();
      if (std::abs(a_data[i] - b_data[i]) > 1e-3) {
        LOG(ERROR) << string::Sprintf(
            "tensor data %d-th element not match, %f != %f", i, a_data[i],
            b_data[i]);
        return false;
      }
    } else if (a.type() == typeid(int64_t)) {
      const auto *a_data = a.data<int64_t>();
      const auto *b_data = b.data<int64_t>();
      if (std::abs(a_data[i] - b_data[i]) > 1e-3) {
        LOG(ERROR) << string::Sprintf(
            "tensor data %d-th element not match, %f != %f", i, a_data[i],
            b_data[i]);
        return false;
      }
    }
  }

  return true;
}
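// Returns true if the two LoDTensors match in LoD, shape, and data.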
static bool CompareTensor(const framework::LoDTensor &a,
                          const framework::LoDTensor &b) {
  if (!CompareLoD(a.lod(), b.lod())) {
    return false;
  }
  if (!CompareShape(framework::vectorize(a.dims()),
                    framework::vectorize(b.dims()))) {
    return false;
  }

  if (!CompareTensorData(a, b)) {
    return false;
  }

  return true;
}
}  // namespace inference
}  // namespace paddle