@@ -132,7 +132,8 @@ std::unordered_map<std::string, int> GetFuseStatis(PaddlePredictor *predictor,
 void SetFakeImageInput(std::vector<std::vector<PaddleTensor>> *inputs,
                        const std::string &dirname, bool is_combined = true,
                        std::string model_filename = "model",
-                       std::string params_filename = "params") {
+                       std::string params_filename = "params",
+                       const std::vector<std::string> *feed_names = nullptr) {
   // Set fake_image_data
   PADDLE_ENFORCE_EQ(FLAGS_test_all_data, 0, "Only have single batch of data.");
   std::vector<std::vector<int64_t>> feed_target_shapes = GetFeedTargetShapes(
@@ -146,26 +147,32 @@ void SetFakeImageInput(std::vector<std::vector<PaddleTensor>> *inputs,
     os << "}\n";
   }
   LOG(INFO) << os.str();
-
-  int dim1 = feed_target_shapes[0][1];
-  int dim2 = feed_target_shapes[0][2];
-  int dim3 = feed_target_shapes[0][3];
-
-  PaddleTensor input;
-  std::vector<int> shape({FLAGS_batch_size, dim1, dim2, dim3});
-  input.shape = shape;
-  input.dtype = PaddleDType::FLOAT32;
-
-  // fill input data, for profile easily, do not use random data here.
-  size_t size = FLAGS_batch_size * dim1 * dim2 * dim3;
-  input.data.Resize(size * sizeof(float));
-  float *input_data = static_cast<float *>(input.data.data());
-  for (size_t i = 0; i < size; i++) {
-    *(input_data + i) = static_cast<float>(i) / size;
-  }
-
-  std::vector<PaddleTensor> input_slots;
-  input_slots.assign({input});
+  if (feed_names) {
+    PADDLE_ENFORCE_EQ(feed_names->size(), feed_target_shapes.size());
+  }
+  std::vector<PaddleTensor> input_slots(feed_target_shapes.size());
+  for (size_t i = 0; i < feed_target_shapes.size(); ++i) {
+    const auto &feed_shape = feed_target_shapes[i];
+    auto &input = input_slots[i];
+    std::vector<int> shape({FLAGS_batch_size});
+    for (size_t s = 1; s < feed_shape.size(); ++s) {
+      shape.push_back(static_cast<int>(feed_shape[s]));
+    }
+    if (feed_names) {
+      input.name = (*feed_names)[i];
+    }
+    input.shape = shape;
+    input.dtype = PaddleDType::FLOAT32;
+    size_t len = std::accumulate(shape.begin(), shape.end(), 1,
+                                 [](int a, int b) { return a * b; });
+    input.data.Resize(len * sizeof(float));
+    input.lod.assign({{0, static_cast<size_t>(FLAGS_batch_size)}});
+    float *input_data = static_cast<float *>(input.data.data());
+    // fill input data, for profile easily, do not use random data here.
+    for (size_t j = 0; j < len; ++j) {
+      *(input_data + j) = static_cast<float>(j) / len;
+    }
+  }
 
   (*inputs).emplace_back(input_slots);
 }
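Note: the snippet below is a minimal usage sketch, not part of the patch, showing how a test might pass the new feed_names argument so each fake input tensor carries a name; the FLAGS_infer_model flag and the "image" feed name are illustrative assumptions.

// Usage sketch (assumption, not from the patch): build fake inputs for a
// combined model directory and tag the single feed tensor with a name.
std::vector<std::vector<PaddleTensor>> input_slots_all;
std::vector<std::string> feed_names({"image"});  // hypothetical feed name
SetFakeImageInput(&input_slots_all, FLAGS_infer_model, /*is_combined=*/true,
                  "model", "params", &feed_names);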