upgrade inference tensor apis, test=develop (#31402)
parent
8491ae9a02
commit
bc7632be73
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,138 @@
|
||||
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include <glog/logging.h>
#include <gtest/gtest.h>

#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <functional>
#include <limits>
#include <memory>
#include <random>
#include <string>
#include <vector>

#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/inference/api/helper.h"
#include "paddle/fluid/inference/api/paddle_tensor.h"
#include "paddle/fluid/platform/place.h"
|
||||
|
||||
namespace paddle_infer {
|
||||
|
||||
struct TensorWrapper : public Tensor {
|
||||
TensorWrapper(paddle_infer::PlaceType place, paddle::framework::Scope* scope,
|
||||
const std::string& name)
|
||||
: Tensor{static_cast<void*>(scope)} {
|
||||
SetPlace(place, 0 /*device_id*/);
|
||||
SetName(name);
|
||||
input_or_output_ = true;
|
||||
}
|
||||
};
|
||||
|
||||
// Factory: wraps the scope variable `name` as a Tensor placed on `place`.
std::unique_ptr<Tensor> CreateTensor(paddle_infer::PlaceType place,
                                     paddle::framework::Scope* scope,
                                     const std::string& name) {
  // unique_ptr<TensorWrapper> converts to unique_ptr<Tensor> on return.
  return std::make_unique<TensorWrapper>(place, scope, name);
}
|
||||
|
||||
// Generates pseudo-random values of type T, uniformly drawn in double
// precision from [min, max) and then narrowed back to T on each call.
template <typename T>
struct RandomGenerator {
  // The extra parentheses around the numeric_limits members guard against
  // min/max function-style macros (e.g. from <windows.h>).
  RandomGenerator(double min = (std::numeric_limits<T>::min)(),
                  double max = (std::numeric_limits<T>::max)())
      : dist_{min, max} {}

  T operator()() { return static_cast<T>(dist_(engine_)); }

 private:
  // Seeded once, non-deterministically, per generator instance.
  std::mt19937_64 engine_{std::random_device()()};
  std::uniform_real_distribution<double> dist_;
};
|
||||
|
||||
template <typename T, template <typename> typename G>
|
||||
bool FillRandomDataAndCheck(PlaceType place, size_t length, G<T>&& generator,
|
||||
float threshold = 10e-5) {
|
||||
std::vector<T> data_in(length);
|
||||
std::generate(data_in.begin(), data_in.end(), std::forward<G<T>>(generator));
|
||||
paddle::framework::Scope scope;
|
||||
const std::string name{"name"};
|
||||
scope.Var(name);
|
||||
auto tensor = CreateTensor(place, &scope, name);
|
||||
tensor->CopyFromCpu<T>(data_in.data());
|
||||
if (tensor->type() != paddle::inference::ConvertToPaddleDType(
|
||||
paddle::framework::DataTypeTrait<T>::DataType())) {
|
||||
return false;
|
||||
}
|
||||
std::vector<T> data_out(length);
|
||||
tensor->CopyToCpu<T>(data_out.data());
|
||||
for (size_t i = 0; i < length; ++i) {
|
||||
if (std::abs(data_out[i] - data_out[i]) > threshold) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
bool SetPlaceAndCheck(PlaceType place, size_t length) {
|
||||
paddle::framework::Scope scope;
|
||||
const std::string name{"name"};
|
||||
const std::vector<std::vector<size_t>> lod{{0, length}};
|
||||
scope.Var(name);
|
||||
auto tensor = CreateTensor(place, &scope, name);
|
||||
tensor->Reshape({static_cast<int>(length)});
|
||||
tensor->mutable_data<T>(place);
|
||||
tensor->SetLoD(lod);
|
||||
|
||||
PlaceType place_out{PlaceType::kUNK};
|
||||
int length_out{-1};
|
||||
tensor->data<T>(&place_out, &length_out);
|
||||
if (length_out != static_cast<int>(length) || place_out != place) {
|
||||
return false;
|
||||
}
|
||||
if (tensor->name() != name || tensor->lod() != lod) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// Runs the data round-trip check for every supported dtype, using a single
// randomly chosen length in [1, 1000).
bool FillRandomDataAndCheck(PlaceType place) {
  const size_t length{RandomGenerator<size_t>{1, 1000}()};
  VLOG(3) << "FillRandomDataAndCheck: length = " << length;
  bool ok =
      FillRandomDataAndCheck<float>(place, length, RandomGenerator<float>{});
  ok = ok && FillRandomDataAndCheck<int64_t>(place, length,
                                             RandomGenerator<int64_t>{});
  ok = ok && FillRandomDataAndCheck<int32_t>(place, length,
                                             RandomGenerator<int32_t>{});
  ok = ok && FillRandomDataAndCheck<uint8_t>(place, length,
                                             RandomGenerator<uint8_t>{});
  return ok;
}
|
||||
|
||||
// Runs the metadata round-trip check for every supported dtype, using a
// single randomly chosen length in [1, 1000).
bool SetPlaceAndCheck(PlaceType place) {
  const size_t length{RandomGenerator<size_t>{1, 1000}()};
  VLOG(3) << "SetPlaceAndCheck: length = " << length;
  bool ok = SetPlaceAndCheck<float>(place, length);
  ok = ok && SetPlaceAndCheck<int64_t>(place, length);
  ok = ok && SetPlaceAndCheck<int32_t>(place, length);
  ok = ok && SetPlaceAndCheck<uint8_t>(place, length);
  return ok;
}
|
||||
|
||||
// End-to-end check of the paddle_infer::Tensor API: data round-trip and
// metadata (place/size/name/LoD) on CPU, and on GPU when built with CUDA.
TEST(Tensor, FillRandomDataAndCheck) {
  ASSERT_TRUE(FillRandomDataAndCheck(PlaceType::kCPU));
  ASSERT_TRUE(SetPlaceAndCheck(PlaceType::kCPU));
#ifdef PADDLE_WITH_CUDA
  // Device paths are only compiled into CUDA builds.
  ASSERT_TRUE(FillRandomDataAndCheck(PlaceType::kGPU));
  ASSERT_TRUE(SetPlaceAndCheck(PlaceType::kGPU));
#endif
}
|
||||
|
||||
} // namespace paddle_infer
|
@ -0,0 +1,111 @@
|
||||
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "paddle_infer_declare.h" // NOLINT
|
||||
|
||||
namespace paddle_infer {
|
||||
|
||||
/// \brief Paddle data type.
enum DataType {
  FLOAT32,  ///< 32-bit floating point.
  INT64,    ///< 64-bit signed integer.
  INT32,    ///< 32-bit signed integer.
  UINT8,    ///< 8-bit unsigned integer.
  INT8,     ///< 8-bit signed integer.
  // TODO(Superjomn) support more data types if needed.
};
|
||||
|
||||
/// \brief Device on which a Tensor's memory resides.
/// kUNK means the place has not been determined yet.
enum class PlaceType { kUNK = -1, kCPU, kGPU, kXPU };
|
||||
|
||||
/// \brief Represents an n-dimensional array of values.
/// The Tensor is used to store the input or output of the network.
/// Zero copy means that the tensor supports direct copy of host or device data
/// to device,
/// eliminating additional CPU copy. Tensor is only used in the
/// AnalysisPredictor.
/// It is obtained through PaddlePredictor::GetInputTensor()
/// and PaddlePredictor::GetOutputTensor() interface.
// NOTE(review): this header uses std::vector/std::string but does not include
// <vector>/<string> itself; presumably they arrive transitively via
// paddle_infer_declare.h — confirm.
class PD_INFER_DECL Tensor {
 public:
  /// \brief Reset the shape of the tensor.
  /// Generally it's only used for the input tensor.
  /// Reshape must be called before calling mutable_data() or copy_from_cpu()
  /// \param shape The shape to set.
  void Reshape(const std::vector<int>& shape);

  /// \brief Get the memory pointer in CPU or GPU with specific data type.
  /// Please Reshape the tensor first before call this.
  /// It's usually used to get input data pointer.
  /// \param place The place of the tensor.
  /// \return The mutable data buffer pointer, typed as T.
  template <typename T>
  T* mutable_data(PlaceType place);

  /// \brief Get the memory pointer directly.
  /// It's usually used to get the output data pointer.
  /// \param[out] place To get the device type of the tensor.
  /// \param[out] size To get the data size of the tensor.
  /// \return The tensor data buffer pointer.
  template <typename T>
  T* data(PlaceType* place, int* size) const;

  /// \brief Copy the host memory to tensor data.
  /// It's usually used to set the input tensor data.
  /// \param data The pointer of the data, from which the tensor will copy.
  template <typename T>
  void CopyFromCpu(const T* data);

  /// \brief Copy the tensor data to the host memory.
  /// It's usually used to get the output tensor data.
  /// \param[out] data The tensor will copy the data to the address.
  template <typename T>
  void CopyToCpu(T* data);

  /// \brief Return the shape of the Tensor.
  std::vector<int> shape() const;

  /// \brief Set lod info of the tensor.
  /// More about LOD can be seen here:
  /// https://www.paddlepaddle.org.cn/documentation/docs/zh/beginners_guide/basic_concept/lod_tensor.html#lodtensor
  /// \param x the lod info.
  void SetLoD(const std::vector<std::vector<size_t>>& x);
  /// \brief Return the lod info of the tensor.
  std::vector<std::vector<size_t>> lod() const;
  /// \brief Return the name of the tensor.
  const std::string& name() const;

  /// \brief Return the data type of the tensor.
  /// It's usually used to get the output tensor data type.
  /// \return The data type of the tensor.
  DataType type() const;

 protected:
  /// Binds the tensor to a Paddle scope; only subclasses (predictor-internal
  /// or test wrappers) may construct a Tensor.
  explicit Tensor(void* scope);
  /// Resolves the underlying framework tensor for name_ inside scope_.
  void* FindTensor() const;
  /// Sets the device placement; `device` is the device id (-1 when unknown).
  void SetPlace(PlaceType place, int device = -1);
  /// Sets the scope-variable name this tensor is bound to.
  void SetName(const std::string& name);

  // Name of the variable inside the bound scope.
  std::string name_;
  // The corresponding tensor pointer inside Paddle workspace is cached for
  // performance.
  mutable void* tensor_{nullptr};
  // Data type of the tensor; presumably refreshed on access — TODO confirm.
  DataType dtype_;
  // Appears to mark input (writable) tensors; test wrappers set it to true.
  bool input_or_output_;
  // Opaque pointer to the owning paddle::framework::Scope.
  void* scope_{nullptr};
  // Device placement of the tensor's memory.
  PlaceType place_;
  // Device id corresponding to place_, e.g. GPU card number.
  int device_;
};
|
||||
|
||||
} // namespace paddle_infer
|
Loading…
Reference in new issue