commit 5ff9c08680
File diff suppressed because it is too large.
@@ -1,71 +0,0 @@
/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_INCLUDE_API_LITE_CONTEXT_H
#define MINDSPORE_INCLUDE_API_LITE_CONTEXT_H

#include <string>
#include <memory>
#include <map>
#include <any>
#include "include/api/types.h"
#include "include/lite_types.h"

namespace mindspore {
namespace lite {
class Allocator;
}  // namespace lite

struct MS_API Context {
 public:
  static void Clear(const std::shared_ptr<Context> &context);

  static void SetAsDefault(const std::shared_ptr<Context> &context);

  static void SetVendorName(const std::shared_ptr<Context> &context, const std::string &name);
  static std::string GetVendorName(const std::shared_ptr<Context> &context);

  static void SetThreadNum(const std::shared_ptr<Context> &context, int num);
  static int GetThreadNum(const std::shared_ptr<Context> &context);

  static void SetAllocator(const std::shared_ptr<Context> &context, std::shared_ptr<lite::Allocator> alloc);
  static std::shared_ptr<lite::Allocator> GetAllocator(const std::shared_ptr<Context> &context);

  static void ConfigCPU(const std::shared_ptr<Context> &context, bool config);
  static bool IfCPUEnabled(const std::shared_ptr<Context> &context);

  static void ConfigCPUFp16(const std::shared_ptr<Context> &context, bool config);
  static bool IfCPUFp16Enabled(const std::shared_ptr<Context> &context);

  static void SetCPUBindMode(const std::shared_ptr<Context> &context, lite::CpuBindMode mode);
  static lite::CpuBindMode GetCPUBindMode(const std::shared_ptr<Context> &context);

  static void ConfigGPU(const std::shared_ptr<Context> &context, bool config);
  static bool IfGPUEnabled(const std::shared_ptr<Context> &context);

  static void ConfigGPUFp16(const std::shared_ptr<Context> &context, bool config);
  static bool IfGPUFp16Enabled(const std::shared_ptr<Context> &context);

  static void ConfigNPU(const std::shared_ptr<Context> &context, bool config);
  static bool IfNPUEnabled(const std::shared_ptr<Context> &context);

  static void SetNPUFrequency(const std::shared_ptr<Context> &context, int freq);
  static int GetNPUFrequency(const std::shared_ptr<Context> &context);

 private:
  std::map<std::string, std::any> context_;
};
}  // namespace mindspore
#endif  // MINDSPORE_INCLUDE_API_LITE_CONTEXT_H
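For reference, a minimal sketch of how this setter-style API is driven (the header must be on the include path and the program linked against the MindSpore Lite library; model loading is omitted). Every option funnels through a static accessor into the private std::map<std::string, std::any>, which keeps the struct layout stable as options are added:

#include <memory>

#include "include/api/lite_context.h"

int main() {
  // All state lives in the private map behind the struct, so the context
  // is created once and mutated only through the static setters.
  auto context = std::make_shared<mindspore::Context>();
  mindspore::Context::SetAsDefault(context);
  mindspore::Context::SetThreadNum(context, 2);
  mindspore::Context::ConfigCPU(context, true);
  mindspore::Context::ConfigCPUFp16(context, false);
  return mindspore::Context::GetThreadNum(context) == 2 ? 0 : 1;
}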
@@ -1,134 +0,0 @@
/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_INFERENCE_LOG_H_
#define MINDSPORE_INFERENCE_LOG_H_

#include <stdarg.h>
#include <stdint.h>
#include <string>
#include <sstream>
#include <memory>
#include <iostream>
#include <chrono>
#include <vector>

#ifndef ENABLE_ACL
#include "mindspore/core/utils/log_adapter.h"
#else  // ENABLE_ACL
#include "acl/acl.h"
#endif

namespace mindspore::inference {

class LogStream {
 public:
  LogStream() { sstream_ = std::make_shared<std::stringstream>(); }
  ~LogStream() = default;

  template <typename T>
  LogStream &operator<<(const T &val) noexcept {
    (*sstream_) << val;
    return *this;
  }

  template <typename T>
  LogStream &operator<<(const std::vector<T> &val) noexcept {
    (*sstream_) << "[";
    for (size_t i = 0; i < val.size(); i++) {
      (*this) << val[i];
      if (i + 1 < val.size()) {
        (*sstream_) << ", ";
      }
    }
    (*sstream_) << "]";
    return *this;
  }

  LogStream &operator<<(std::ostream &func(std::ostream &os)) noexcept {
    (*sstream_) << func;
    return *this;
  }

  friend class LogWriter;
  friend class Status;

 private:
  std::shared_ptr<std::stringstream> sstream_;
};

#ifndef ENABLE_ACL
#define MSI_LOG(level) MS_LOG(level)

#define MSI_LOG_DEBUG MSI_LOG(DEBUG)
#define MSI_LOG_INFO MSI_LOG(INFO)
#define MSI_LOG_WARNING MSI_LOG(WARNING)
#define MSI_LOG_ERROR MSI_LOG(ERROR)

#define MSI_ASSERT(item) MS_ASSERT(item)

#else  // ENABLE_ACL

class LogWriter {
 public:
  LogWriter(const char *file, int line, const char *func, aclLogLevel log_level)
      : file_(file), line_(line), func_(func), log_level_(log_level) {}
  ~LogWriter() = default;

  void operator<(const LogStream &stream) const noexcept __attribute__((visibility("default"))) {
    std::ostringstream msg;
    msg << stream.sstream_->rdbuf();
    OutputLog(msg);
  }

 private:
  void OutputLog(const std::ostringstream &msg) const { aclAppLog(log_level_, func_, file_, line_, msg.str().c_str()); }

  const char *file_;
  int line_;
  const char *func_;
  aclLogLevel log_level_;
};

#define MSILOG_IF(level) inference::LogWriter(__FILE__, __LINE__, __FUNCTION__, ACL_##level) < inference::LogStream()

#define MSI_LOG(level) MSI_LOG_##level

#define MSI_LOG_DEBUG MSILOG_IF(DEBUG)
#define MSI_LOG_INFO MSILOG_IF(INFO)
#define MSI_LOG_WARNING MSILOG_IF(WARNING)
#define MSI_LOG_ERROR MSILOG_IF(ERROR)

#define MSI_ASSERT(item)

#endif  // ENABLE_ACL

#define MSI_TIME_STAMP_START(name) auto time_start_##name = std::chrono::steady_clock::now();
#define MSI_TIME_STAMP_END(name)                                                                             \
  {                                                                                                          \
    auto time_end_##name = std::chrono::steady_clock::now();                                                 \
    auto time_cost = std::chrono::duration<double, std::milli>(time_end_##name - time_start_##name).count(); \
    MSI_LOG_INFO << #name " Time Cost # " << time_cost << " ms ---------------------";                       \
  }

#define INFER_STATUS(code) inference::Status(code) < inference::LogStream()
#define ERROR_INFER_STATUS(status, type, msg) \
  MSI_LOG_ERROR << msg;                       \
  status = inference::Status(type, msg)

}  // namespace mindspore::inference

#endif  // MINDSPORE_INFERENCE_LOG_H_
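These macros lean on operator precedence: < binds more loosely than <<, so in MSILOG_IF the entire message is first streamed into the temporary LogStream and only then handed to LogWriter::operator<, which forwards it to aclAppLog in a single call. A self-contained sketch of the same pattern, with std::cout standing in for aclAppLog (Stream, Writer, and DEMO_LOG are illustrative stand-ins, not part of the header):

#include <iostream>
#include <sstream>
#include <string>

// Stand-in for LogStream: buffers everything streamed into it.
class Stream {
 public:
  template <typename T>
  Stream &operator<<(const T &val) {
    buf_ << val;
    return *this;
  }
  std::string str() const { return buf_.str(); }

 private:
  std::ostringstream buf_;
};

// Stand-in for LogWriter: operator< has lower precedence than operator<<,
// so it runs only after every << insertion has completed.
class Writer {
 public:
  Writer(const char *file, int line) : file_(file), line_(line) {}
  void operator<(const Stream &stream) const {
    std::cout << file_ << ":" << line_ << " " << stream.str() << std::endl;
  }

 private:
  const char *file_;
  int line_;
};

#define DEMO_LOG Writer(__FILE__, __LINE__) < Stream()

int main() {
  DEMO_LOG << "time cost " << 3.5 << " ms";  // emitted as a single record
  return 0;
}

The MSI_TIME_STAMP_START/MSI_TIME_STAMP_END pair builds on the same logging macros: start captures a steady_clock time point named after its argument, and end computes the elapsed milliseconds and streams the result through MSI_LOG_INFO.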
@@ -1,217 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_INCLUDE_INFER_TENSOR_H_
#define MINDSPORE_INCLUDE_INFER_TENSOR_H_

#include <utility>
#include <vector>
#include <memory>
#include <numeric>
#include <map>
#include <functional>

#include "securec/include/securec.h"
#include "include/infer_log.h"

namespace mindspore {
#define MS_API __attribute__((visibility("default")))
namespace inference {
enum DataType {
  kMSI_Unknown = 0,
  kMSI_Bool = 1,
  kMSI_Int8 = 2,
  kMSI_Int16 = 3,
  kMSI_Int32 = 4,
  kMSI_Int64 = 5,
  kMSI_Uint8 = 6,
  kMSI_Uint16 = 7,
  kMSI_Uint32 = 8,
  kMSI_Uint64 = 9,
  kMSI_Float16 = 10,
  kMSI_Float32 = 11,
  kMSI_Float64 = 12,
};

class InferTensorBase {
 public:
  InferTensorBase() = default;
  virtual ~InferTensorBase() = default;

  virtual DataType data_type() const = 0;
  virtual void set_data_type(DataType type) = 0;
  virtual std::vector<int64_t> shape() const = 0;
  virtual void set_shape(const std::vector<int64_t> &shape) = 0;
  virtual const void *data() const = 0;
  virtual size_t data_size() const = 0;
  virtual bool resize_data(size_t data_len) = 0;
  virtual void *mutable_data() = 0;

  bool set_data(const void *data, size_t data_len) {
    resize_data(data_len);
    if (mutable_data() == nullptr) {
      MSI_LOG_ERROR << "set data failed, data len " << data_len;
      return false;
    }
    if (data_size() != data_len) {
      MSI_LOG_ERROR << "set data failed, tensor current data size " << data_size() << " not match data len "
                    << data_len;
      return false;
    }
    if (data_len == 0) {
      return true;
    }
    auto ret = memcpy_s(mutable_data(), data_size(), data, data_len);
    if (ret != 0) {
      MSI_LOG_ERROR << "Set data memcpy_s failed";
      return false;
    }
    return true;
  }

  int64_t ElementNum() const {
    std::vector<int64_t> shapex = shape();
    return std::accumulate(shapex.begin(), shapex.end(), 1LL, std::multiplies<int64_t>());
  }

  int GetTypeSize(DataType type) const {
    const std::map<DataType, size_t> type_size_map{
      {kMSI_Bool, sizeof(bool)},       {kMSI_Float64, sizeof(double)},   {kMSI_Int8, sizeof(int8_t)},
      {kMSI_Uint8, sizeof(uint8_t)},   {kMSI_Int16, sizeof(int16_t)},    {kMSI_Uint16, sizeof(uint16_t)},
      {kMSI_Int32, sizeof(int32_t)},   {kMSI_Uint32, sizeof(uint32_t)},  {kMSI_Int64, sizeof(int64_t)},
      {kMSI_Uint64, sizeof(uint64_t)}, {kMSI_Float16, sizeof(uint16_t)}, {kMSI_Float32, sizeof(float)},
    };
    auto it = type_size_map.find(type);
    if (it != type_size_map.end()) {
      return it->second;
    }
    return 0;
  }
};

class InferTensor : public InferTensorBase {
 public:
  DataType type_;
  std::vector<int64_t> shape_;
  std::vector<uint8_t> data_;

 public:
  InferTensor() = default;
  ~InferTensor() = default;
  InferTensor(DataType type, std::vector<int64_t> shape, const void *data, size_t data_len) {
    set_data_type(type);
    set_shape(shape);
    set_data(data, data_len);
  }

  void set_data_type(DataType type) override { type_ = type; }
  DataType data_type() const override { return type_; }

  void set_shape(const std::vector<int64_t> &shape) override { shape_ = shape; }
  std::vector<int64_t> shape() const override { return shape_; }

  const void *data() const override { return data_.data(); }
  size_t data_size() const override { return data_.size(); }

  bool resize_data(size_t data_len) override {
    data_.resize(data_len);
    return true;
  }
  void *mutable_data() override { return data_.data(); }
};

class InferImagesBase {
 public:
  InferImagesBase() = default;
  virtual ~InferImagesBase() = default;
  virtual size_t batch_size() const = 0;
  virtual bool get(size_t index, const void *&pic_buffer, uint32_t &pic_size) const = 0;
  virtual size_t input_index() const = 0;  // the index of images as input in model
};

class RequestBase {
 public:
  RequestBase() = default;
  virtual ~RequestBase() = default;
  virtual size_t size() const = 0;
  virtual const InferTensorBase *operator[](size_t index) const = 0;
};

class ImagesRequestBase {
 public:
  ImagesRequestBase() = default;
  virtual ~ImagesRequestBase() = default;
  virtual size_t size() const = 0;
  virtual const InferImagesBase *operator[](size_t index) const = 0;
};

class ReplyBase {
 public:
  ReplyBase() = default;
  virtual ~ReplyBase() = default;
  virtual size_t size() const = 0;
  virtual InferTensorBase *operator[](size_t index) = 0;
  virtual const InferTensorBase *operator[](size_t index) const = 0;
  virtual InferTensorBase *add() = 0;
  virtual void clear() = 0;
};

class VectorInferTensorWrapReply : public ReplyBase {
 public:
  explicit VectorInferTensorWrapReply(std::vector<InferTensor> &tensor_list) : tensor_list_(tensor_list) {}
  ~VectorInferTensorWrapReply() = default;

  size_t size() const { return tensor_list_.size(); }
  InferTensorBase *operator[](size_t index) {
    if (index >= tensor_list_.size()) {
      MSI_LOG_ERROR << "visit invalid index " << index << " total size " << tensor_list_.size();
      return nullptr;
    }
    return &(tensor_list_[index]);
  }
  const InferTensorBase *operator[](size_t index) const {
    if (index >= tensor_list_.size()) {
      MSI_LOG_ERROR << "visit invalid index " << index << " total size " << tensor_list_.size();
      return nullptr;
    }
    return &(tensor_list_[index]);
  }
  InferTensorBase *add() {
    tensor_list_.push_back(InferTensor());
    return &(tensor_list_.back());
  }
  void clear() { tensor_list_.clear(); }
  std::vector<InferTensor> &tensor_list_;
};

class VectorInferTensorWrapRequest : public RequestBase {
 public:
  explicit VectorInferTensorWrapRequest(const std::vector<InferTensor> &tensor_list) : tensor_list_(tensor_list) {}
  ~VectorInferTensorWrapRequest() = default;

  size_t size() const { return tensor_list_.size(); }
  const InferTensorBase *operator[](size_t index) const {
    if (index >= tensor_list_.size()) {
      MSI_LOG_ERROR << "visit invalid index " << index << " total size " << tensor_list_.size();
      return nullptr;
    }
    return &(tensor_list_[index]);
  }
  const std::vector<InferTensor> &tensor_list_;
};
}  // namespace inference
}  // namespace mindspore
#endif  // MINDSPORE_INCLUDE_INFER_TENSOR_H_
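A minimal usage sketch for InferTensor (the float32 shape and values are illustrative, not from the source):

#include <cstdint>
#include <vector>

#include "include/infer_tensor.h"

int main() {
  using mindspore::inference::InferTensor;
  using mindspore::inference::kMSI_Float32;

  // A 2x3 float32 tensor; the constructor copies the buffer into data_
  // via set_data, which resizes and then memcpy_s-es the payload.
  std::vector<float> values = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f};
  InferTensor tensor(kMSI_Float32, {2, 3}, values.data(), values.size() * sizeof(float));

  // ElementNum multiplies the dims; GetTypeSize maps the enum to a byte width.
  int64_t expected = tensor.ElementNum() * tensor.GetTypeSize(tensor.data_type());
  return static_cast<size_t>(expected) == tensor.data_size() ? 0 : 1;
}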
@@ -1,86 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_INCLUDE_MS_SESSION_H
#define MINDSPORE_INCLUDE_MS_SESSION_H

#include <memory>
#include <vector>
#include <string>
#include "include/infer_tensor.h"
#include "include/infer_log.h"

namespace mindspore {
namespace inference {
enum StatusCode { SUCCESS = 0, FAILED, INVALID_INPUTS };

class Status {
 public:
  Status() : status_code_(FAILED) {}
  Status(enum StatusCode status_code, const std::string &status_msg = "")
      : status_code_(status_code), status_msg_(status_msg) {}
  ~Status() = default;

  bool IsSuccess() const { return status_code_ == SUCCESS; }
  enum StatusCode StatusCode() const { return status_code_; }
  std::string StatusMessage() const { return status_msg_; }
  bool operator==(const Status &other) const { return status_code_ == other.status_code_; }
  bool operator==(enum StatusCode other_code) const { return status_code_ == other_code; }
  bool operator!=(const Status &other) const { return status_code_ != other.status_code_; }
  bool operator!=(enum StatusCode other_code) const { return status_code_ != other_code; }
  operator bool() const = delete;
  Status &operator<(const LogStream &stream) noexcept __attribute__((visibility("default"))) {
    status_msg_ = stream.sstream_->str();
    return *this;
  }

 private:
  enum StatusCode status_code_;
  std::string status_msg_;
};

class MS_API InferSession {
 public:
  InferSession() = default;
  virtual ~InferSession() = default;
  virtual Status InitEnv(const std::string &device_type, uint32_t device_id) = 0;
  virtual Status FinalizeEnv() = 0;
  virtual Status LoadModelFromFile(const std::string &file_name, uint32_t &model_id) = 0;
  virtual Status UnloadModel(uint32_t model_id) = 0;
  // override this method to avoid request/reply data copy
  virtual Status ExecuteModel(uint32_t model_id, const RequestBase &request, ReplyBase &reply) = 0;

  virtual Status ExecuteModel(uint32_t model_id, const std::vector<InferTensor> &inputs,
                              std::vector<InferTensor> &outputs) {
    VectorInferTensorWrapRequest request(inputs);
    VectorInferTensorWrapReply reply(outputs);
    return ExecuteModel(model_id, request, reply);
  }
  // by default, input data preprocessing (decode, resize, crop, crop & paste, etc.) is not supported
  virtual Status ExecuteModel(uint32_t /*model_id*/,
                              const ImagesRequestBase & /*images_inputs*/,  // images for preprocessing
                              const RequestBase & /*request*/, ReplyBase & /*reply*/) {
    return FAILED;
  }
  virtual Status GetModelInputsInfo(uint32_t graph_id, std::vector<inference::InferTensor> *tensor_list) const {
    Status status(SUCCESS);
    return status;
  }
  static std::shared_ptr<InferSession> CreateSession(const std::string &device, uint32_t device_id);
};
}  // namespace inference
}  // namespace mindspore
#endif  // MINDSPORE_INCLUDE_MS_SESSION_H
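A sketch of the intended call sequence against InferSession. The device string, device id, and model path are illustrative assumptions, a concrete backend must supply CreateSession, and whether CreateSession already initializes the environment is backend-specific, so the explicit InitEnv call here is also an assumption:

#include <memory>
#include <vector>

#include "include/ms_session.h"

int main() {
  using namespace mindspore::inference;

  std::shared_ptr<InferSession> session = InferSession::CreateSession("Ascend", 0);
  if (session == nullptr || !session->InitEnv("Ascend", 0).IsSuccess()) {
    return 1;
  }

  uint32_t model_id = 0;
  if (!session->LoadModelFromFile("model.om", model_id).IsSuccess()) {  // hypothetical path
    session->FinalizeEnv();
    return 1;
  }

  // The vector overload wraps inputs/outputs in the WrapRequest/WrapReply
  // adapters and forwards to the RequestBase/ReplyBase overload (one copy).
  std::vector<InferTensor> inputs;  // populate with tensors matching the model
  std::vector<InferTensor> outputs;
  Status status = session->ExecuteModel(model_id, inputs, outputs);

  session->UnloadModel(model_id);
  session->FinalizeEnv();
  return status.IsSuccess() ? 0 : 1;
}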
File diff suppressed because it is too large.
Some files were not shown because too many files have changed in this diff.