!3807 serving: support ACL DVPP preprocessing

Merge pull request !3807 from 徐永飞/master
pull/3807/MERGE
mindspore-ci-bot 5 years ago committed by Gitee
commit 98c415a1d5

@ -15,7 +15,7 @@ include(${CMAKE_SOURCE_DIR}/cmake/external_libs/json.cmake)
include(${CMAKE_SOURCE_DIR}/cmake/dependency_securec.cmake)
include(${CMAKE_SOURCE_DIR}/cmake/external_libs/protobuf.cmake)
if (ENABLE_DEBUGGER OR ENABLE_SERVING)
if (ENABLE_DEBUGGER OR ENABLE_SERVING OR ENABLE_TESTCASES)
# build dependencies of gRPC
include(${CMAKE_SOURCE_DIR}/cmake/external_libs/absl.cmake)
include(${CMAKE_SOURCE_DIR}/cmake/external_libs/c-ares.cmake)
@ -69,7 +69,6 @@ endif()
if (ENABLE_MINDDATA)
include(${CMAKE_SOURCE_DIR}/cmake/external_libs/icu4c.cmake)
include(${CMAKE_SOURCE_DIR}/cmake/external_libs/jpeg_turbo.cmake)
include(${CMAKE_SOURCE_DIR}/cmake/external_libs/libtiff.cmake)
include(${CMAKE_SOURCE_DIR}/cmake/external_libs/opencv.cmake)
include(${CMAKE_SOURCE_DIR}/cmake/external_libs/sqlite.cmake)
@ -78,6 +77,10 @@ if (ENABLE_MINDDATA)
include(${CMAKE_SOURCE_DIR}/cmake/external_libs/sentencepiece.cmake)
endif()
if (ENABLE_MINDDATA OR ENABLE_SERVING)
include(${CMAKE_SOURCE_DIR}/cmake/external_libs/jpeg_turbo.cmake)
endif()
include(${CMAKE_SOURCE_DIR}/cmake/external_libs/gtest.cmake)
include(${CMAKE_SOURCE_DIR}/cmake/external_libs/onnx.cmake)
set(CMAKE_CXX_FLAGS ${_ms_tmp_CMAKE_CXX_FLAGS_F})

@ -26,19 +26,10 @@
#ifndef ENABLE_ACL
#include "mindspore/core/utils/log_adapter.h"
namespace mindspore::inference {
#define MSI_LOG(level) MS_LOG(level)
#define MSI_LOG_DEBUG MSI_LOG(DEBUG)
#define MSI_LOG_INFO MSI_LOG(INFO)
#define MSI_LOG_WARNING MSI_LOG(WARNING)
#define MSI_LOG_ERROR MSI_LOG(ERROR)
#define MSI_ASSERT(item) MS_ASSERT(item)
} // namespace mindspore::inference
#else // ENABLE_ACL
#include "acl/acl.h"
#endif
namespace mindspore::inference {
class LogStream {
@ -58,15 +49,23 @@ class LogStream {
}
friend class LogWriter;
friend class Status;
private:
std::shared_ptr<std::stringstream> sstream_;
};
template <class T, typename std::enable_if<std::is_enum<T>::value, int>::type = 0>
constexpr std::ostream &operator<<(std::ostream &stream, const T &value) {
return stream << static_cast<typename std::underlying_type<T>::type>(value);
}
#ifndef ENABLE_ACL
#define MSI_LOG(level) MS_LOG(level)
#define MSI_LOG_DEBUG MSI_LOG(DEBUG)
#define MSI_LOG_INFO MSI_LOG(INFO)
#define MSI_LOG_WARNING MSI_LOG(WARNING)
#define MSI_LOG_ERROR MSI_LOG(ERROR)
#define MSI_ASSERT(item) MS_ASSERT(item)
#else // ENABLE_ACL
class LogWriter {
public:
@ -100,8 +99,10 @@ class LogWriter {
#define MSI_ASSERT(item)
} // namespace mindspore::inference
#endif // ENABLE_ACL
#define INFER_STATUS(code) inference::Status(code) < inference::LogStream()
} // namespace mindspore::inference
#endif // MINDSPORE_INFERENCE_LOG_H_

@ -129,12 +129,25 @@ class InferTensor : public InferTensorBase {
void *mutable_data() override { return data_.data(); }
};
class InferImagesBase {
public:
virtual size_t batch_size() const = 0;
virtual bool get(size_t index, const void *&pic_buffer, uint32_t &pic_size) const = 0;
virtual size_t input_index() const = 0;  // index of the model input these images correspond to
};
class RequestBase {
public:
virtual size_t size() const = 0;
virtual const InferTensorBase *operator[](size_t index) const = 0;
};
class ImagesRequestBase {
public:
virtual size_t size() const = 0;
virtual const InferImagesBase *operator[](size_t index) const = 0;
};
class ReplyBase {
public:
virtual size_t size() const = 0;

@ -21,10 +21,36 @@
#include <vector>
#include <string>
#include "include/infer_tensor.h"
#include "include/infer_log.h"
namespace mindspore {
namespace inference {
enum Status { SUCCESS = 0, FAILED, INVALID_INPUTS };
enum StatusCode { SUCCESS = 0, FAILED, INVALID_INPUTS };
class Status {
public:
Status() : status_code_(FAILED) {}
Status(enum StatusCode status_code, const std::string &status_msg = "")
: status_code_(status_code), status_msg_(status_msg) {}
bool IsSuccess() const { return status_code_ == SUCCESS; }
enum StatusCode StatusCode() const { return status_code_; }
std::string StatusMessage() const { return status_msg_; }
bool operator==(const Status &other) const { return status_code_ == other.status_code_; }
bool operator==(enum StatusCode other_code) const { return status_code_ == other_code; }
bool operator!=(const Status &other) const { return status_code_ != other.status_code_; }
bool operator!=(enum StatusCode other_code) const { return status_code_ != other_code; }
operator bool() const = delete;
Status &operator<(const LogStream &stream) noexcept __attribute__((visibility("default"))) {
status_msg_ = stream.sstream_->str();
return *this;
}
private:
enum StatusCode status_code_;
std::string status_msg_;
};
class MS_API InferSession {
public:
InferSession() = default;
@ -42,7 +68,12 @@ class MS_API InferSession {
VectorInferTensorWrapReply reply(outputs);
return ExecuteModel(model_id, request, reply);
}
// input data preprocessing (decode, resize, crop, crop&paste, etc.) is not supported by default
virtual Status ExecuteModel(uint32_t /*model_id*/,
const ImagesRequestBase & /*images_inputs*/, // images for preprocess
const RequestBase & /*request*/, ReplyBase & /*reply*/) {
return FAILED;
}
static std::shared_ptr<InferSession> CreateSession(const std::string &device, uint32_t device_id);
};
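
The Status type above carries an error message that can be composed with stream syntax: INFER_STATUS(code) from infer_log.h creates a Status and feeds a LogStream into its operator<, which copies the accumulated text into the status message. A minimal sketch of that pattern, using a hypothetical CheckBatch helper that is not part of this change:

#include "include/infer_log.h"
#include "include/inference.h"

namespace mindspore::inference {
// Hypothetical helper, shown only to illustrate the INFER_STATUS pattern.
Status CheckBatch(size_t given, size_t expected) {
  if (given != expected) {
    // LogStream collects the << pieces; Status::operator< stores the text as StatusMessage().
    return INFER_STATUS(INVALID_INPUTS) << "batch size " << given << " does not match model batch size "
                                        << expected;
  }
  return SUCCESS;  // implicit Status(StatusCode) conversion
}
}  // namespace mindspore::inference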

@ -87,7 +87,8 @@ GraphId AscendInferenceSession::CompileGraph(NotNull<FuncGraphPtr> func_graph) {
return graph_id;
}
bool AscendInferenceSession::CheckModelInputs(uint32_t graph_id, const std::vector<tensor::TensorPtr> &inputs) const {
bool AscendInferenceSession::CheckModelInputs(uint32_t graph_id, const std::vector<tensor::TensorPtr> &inputs,
std::string *error_msg) const {
MS_LOG(INFO) << "Start check client inputs, graph id : " << graph_id;
auto kernel_graph = GetGraph(graph_id);
MS_EXCEPTION_IF_NULL(kernel_graph);
@ -113,12 +114,25 @@ bool AscendInferenceSession::CheckModelInputs(uint32_t graph_id, const std::vect
MS_LOG(ERROR) << "Input number is inconsistent. The actual input number [" << inputs.size()
<< "] but the graph input number is [" << paras.size() << "]";
MS_LOG(ERROR) << "InputsInfo --" << InputsInfo(paras, inputs);
if (error_msg != nullptr) {
std::stringstream str_stream;
str_stream << "Input number is inconsistent. The given input number [" << inputs.size()
<< "] but the graph input number is [" << paras.size() << "]\n";
str_stream << "InputsInfo --" << InputsInfo(paras, inputs);
*error_msg = str_stream.str();
}
return false;
}
auto input = inputs[no_weight_input++];
if (!CompareInput(input, paras[i])) {
MS_LOG(ERROR) << "Please check the input information.";
MS_LOG(ERROR) << "InputsInfo --" << InputsInfo(paras, inputs);
if (error_msg != nullptr) {
std::stringstream str_stream;
str_stream << "Please check the input information.\n";
str_stream << "InputsInfo --" << InputsInfo(paras, inputs);
*error_msg = str_stream.str();
}
return false;
}
}
@ -165,17 +179,35 @@ std::string AscendInferenceSession::PrintInputShape(std::vector<T> shape) const
std::string AscendInferenceSession::InputsInfo(const std::vector<ParameterPtr> &paras,
const std::vector<tensor::TensorPtr> &inputs) const {
const std::map<TypeId, std::string> dtype_name_map{
{TypeId::kNumberTypeBegin, "Unknown"}, {TypeId::kNumberTypeBool, "Bool"},
{TypeId::kNumberTypeFloat64, "Float64"}, {TypeId::kNumberTypeInt8, "Int8"},
{TypeId::kNumberTypeUInt8, "Uint8"}, {TypeId::kNumberTypeInt16, "Int16"},
{TypeId::kNumberTypeUInt16, "Uint16"}, {TypeId::kNumberTypeInt32, "Int32"},
{TypeId::kNumberTypeUInt32, "Uint32"}, {TypeId::kNumberTypeInt64, "Int64"},
{TypeId::kNumberTypeUInt64, "Uint64"}, {TypeId::kNumberTypeFloat16, "Float16"},
{TypeId::kNumberTypeFloat32, "Float32"},
};
auto data_type_to_string = [&dtype_name_map](TypeId type_id) {
auto it = dtype_name_map.find(type_id);
if (it == dtype_name_map.end()) {
return std::string("Unknown");
}
return it->second;
};
std::string graph = "graph inputs:{ ";
for (size_t i = 0; i < paras.size(); ++i) {
graph += std::to_string(i) + ": dims " + std::to_string(AnfAlgo::GetOutputDeviceShape(paras[i], 0).size()) +
", shape " + PrintInputShape(AnfAlgo::GetOutputDeviceShape(paras[i], 0)) + ", data type " +
std::to_string(AnfAlgo::GetSelectKernelBuildInfo(paras[i])->GetOutputDeviceType(0)) + " }";
auto &para = paras[i];
graph += std::to_string(i) + ": dims " + std::to_string(AnfAlgo::GetOutputDeviceShape(para, 0).size()) +
", shape " + PrintInputShape(AnfAlgo::GetOutputDeviceShape(para, 0)) + ", data type " +
data_type_to_string(AnfAlgo::GetSelectKernelBuildInfo(para)->GetOutputDeviceType(0)) + " }";
}
std::string actual = "actual inputs:{ ";
std::string actual = "given inputs:{ ";
for (size_t i = 0; i < inputs.size(); ++i) {
actual += std::to_string(i) + ": dims " + std::to_string(inputs[i]->shape().size()) + ", shape " +
PrintInputShape(inputs[i]->shape()) + ", data type " + std::to_string(inputs[i]->data_type()) + " }";
PrintInputShape(inputs[i]->shape()) + ", data type " + data_type_to_string(inputs[i]->data_type()) + " }";
}
return graph + " " + actual;
}

@ -39,7 +39,8 @@ class AscendInferenceSession : public AscendSession {
void LoadInputData(const std::shared_ptr<KernelGraph> &kernel_graph,
const std::vector<tensor::TensorPtr> &inputs_const) const;
GraphId CompileGraph(NotNull<FuncGraphPtr> func_graph) override;
bool CheckModelInputs(uint32_t graph_id, const std::vector<tensor::TensorPtr> &inputs) const override;
bool CheckModelInputs(uint32_t graph_id, const std::vector<tensor::TensorPtr> &inputs,
std::string *error_msg) const override;
bool CompareInput(const tensor::TensorPtr &input, const ParameterPtr &parameter) const;
template <typename T>
std::string PrintInputShape(std::vector<T> shape) const;

@ -116,7 +116,7 @@ Status MSInferSession::LoadModelFromFile(const std::string &file_name, uint32_t
Status MSInferSession::UnloadModel(uint32_t model_id) { return SUCCESS; }
tensor::TensorPtr ServingTensor2MSTensor(const InferTensorBase &out_tensor) {
Status ServingTensor2MSTensor(size_t index, const InferTensorBase &out_tensor, tensor::TensorPtr &ms_tensor) {
std::vector<int> shape;
for (auto dim : out_tensor.shape()) {
shape.push_back(static_cast<int>(dim));
@ -134,14 +134,22 @@ tensor::TensorPtr ServingTensor2MSTensor(const InferTensorBase &out_tensor) {
auto it = type2id_map.find(out_tensor.data_type());
if (it == type2id_map.end()) {
MSI_LOG_WARNING << "undefined MSI data type " << out_tensor.data_type();
return nullptr;
return FAILED;
} else {
data_type = it->second;
}
auto ms_tensor = std::make_shared<tensor::Tensor>(data_type, shape);
ms_tensor = std::make_shared<tensor::Tensor>(data_type, shape);
if (ms_tensor->Size() != out_tensor.data_size()) {
MSI_LOG_ERROR << "input " << std::to_string(index)
<< " data size does not match shape and dtype, calculated required size " << ms_tensor->Size()
<< ", given " << out_tensor.data_size();
return INFER_STATUS(INVALID_INPUTS) << "input " << std::to_string(index)
<< " data size does not match shape and dtype, calculated required size "
<< ms_tensor->Size() << ", given " << out_tensor.data_size();
}
memcpy_s(ms_tensor->data_c(), ms_tensor->Size(), out_tensor.data(), out_tensor.data_size());
return ms_tensor;
return SUCCESS;
}
void MSTensor2ServingTensor(tensor::TensorPtr ms_tensor, InferTensorBase &out_tensor) {
@ -189,16 +197,18 @@ Status MSInferSession::ExecuteModel(uint32_t model_id, const RequestBase &reques
MS_LOG(ERROR) << "Execute Model " << model_id << " Failed input tensor is null, index " << i;
return FAILED;
}
auto input = ServingTensor2MSTensor(*request[i]);
if (input == nullptr) {
tensor::TensorPtr input = nullptr;
auto ret = ServingTensor2MSTensor(i, *request[i], input);
if (ret != SUCCESS) {
MS_LOG(ERROR) << "Tensor convert failed";
return FAILED;
return ret;
}
inputs.push_back(input);
}
if (!CheckModelInputs(model_id, inputs)) {
auto ret = CheckModelInputs(model_id, inputs);
if (ret != SUCCESS) {
MS_LOG(ERROR) << "Check Model " << model_id << " Inputs Failed";
return INVALID_INPUTS;
return ret;
}
vector<tensor::TensorPtr> outputs = RunGraph(model_id, inputs);
if (outputs.empty()) {
@ -354,9 +364,13 @@ Status MSInferSession::InitEnv(const std::string &device, uint32_t device_id) {
return SUCCESS;
}
bool MSInferSession::CheckModelInputs(uint32_t graph_id, const std::vector<tensor::TensorPtr> &inputs) const {
Status MSInferSession::CheckModelInputs(uint32_t graph_id, const std::vector<tensor::TensorPtr> &inputs) const {
MS_ASSERT(session_impl_ != nullptr);
return session_impl_->CheckModelInputs(graph_id, inputs);
std::string error_msg;
if (!session_impl_->CheckModelInputs(graph_id, inputs, &error_msg)) {
return INFER_STATUS(INVALID_INPUTS) << error_msg;
}
return SUCCESS;
}
} // namespace mindspore::inference

@ -58,7 +58,7 @@ class MSInferSession : public InferSession {
static void RegAllOp();
string AjustTargetName(const std::string &device);
Status CompileGraph(std::shared_ptr<FuncGraph> funcGraphPtr, uint32_t &model_id);
bool CheckModelInputs(uint32_t graph_id, const std::vector<tensor::TensorPtr> &inputs) const;
Status CheckModelInputs(uint32_t graph_id, const std::vector<tensor::TensorPtr> &inputs) const;
std::vector<tensor::TensorPtr> RunGraph(uint32_t graph_id, const std::vector<tensor::TensorPtr> &inputs);
};
} // namespace inference

@ -97,7 +97,10 @@ class SessionBasic {
virtual GraphId GetFinalRunGraph() const { return kInvalidGraphId; }
void AssignParamKey(const KernelGraphPtr &kernel_graph);
void InitPSParamAndOptim(const KernelGraphPtr &kernel_graph, const std::vector<tensor::TensorPtr> &inputs_const);
virtual bool CheckModelInputs(uint32_t graph_id, const std::vector<tensor::TensorPtr> &inputs) const { return true; }
virtual bool CheckModelInputs(uint32_t graph_id, const std::vector<tensor::TensorPtr> &inputs,
std::string *error_msg) const {
return true;
}
#ifdef ENABLE_DEBUGGER
// set debugger

@ -103,9 +103,12 @@ endif ()
if (ENABLE_ACL)
add_compile_definitions(ENABLE_ACL)
add_compile_definitions(ENABLE_DVPP_INTERFACE)
set(ALC_LIB_SO ${ACL_LIB_DIR}/lib64/libruntime.so ${ACL_LIB_DIR}/lib64/libascendcl.so
${ACL_LIB_DIR}/lib64/libacl_retr.so ${ACL_LIB_DIR}/lib64/libacl_cblas.so)
${ACL_LIB_DIR}/lib64/libacl_retr.so ${ACL_LIB_DIR}/lib64/libacl_cblas.so
${ACL_LIB_DIR}/lib64/libacl_dvpp.so)
target_link_libraries(ms_serving ${ALC_LIB_SO})
target_link_libraries(ms_serving jpeg_turbo::jpeg)
else ()
target_link_libraries(ms_serving inference mindspore_gvar)
endif ()

@ -16,6 +16,7 @@
#include <memory>
#include <algorithm>
#include <fstream>
#include "serving/acl/acl_session.h"
#include "include/infer_log.h"
@ -25,7 +26,7 @@ std::shared_ptr<InferSession> InferSession::CreateSession(const std::string &dev
try {
auto session = std::make_shared<AclSession>();
auto ret = session->InitEnv(device, device_id);
if (!ret) {
if (ret != SUCCESS) {
return nullptr;
}
return session;
@ -36,22 +37,123 @@ std::shared_ptr<InferSession> InferSession::CreateSession(const std::string &dev
}
Status AclSession::LoadModelFromFile(const std::string &file_name, uint32_t &model_id) {
return model_process_.LoadModelFromFile(file_name, model_id) ? SUCCESS : FAILED;
Status ret = model_process_.LoadModelFromFile(file_name, model_id);
if (ret != SUCCESS) {
MSI_LOG_ERROR << "Load model from file failed, model file " << file_name;
return FAILED;
}
std::string dvpp_config_file;
auto index = file_name.rfind(".");
if (index == std::string::npos) {
dvpp_config_file = file_name;
} else {
dvpp_config_file = file_name.substr(0, index);
}
dvpp_config_file += "_dvpp_config.json";
std::ifstream fp(dvpp_config_file);
if (!fp.is_open()) {
MSI_LOG_INFO << "Dvpp config file does not exist, model will execute with tensors as inputs, dvpp config file "
<< dvpp_config_file;
return SUCCESS;
}
fp.close();
if (dvpp_process_.InitWithJsonConfig(dvpp_config_file) != SUCCESS) {
MSI_LOG_ERROR << "Dvpp config file parse error, dvpp config file " << dvpp_config_file;
return FAILED;
}
execute_with_dvpp_ = true;
MSI_LOG_INFO << "Dvpp config success";
return SUCCESS;
}
Status AclSession::UnloadModel(uint32_t model_id) {
Status AclSession::UnloadModel(uint32_t /*model_id*/) {
model_process_.UnLoad();
return SUCCESS;
}
Status AclSession::ExecuteModel(uint32_t model_id, const RequestBase &request,
Status AclSession::ExecuteModel(uint32_t /*model_id*/, const RequestBase &request,
ReplyBase &reply) { // set d context
aclError rt_ret = aclrtSetCurrentContext(context_);
if (rt_ret != ACL_ERROR_NONE) {
MSI_LOG_ERROR << "set the ascend device context failed";
return FAILED;
}
return model_process_.Execute(request, reply) ? SUCCESS : FAILED;
return model_process_.Execute(request, reply);
}
Status AclSession::PreProcess(uint32_t /*model_id*/, const InferImagesBase *images_input,
ImagesDvppOutput &dvpp_output) {
if (images_input == nullptr) {
MSI_LOG_ERROR << "images input is nullptr";
return FAILED;
}
auto batch_size = images_input->batch_size();
if (batch_size == 0) {
MSI_LOG_ERROR << "invalid batch size " << images_input->batch_size();
return FAILED;
}
std::vector<const void *> pic_buffer_list;
std::vector<size_t> pic_size_list;
for (size_t i = 0; i < batch_size; i++) {
const void *pic_buffer = nullptr;
uint32_t pic_size = 0;
if (!images_input->get(i, pic_buffer, pic_size) || pic_buffer == nullptr || pic_size == 0) {
MSI_LOG_ERROR << "Get request image buffer " << i << " failed";
return FAILED;
}
pic_buffer_list.push_back(pic_buffer);
pic_size_list.push_back(pic_size);
}
auto ret = dvpp_process_.Process(pic_buffer_list, pic_size_list, dvpp_output.buffer_device, dvpp_output.buffer_size);
if (ret != SUCCESS) {
MSI_LOG_ERROR << "dvpp process failed";
return ret;
}
return SUCCESS;
}
Status AclSession::ExecuteModel(uint32_t model_id, const ImagesRequestBase &images_inputs, // images for preprocess
const RequestBase &request, ReplyBase &reply) {
if (!execute_with_dvpp_) {
MSI_LOG_ERROR << "Unexpected images as inputs, DVPP not configured";
return INFER_STATUS(INVALID_INPUTS) << "Unexpected images as inputs, DVPP not configured";
}
aclError rt_ret = aclrtSetCurrentContext(context_);
if (rt_ret != ACL_ERROR_NONE) {
MSI_LOG_ERROR << "set the ascend device context failed";
return FAILED;
}
if (images_inputs.size() != 1) {
MSI_LOG_ERROR << "Only support one input to do DVPP preprocess";
return INFER_STATUS(INVALID_INPUTS) << "Only support one input to do DVPP preprocess";
}
if (images_inputs[0] == nullptr) {
MSI_LOG_ERROR << "Get first images input failed";
return FAILED;
}
if (images_inputs[0]->batch_size() != model_process_.GetBatchSize()) {
MSI_LOG_ERROR << "Input batch size " << images_inputs[0]->batch_size() << " does not match model batch size "
<< model_process_.GetBatchSize();
return INFER_STATUS(INVALID_INPUTS) << "Input batch size " << images_inputs[0]->batch_size()
<< " does not match model batch size " << model_process_.GetBatchSize();
}
if (request.size() != 0) {
MSI_LOG_ERROR << "Only one images input is supported and no extra tensor inputs, given tensor input count " << request.size();
return INFER_STATUS(INVALID_INPUTS) << "Only one images input is supported and no extra tensor inputs, given tensor input count "
<< request.size();
}
ImagesDvppOutput dvpp_output;
Status ret = PreProcess(model_id, images_inputs[0], dvpp_output);
if (ret != SUCCESS) {
MSI_LOG_ERROR << "DVPP preprocess failed";
return ret;
}
ret = model_process_.Execute(dvpp_output.buffer_device, dvpp_output.buffer_size, reply);
if (ret != SUCCESS) {
MSI_LOG_ERROR << "Execute model failed";
return ret;
}
return SUCCESS;
}
Status AclSession::InitEnv(const std::string &device_type, uint32_t device_id) {
@ -95,11 +197,16 @@ Status AclSession::InitEnv(const std::string &device_type, uint32_t device_id) {
model_process_.SetIsDevice(is_device);
MSI_LOG_INFO << "get run mode success is device input/output " << is_device;
if (dvpp_process_.InitResource(stream_) != SUCCESS) {
MSI_LOG_ERROR << "dvpp init resource failed";
return FAILED;
}
MSI_LOG_INFO << "Init acl success, device id " << device_id_;
return SUCCESS;
}
Status AclSession::FinalizeEnv() {
dvpp_process_.Finalize();
aclError ret;
if (stream_ != nullptr) {
ret = aclrtDestroyStream(stream_);

@ -25,9 +25,11 @@
#include "include/inference.h"
#include "serving/acl/model_process.h"
#include "serving/acl/dvpp_process.h"
namespace mindspore {
namespace inference {
class AclSession : public InferSession {
public:
AclSession();
@ -37,6 +39,8 @@ class AclSession : public InferSession {
Status LoadModelFromFile(const std::string &file_name, uint32_t &model_id) override;
Status UnloadModel(uint32_t model_id) override;
Status ExecuteModel(uint32_t model_id, const RequestBase &request, ReplyBase &reply) override;
Status ExecuteModel(uint32_t model_id, const ImagesRequestBase &images_inputs, // images for preprocess
const RequestBase &request, ReplyBase &reply) override;
private:
std::string device_type_;
@ -44,6 +48,10 @@ class AclSession : public InferSession {
aclrtStream stream_ = nullptr;
aclrtContext context_ = nullptr;
ModelProcess model_process_;
bool execute_with_dvpp_ = false;
DvppProcess dvpp_process_;
Status PreProcess(uint32_t model_id, const InferImagesBase *images_input, ImagesDvppOutput &dvpp_output);
};
} // namespace inference
} // namespace mindspore

File diff suppressed because it is too large

@ -0,0 +1,159 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef INC_DVPP_PROCESS_ACL
#define INC_DVPP_PROCESS_ACL
#include <vector>
#include <string>
#include "acl/acl.h"
#include "acl/acl_mdl.h"
#include "acl/acl_rt.h"
#include "acl/ops/acl_dvpp.h"
#include "include/inference.h"
namespace mindspore::inference {
struct DvppDecodePara {
acldvppPixelFormat pixel_format = PIXEL_FORMAT_YUV_SEMIPLANAR_420;
};
struct DvppResizePara {
uint32_t output_width = 0;
uint32_t output_height = 0;
};
enum DvppCropType {
// crop left,top,right,bottom are given in config
kDvppCropTypeOffset = 0,
// crop left,top,right,bottom are calculated from image width/height and output crop width/height
kDvppCropTypeCentre = 1,
};
struct DvppRoiArea {
uint32_t left = 0;
uint32_t top = 0;
uint32_t right = 0;
uint32_t bottom = 0;
};
struct DvppCropInfo {
DvppCropType crop_type = kDvppCropTypeOffset;
DvppRoiArea crop_area; // when kDvppCropTypeOffset
uint32_t crop_width = 0; // when kDvppCropTypeCentre
uint32_t crop_height = 0; // when kDvppCropTypeCentre
};
struct DvppCropPara {
DvppCropInfo crop_info;
uint32_t output_width = 0;
uint32_t output_height = 0;
};
struct DvppCropAndPastePara {
DvppCropInfo crop_info;
DvppRoiArea paste_area;
uint32_t output_width = 0;
uint32_t output_height = 0;
};
class DvppProcess {
public:
DvppProcess();
~DvppProcess();
Status InitResource(aclrtStream stream);
void Finalize();
Status InitJpegDecodePara(const DvppDecodePara &decode_para); // jpeg decode + (resize | crop)
Status InitResizePara(const DvppResizePara &resize_para); // jpeg decode + resize
Status InitCropPara(const DvppCropPara &crop_para); // jpeg decode + crop
Status InitCropAndPastePara(const DvppCropAndPastePara &crop_and_paste_para); // jpeg decode + crop&paste
Status InitWithJsonConfig(const std::string &json_config);
// output device buffer will be destroyed by DvppProcess itself.
Status Process(const void *pic_buffer, size_t pic_buffer_size, void *&output_device_buffer, size_t &output_size);
Status Process(const std::vector<const void *> &pic_buffer_list, const std::vector<size_t> &pic_buffer_size_list,
void *&output_device_buffer, size_t &output_size);
private:
uint32_t pic_width_ = 0;
uint32_t pic_height_ = 0;
DvppDecodePara decode_para_;
DvppResizePara resize_para_;
DvppCropPara crop_para_;
DvppCropAndPastePara crop_and_paste_para_;
// at most one of the resize/crop/crop&paste flags can be true
bool to_resize_flag_ = false;
bool to_crop_flag_ = false;
bool to_crop_and_paste_flag_ = false;
void *input_pic_dev_buffer_ = nullptr;
uint32_t input_pic_buffer_size_ = 0;
uint32_t decode_output_buffer_size_ = 0;
void *decode_output_buffer_dev_ = nullptr;
acldvppPicDesc *decode_output_desc_ = nullptr;
acldvppResizeConfig *resize_config_ = nullptr;
acldvppRoiConfig *crop_area_ = nullptr;
acldvppRoiConfig *paste_area_ = nullptr;
acldvppPicDesc *vpc_output_desc_ = nullptr;
void *vpc_output_buffer_dev_ = nullptr; // vpc_output_buffer_size_ length
uint32_t vpc_output_buffer_size_ = 0;
void *batch_vpc_output_buffer_dev_ = nullptr; // batch_size_ * vpc_output_buffer_size_ length
uint32_t batch_size_ = 0;
aclrtStream stream_ = nullptr;
acldvppChannelDesc *dvpp_channel_desc_ = nullptr;
uint32_t AlignmentHelper(uint32_t org_size, uint32_t alignment) const;
uint32_t GetImageBufferSize(uint32_t stride_width, uint32_t stride_height, acldvppPixelFormat pixel_format) const;
Status GetPicDescStride(uint32_t width, uint32_t height, uint32_t &stride_width, uint32_t &stride_height);
Status GetPicDescStrideDecode(uint32_t width, uint32_t height, uint32_t &stride_width, uint32_t &stride_height);
Status InputInputBuffer(const void *pic_buffer, size_t pic_buffer_size);
Status InitDecodeOutputDesc(uint32_t image_width,
uint32_t image_height); // decode_output_desc_, decode_output_buffer_dev_
Status CheckRoiAreaWidthHeight(uint32_t width, uint32_t height);
Status CheckAndAdjustRoiArea(DvppRoiArea &area);
Status UpdateCropArea(uint32_t image_width, uint32_t image_height);
Status CheckResizeImageInfo(uint32_t image_width, uint32_t image_height) const;
void DestroyDecodeDesc();
Status InitVpcOutputDesc(uint32_t output_width, uint32_t output_height,
acldvppPixelFormat pixel_format); // vpc_output_desc_, vpc_output_buffer_dev_batch_
Status InitRoiAreaConfig(acldvppRoiConfig *&roi_area, const DvppRoiArea &init_para);
Status InitCommonCropPara(DvppCropInfo &crop_info, uint32_t out_width, uint32_t out_height);
Status InitResizeOutputDesc(); // vpc_output_desc_, vpc_output_buffer_dev_, resize_config
Status InitCropOutputDesc(); // vpc_output_desc_, vpc_output_buffer_dev_, crop_area_
Status InitCropAndPasteOutputDesc(); // vpc_output_desc_, vpc_output_buffer_dev_, crop_area_, paste_area_
void DestroyVpcOutputDesc();
Status ProcessDecode();
Status ProcessResize();
Status ProcessCrop();
Status ProcessCropAndPaste();
void DestroyResource();
Status GetJpegWidthHeight(const void *pic_buffer, size_t pic_buffer_size, uint32_t &image_width,
uint32_t &image_height);
};
} // namespace mindspore::inference
#endif // INC_DVPP_PROCESS_ACL
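
As a rough usage sketch (the stream, the JPEG buffer, and the config path below are assumptions, not part of this change), the interface declared above is used in three steps: initialize the DVPP channel on an existing ACL stream, load the JSON config describing decode plus resize/crop/crop&paste, then pass encoded pictures in and receive a device buffer laid out for the model input:

#include <cstddef>
#include <vector>
#include "serving/acl/dvpp_process.h"

// Hypothetical flow mirroring AclSession, where dvpp would normally be a long-lived member.
void PreprocessAndRun(aclrtStream stream, const void *jpeg_data, size_t jpeg_size) {
  using mindspore::inference::SUCCESS;
  mindspore::inference::DvppProcess dvpp;
  if (dvpp.InitResource(stream) != SUCCESS) {
    return;  // DVPP channel could not be created
  }
  if (dvpp.InitWithJsonConfig("/models/resnet50_dvpp_config.json") == SUCCESS) {  // assumed path
    std::vector<const void *> pics = {jpeg_data};
    std::vector<size_t> sizes = {jpeg_size};
    void *out_device_buffer = nullptr;  // owned and released by DvppProcess itself
    size_t out_size = 0;
    if (dvpp.Process(pics, sizes, out_device_buffer, out_size) == SUCCESS) {
      // out_device_buffer/out_size are now ready for ModelProcess::Execute(buffer, size, reply).
    }
  }
  dvpp.Finalize();  // release the channel and any buffers held by DvppProcess
}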

@ -0,0 +1,68 @@
{
"preprocess": [
{
"input": {
"index": 0
},
"decode_para": {
"out_pixel_format": "YUV420SP"
},
"dvpp_process": {
"op_name": "resize",
"out_width": 224,
"out_height": 224
},
"sample of dvpp_process content": [
{
"op_name": "resize",
"out_width": 224,
"out_height": 224
},
{
"op_name": "crop",
"crop_type": "offset",
"crop_left": 10,
"crop_top": 10,
"crop_right": 100,
"crop_bottom": 200,
"out_width": 224,
"out_height": 224
},
{
"op_name": "crop",
"crop_type": "centre",
"crop_width": 100,
"crop_height": 100,
"out_width": 224,
"out_height": 224
},
{
"op_name": "crop_and_paste",
"crop_type": "offset",
"crop_left": 10,
"crop_top": 10,
"crop_right": 100,
"crop_bottom": 200,
"paste_left": 10,
"paste_top": 10,
"paste_right": 100,
"paste_bottom": 200,
"out_width": 224,
"out_height": 224
},
{
"op_name": "crop_and_paste",
"crop_type": "centre",
"crop_width": 100,
"crop_height": 100,
"paste_left": 10,
"paste_top": 10,
"paste_right": 100,
"paste_bottom": 200,
"out_width": 224,
"out_height": 224
}
]
}
]
}
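
The config above is picked up automatically: AclSession::LoadModelFromFile strips the model file extension and looks for <model_name>_dvpp_config.json next to the model file. A minimal sketch of driving that path through the public InferSession API (the device name and model path are placeholder assumptions):

#include <cstdint>
#include <memory>
#include "include/inference.h"

int main() {
  // In the ACL build of the serving backend, CreateSession constructs an AclSession.
  auto session = mindspore::inference::InferSession::CreateSession("Ascend", 0);
  if (session == nullptr) {
    return 1;
  }
  uint32_t model_id = 0;
  // If /models/resnet50_dvpp_config.json exists, it is parsed and DVPP preprocessing is
  // enabled for this model; otherwise the model accepts plain tensor inputs.
  auto ret = session->LoadModelFromFile("/models/resnet50.om", model_id);
  return ret == mindspore::inference::SUCCESS ? 0 : 1;
}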

File diff suppressed because it is too large

@ -21,7 +21,6 @@
#include "acl/acl.h"
#include "acl/acl_mdl.h"
#include "acl/acl_rt.h"
#include "serving/core/util/status.h"
#include "include/inference.h"
namespace mindspore {
@ -34,21 +33,30 @@ struct AclTensorInfo {
std::vector<int64_t> dims;
};
struct ImagesDvppOutput {
void *buffer_device = nullptr;
size_t buffer_size = 0;
size_t input_index = 0;
};
class ModelProcess {
public:
ModelProcess() {}
~ModelProcess() {}
bool LoadModelFromFile(const std::string &file_name, uint32_t &model_id);
Status LoadModelFromFile(const std::string &file_name, uint32_t &model_id);
void UnLoad();
// override this method to avoid request/reply data copy
bool Execute(const RequestBase &request, ReplyBase &reply);
Status Execute(const RequestBase &request, ReplyBase &reply);
Status Execute(const void *dvpp_outputs_buffer_dev, size_t dvpp_outputs_buffer_size, ReplyBase &reply);
void SetIsDevice(bool is_device) { is_run_on_device_ = is_device; }
size_t GetBatchSize() const;
private:
uint32_t model_id_ = 0xffffffff;
// if running on device (AICPU), there is no need to allocate device memory or copy inputs to / outputs from the device
bool is_run_on_device_ = false;
aclmdlDesc *model_desc_ = nullptr;
aclmdlDataset *inputs_ = nullptr;
@ -56,12 +64,15 @@ class ModelProcess {
std::vector<AclTensorInfo> input_infos_;
std::vector<AclTensorInfo> output_infos_;
bool CreateDataBuffer(void *&data_mem_buffer, size_t buffer_size, aclmdlDataset *dataset);
bool CheckAndInitInput(const RequestBase &request);
bool BuildOutputs(ReplyBase &reply);
Status PreInitModelResource();
Status CreateDataBuffer(void *&data_mem_buffer, size_t buffer_size, aclmdlDataset *dataset);
Status CheckAndInitInput(const RequestBase &request);
Status CheckAndInitDvppInput(const void *dvpp_outputs_buffer_dev, size_t dvpp_outputs_buffer_size,
size_t input_index);
Status BuildOutputs(ReplyBase &reply);
bool InitInputsBuffer();
bool InitOutputsBuffer();
Status InitInputsBuffer();
Status InitOutputsBuffer();
void DestroyInputsDataset();
void DestroyInputsDataMem();
void DestroyInputsBuffer();

@ -31,7 +31,6 @@
#include "core/version_control/version_controller.h"
#include "core/util/file_system_operation.h"
#include "core/serving_tensor.h"
#include "util/status.h"
using ms_serving::MSService;
using ms_serving::PredictReply;
@ -45,7 +44,7 @@ namespace serving {
{ \
auto time_end_##name = std::chrono::steady_clock::now(); \
auto time_cost = std::chrono::duration<double, std::milli>(time_end_##name - time_start_##name).count(); \
MSI_LOG_INFO << #name " Time Cost " << time_cost << "ms ---------------------"; \
MSI_LOG_INFO << #name " Time Cost # " << time_cost << " ms ---------------------"; \
}
Status Session::CreatDeviceSession(const std::string &device, uint32_t device_id) {
@ -75,15 +74,26 @@ Status Session::Predict(const PredictRequest &request, PredictReply &reply) {
std::lock_guard<std::mutex> lock(mutex_);
MSI_LOG(INFO) << "run Predict";
ServingRequest serving_request(request);
ServingReply serving_reply(reply);
if (request.images_size() > 0) {
ServingImagesRequest serving_images(request);
ServingRequest serving_request(request);
ServingReply serving_reply(reply);
Status ret = session_->ExecuteModel(graph_id_, serving_images, serving_request, serving_reply);
if (ret != SUCCESS) {
MSI_LOG(ERROR) << "execute model with images failed";
return ret;
}
} else if (request.data_size() > 0) {
ServingRequest serving_request(request);
ServingReply serving_reply(reply);
Status ret = session_->ExecuteModel(graph_id_, serving_request, serving_reply);
if (ret != SUCCESS) {
MSI_LOG(ERROR) << "execute model with data failed";
return ret;
}
}
auto ret = session_->ExecuteModel(graph_id_, serving_request, serving_reply);
MSI_LOG(INFO) << "run Predict finished";
if (Status(ret) != SUCCESS) {
MSI_LOG(ERROR) << "execute model return failed";
return Status(ret);
}
return SUCCESS;
}
@ -98,9 +108,9 @@ Status Session::Warmup(const MindSporeModelPtr model) {
MSI_TIME_STAMP_START(LoadModelFromFile)
auto ret = session_->LoadModelFromFile(file_name, graph_id_);
MSI_TIME_STAMP_END(LoadModelFromFile)
if (Status(ret) != SUCCESS) {
if (ret != SUCCESS) {
MSI_LOG(ERROR) << "Load graph model failed, file name is " << file_name.c_str();
return Status(ret);
return ret;
}
model_loaded_ = true;
MSI_LOG(INFO) << "Session Warmup finished";
@ -123,14 +133,19 @@ std::promise<void> exit_requested;
void ClearEnv() { Session::Instance().Clear(); }
void HandleSignal(int sig) { exit_requested.set_value(); }
grpc::Status CreatGRPCStatus(Status status) {
switch (status) {
grpc::Status CreatGRPCStatus(const Status &status) {
switch (status.StatusCode()) {
case SUCCESS:
return grpc::Status::OK;
case FAILED:
return grpc::Status::CANCELLED;
case INVALID_INPUTS:
return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "The Predict Inputs do not match the Model Request!");
case INVALID_INPUTS: {
auto status_msg = status.StatusMessage();
if (status_msg.empty()) {
status_msg = "The Predict Inputs do not match the Model Request!";
}
return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, status_msg);
}
default:
return grpc::Status::CANCELLED;
}

@ -31,6 +31,10 @@ namespace serving {
using ms_serving::PredictReply;
using ms_serving::PredictRequest;
using inference::Status;
using inference::SUCCESS;
using inference::FAILED;
using inference::INVALID_INPUTS;
class Session {
public:

@ -120,7 +120,7 @@ ServingRequest::ServingRequest(const ms_serving::PredictRequest &request) : requ
[](const ms_serving::Tensor &item) { return ServingTensor(const_cast<ms_serving::Tensor &>(item)); });
}
size_t ServingRequest::size() const { return request_.data_size(); }
size_t ServingRequest::size() const { return cache_.size(); }
const InferTensorBase *ServingRequest::operator[](size_t index) const {
if (index >= cache_.size()) {
@ -130,6 +130,22 @@ const InferTensorBase *ServingRequest::operator[](size_t index) const {
return &(cache_[index]);
}
ServingImages::ServingImages(const ms_serving::Images &images) : images_(images) {}
size_t ServingImages::batch_size() const { return images_.images_size(); }
bool ServingImages::get(size_t index, const void *&pic_buffer, uint32_t &pic_size) const {
if (index >= static_cast<size_t>(images_.images_size())) {
MSI_LOG_ERROR << "visit invalid index " << index << " total size " << images_.images_size();
return false;
}
pic_buffer = images_.images(index).data();
pic_size = images_.images(index).size();
return true;
}
size_t ServingImages::input_index() const { return static_cast<size_t>(images_.input_index()); }
size_t ServingReply::size() const { return cache_.size(); }
InferTensorBase *ServingReply::operator[](size_t index) {
@ -160,5 +176,21 @@ InferTensorBase *ServingReply::add() {
void ServingReply::clear() { reply_.mutable_result()->Clear(); }
ServingImagesRequest::ServingImagesRequest(const ms_serving::PredictRequest &request) : request_(request) {
auto &images_inputs = request_.images();
std::transform(images_inputs.begin(), images_inputs.end(), std::back_inserter(cache_),
[](const ms_serving::Images &item) { return ServingImages(const_cast<ms_serving::Images &>(item)); });
}
size_t ServingImagesRequest::size() const { return cache_.size(); }
const inference::InferImagesBase *ServingImagesRequest::operator[](size_t index) const {
if (index >= cache_.size()) {
MSI_LOG_ERROR << "visit invalid index " << index << " total size " << cache_.size();
return nullptr;
}
return &(cache_[index]);
}
} // namespace serving
} // namespace mindspore

@ -47,6 +47,18 @@ class MS_API ServingTensor : public inference::InferTensorBase {
ms_serving::Tensor &tensor_;
};
class ServingImages : public inference::InferImagesBase {
public:
explicit ServingImages(const ms_serving::Images &images);
size_t batch_size() const override;
bool get(size_t index, const void *&pic_buffer, uint32_t &pic_size) const override;
size_t input_index() const override;
private:
const ms_serving::Images &images_;
};
class ServingRequest : public inference::RequestBase {
public:
explicit ServingRequest(const ms_serving::PredictRequest &request);
@ -74,6 +86,18 @@ class ServingReply : public inference::ReplyBase {
std::vector<ServingTensor> cache_;
};
class ServingImagesRequest : public inference::ImagesRequestBase {
public:
explicit ServingImagesRequest(const ms_serving::PredictRequest &request);
size_t size() const override;
const inference::InferImagesBase *operator[](size_t index) const override;
private:
const ms_serving::PredictRequest &request_;
std::vector<ServingImages> cache_;
};
} // namespace serving
} // namespace mindspore
#endif // MINDSPORE_SERVING_TENSOR_H_

@ -15,10 +15,14 @@
*/
#ifndef MINDSPORE_STATUS_H
#define MINDSPORE_STATUS_H
#include "include/inference.h"
namespace mindspore {
namespace serving {
using Status = uint32_t;
enum ServingStatus { SUCCESS = 0, FAILED, INVALID_INPUTS };
using inference::Status;
using inference::SUCCESS;
using inference::FAILED;
using inference::INVALID_INPUTS;
} // namespace serving
} // namespace mindspore

@ -20,17 +20,19 @@ syntax = "proto3";
package ms_serving;
service MSService {
rpc Predict(PredictRequest) returns (PredictReply) {}
rpc Test(PredictRequest) returns (PredictReply) {}
rpc Predict(PredictRequest) returns (PredictReply) {}
rpc Test(PredictRequest) returns (PredictReply) {}
}
message PredictRequest {
repeated Tensor data = 1;
repeated Tensor data = 1;
repeated Images images = 2;
}
message PredictReply {
repeated Tensor result = 1;
repeated Tensor result = 1;
}
enum DataType {
MS_UNKNOWN = 0;
MS_BOOL = 1;
@ -62,3 +64,7 @@ message Tensor {
bytes data = 3;
}
message Images{
repeated bytes images = 1;
uint32 input_index = 2;
}
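
On the client side, the new images field carries raw encoded pictures instead of pre-decoded tensors. A rough sketch of filling it through the generated C++ protobuf API (the file-reading helper and the field values are assumptions for illustration):

#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include "ms_service.pb.h"

// Assumed helper: read one file (e.g. a JPEG) into a byte string.
static std::string ReadFileToString(const std::string &path) {
  std::ifstream in(path, std::ios::binary);
  std::ostringstream buffer;
  buffer << in.rdbuf();
  return buffer.str();
}

ms_serving::PredictRequest BuildImagesRequest(const std::vector<std::string> &jpeg_files) {
  ms_serving::PredictRequest request;
  auto *images = request.add_images();  // one Images entry groups the batch for one model input
  images->set_input_index(0);           // index of the model input these pictures feed
  for (const auto &file : jpeg_files) {
    images->add_images(ReadFileToString(file));  // each repeated bytes entry is one encoded picture
  }
  return request;  // sent via the MSService.Predict rpc
}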

@ -51,6 +51,10 @@ else()
endif()
endforeach ()
endif()
# remove serving ut sources from UT_SRCS; they are built separately through the serving subdirectory below
file(GLOB_RECURSE SERVING_ACL_UT_SRCS RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} serving/*.cc)
list(REMOVE_ITEM UT_SRCS ${SERVING_ACL_UT_SRCS})
add_subdirectory(serving)
file(GLOB_RECURSE MINDSPORE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}
"../../../mindspore/core/base/*.cc"
@ -163,7 +167,7 @@ file(GLOB_RECURSE UT_SUTB_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}
"stub/ge/*.cc"
)
add_executable(ut_tests ${UT_SRCS} ${MINDSPORE_SRC_LIST} ${UT_SUTB_SRC_LIST})
add_executable(ut_tests ${UT_SRCS} ${MINDSPORE_SRC_LIST} ${UT_SUTB_SRC_LIST} $<TARGET_OBJECTS:ut_serving_obj>)
if (ENABLE_GE)
if(ENABLE_TRAIN)
@ -188,3 +192,14 @@ if (USE_GLOG)
endif()
target_link_libraries(ut_tests PRIVATE securec graph)
# link grpc
if (EXISTS ${grpc_ROOT}/lib64)
set(gRPC_DIR "${grpc_ROOT}/lib64/cmake/grpc")
else ()
set(gRPC_DIR "${grpc_ROOT}/lib/cmake/grpc")
endif ()
find_package(gRPC CONFIG REQUIRED)
target_link_libraries(ut_tests PRIVATE gRPC::grpc++)
target_link_libraries(ut_tests PRIVATE gRPC::grpc++_reflection)
target_link_libraries(ut_tests PRIVATE protobuf::libprotobuf)

@ -0,0 +1,90 @@
find_package(Threads REQUIRED)
# This branch assumes that gRPC and all its dependencies are already installed
# on this system, so they can be located by find_package().
# Find Protobuf installation
# Looks for protobuf-config.cmake file installed by Protobuf's cmake installation.
#set(protobuf_MODULE_COMPATIBLE TRUE)
#find_package(Protobuf CONFIG REQUIRED)
#message(STATUS "Using protobuf ${protobuf_VERSION}")
add_library(protobuf::libprotobuf ALIAS protobuf::protobuf)
add_executable(protobuf::libprotoc ALIAS protobuf::protoc)
set(_PROTOBUF_LIBPROTOBUF protobuf::libprotobuf)
if (CMAKE_CROSSCOMPILING)
find_program(_PROTOBUF_PROTOC protoc)
else ()
set(_PROTOBUF_PROTOC $<TARGET_FILE:protobuf::protoc>)
endif ()
# Find gRPC installation
# Looks for gRPCConfig.cmake file installed by gRPC's cmake installation.
if (EXISTS ${grpc_ROOT}/lib64)
set(gRPC_DIR "${grpc_ROOT}/lib64/cmake/grpc")
else ()
set(gRPC_DIR "${grpc_ROOT}/lib/cmake/grpc")
endif ()
message("serving ut using gRPC_DIR : " ${gRPC_DIR})
find_package(gRPC CONFIG REQUIRED)
message(STATUS "Using gRPC ${gRPC_VERSION}")
set(_GRPC_GRPCPP gRPC::grpc++)
set(_REFLECTION gRPC::grpc++_reflection)
if (CMAKE_CROSSCOMPILING)
find_program(_GRPC_CPP_PLUGIN_EXECUTABLE grpc_cpp_plugin)
find_program(_GRPC_PYTHON_PLUGIN_EXECUTABLE grpc_python_plugin)
else ()
set(_GRPC_CPP_PLUGIN_EXECUTABLE $<TARGET_FILE:gRPC::grpc_cpp_plugin>)
set(_GRPC_PYTHON_PLUGIN_EXECUTABLE $<TARGET_FILE:gRPC::grpc_python_plugin>)
endif ()
# Proto file
get_filename_component(hw_proto "ms_service.proto" ABSOLUTE)
get_filename_component(hw_proto_path ${hw_proto} PATH)
# Generated sources
set(hw_proto_srcs "${CMAKE_CURRENT_BINARY_DIR}/ms_service.pb.cc")
set(hw_proto_hdrs "${CMAKE_CURRENT_BINARY_DIR}/ms_service.pb.h")
set(hw_grpc_srcs "${CMAKE_CURRENT_BINARY_DIR}/ms_service.grpc.pb.cc")
set(hw_grpc_hdrs "${CMAKE_CURRENT_BINARY_DIR}/ms_service.grpc.pb.h")
set(hw_py_pb2 "${CMAKE_CURRENT_BINARY_DIR}/ms_service_pb2.py")
set(hw_py_pb2_grpc "${CMAKE_CURRENT_BINARY_DIR}/ms_service_pb2_grpc.py")
add_custom_command(
OUTPUT "${hw_proto_srcs}" "${hw_proto_hdrs}" "${hw_grpc_srcs}" "${hw_grpc_hdrs}" "${hw_py_pb2}" "${hw_py_pb2_grpc}"
COMMAND ${_PROTOBUF_PROTOC}
ARGS --grpc_out "${CMAKE_CURRENT_BINARY_DIR}"
--cpp_out "${CMAKE_CURRENT_BINARY_DIR}"
-I "${hw_proto_path}"
--plugin=protoc-gen-grpc="${_GRPC_CPP_PLUGIN_EXECUTABLE}"
"${hw_proto}"
COMMAND ${_PROTOBUF_PROTOC}
ARGS --grpc_out "${CMAKE_CURRENT_BINARY_DIR}"
--python_out "${CMAKE_CURRENT_BINARY_DIR}"
-I "${hw_proto_path}"
--plugin=protoc-gen-grpc="${_GRPC_PYTHON_PLUGIN_EXECUTABLE}"
"${hw_proto}"
DEPENDS "${hw_proto}")
list(APPEND SERVING_SRC_TEST ${hw_proto_srcs} ${hw_grpc_srcs})
file(GLOB_RECURSE ACL_SESSION_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}
"../../../../serving/acl/*.cc"
"../../../../serving/core/*.cc")
list(APPEND SERVING_SRC_TEST ${ACL_SESSION_SRC_LIST})
# utest files
file(GLOB_RECURSE ACL_UTEST_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "*.cc")
list(APPEND SERVING_SRC_TEST ${ACL_UTEST_SRC_LIST})
include_directories(${CMAKE_SOURCE_DIR}/serving/core)
include_directories(${CMAKE_SOURCE_DIR}/serving/acl)
include_directories(${CMAKE_SOURCE_DIR}/serving)
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
include_directories(${CMAKE_CURRENT_BINARY_DIR})
include_directories(${CMAKE_CURRENT_BINARY_DIR}/../)
add_library(ut_serving_obj OBJECT ${SERVING_SRC_TEST})

Some files were not shown because too many files have changed in this diff
