!7524 remove OpenCLKernel's member op_format_

Merge pull request !7524 from 王东旭/remove_op_format
pull/7524/MERGE
mindspore-ci-bot committed 4 years ago via Gitee
commit f887618662

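Summary of the change: the configurable op_format_ member is removed from OpenCLKernel, along with the per-kernel in_ori_format_ / out_ori_format_ bookkeeping and the SetFormat calls that depended on it. Every kernel below now assumes the NHWC4 image layout: kernel names take the _NHWC4 suffix directly, the NC4HW4 and NC4 branches are deleted, and many per-op GetImageSize overrides disappear. The headers also receive routine C++11 cleanup (defaulted destructors, brace-initialized members, helpers moved to private).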
@@ -123,7 +123,7 @@ __kernel void transpose_0231_NHWC4(__read_only image2d_t src_data, __write_only
if (X >= shape.y || 4 * Y >= shape.z || 4 * Z >= shape.w) {
return;
}
int W4 = UP_DIV(shape.y, 4);
int W4 = UP_DIV(shape.z, 4);
int C4 = UP_DIV(shape.w, 4);
FLT4 src0 = READ_IMAGE(src_data, smp_zero, (int2)(X * W4 + Y, 4 * Z));
FLT4 src1 = (FLT4)0.f;

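The transpose hunk above is a real bug fix, not just cleanup: W4 counts the 4-element slices along the width axis, so it must divide shape.z (W), not shape.y (H). A minimal sketch, assuming UP_DIV is the usual ceiling-division macro from MindSpore's nnacl headers (the macro itself is not part of this diff):

#define UP_DIV(x, y) (((x) + (y) - 1) / (y)) /* assumed nnacl-style definition */

/* With shape = {N, H, W, C}:
 *   W4 = UP_DIV(shape.z, 4)  -- number of 4-wide slices along W
 *   C4 = UP_DIV(shape.w, 4)  -- number of 4-deep slices along C
 * Computing W4 from shape.y (H) mis-addressed every
 * READ_IMAGE(src_data, smp_zero, (int2)(X * W4 + Y, 4 * Z))
 * whenever H != W. */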
@@ -61,12 +61,12 @@ int ActivationOpenClKernel::Init() {
MS_LOG(ERROR) << "Activate fun only support dim=4 or 2, but your dim=" << in_size_;
return mindspore::lite::RET_ERROR;
}
std::map<int, std::string> Program_Kernel{{ActivationType_LEAKY_RELU, "LeakyRelu"},
{ActivationType_RELU, "Relu"},
{ActivationType_SIGMOID, "Sigmoid"},
{ActivationType_RELU6, "Relu6"},
{ActivationType_TANH, "Tanh"}};
if (Program_Kernel.count(type_) == 0) {
std::map<int, std::string> kernel_names{{ActivationType_LEAKY_RELU, "LeakyRelu"},
{ActivationType_RELU, "Relu"},
{ActivationType_SIGMOID, "Sigmoid"},
{ActivationType_RELU6, "Relu6"},
{ActivationType_TANH, "Tanh"}};
if (kernel_names.count(type_) == 0) {
MS_LOG(ERROR) << "schema::ActivationType:" << type_ << "not found";
return mindspore::lite::RET_ERROR;
}
@@ -75,12 +75,8 @@ int ActivationOpenClKernel::Init() {
std::set<std::string> build_options;
std::string program_name = "Activation";
ocl_runtime_->LoadSource(program_name, source);
std::string kernel_name = Program_Kernel[type_];
std::string kernel_name = kernel_names[type_];
ocl_runtime_->BuildKernel(kernel_, program_name, kernel_name, build_options);
in_ori_format_ = in_tensors_[0]->GetFormat();
out_ori_format_ = out_tensors_[0]->GetFormat();
in_tensors_[0]->SetFormat(op_format_);
out_tensors_[0]->SetFormat(op_format_);
MS_LOG(DEBUG) << op_parameter_->name_ << " init Done!";
return mindspore::lite::RET_OK;
}
@@ -107,32 +103,12 @@ int ActivationOpenClKernel::Run() {
cl_int4 ActivationOpenClKernel::GetImg2dShape() {
cl_int4 img2d_shape = {1, 1, 1, 1};
if (op_format_ == schema::Format_NHWC4) {
img2d_shape.s[1] = nhwc_shape_[1];
img2d_shape.s[2] = nhwc_shape_[2] * UP_DIV(nhwc_shape_[3], C4NUM);
img2d_shape.s[3] = C4NUM;
}
if (op_format_ == schema::Format_NC4HW4) {
img2d_shape.s[1] = UP_DIV(nhwc_shape_[3], C4NUM) * nhwc_shape_[1];
img2d_shape.s[2] = nhwc_shape_[2];
img2d_shape.s[3] = C4NUM;
}
img2d_shape.s[1] = nhwc_shape_[1];
img2d_shape.s[2] = nhwc_shape_[2] * UP_DIV(nhwc_shape_[3], C4NUM);
img2d_shape.s[3] = C4NUM;
return img2d_shape;
}
int ActivationOpenClKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size) {
cl_int4 img_shape = GetImg2dShape();
size_t img_dtype = CL_FLOAT;
if (enable_fp16_) {
img_dtype = CL_HALF_FLOAT;
}
img_size->clear();
img_size->push_back(img_shape.s[2]);
img_size->push_back(img_shape.s[1]);
img_size->push_back(img_dtype);
return mindspore::lite::RET_OK;
}
kernel::LiteKernel *OpenClActivationKernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const lite::InnerContext *ctx, const kernel::KernelKey &desc,

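With the format branches gone, GetImg2dShape above always yields the NHWC4 extent. A hedged sketch of that computation, assuming nhwc_shape_ holds {N, H, W, C} and C4NUM == 4 (Nhwc4ImageExtent is an illustrative name, not from the source):

#include <cstddef>

inline size_t UpDiv(size_t x, size_t y) { return (x + y - 1) / y; }

// image width  = W * ceil(C / 4)  -> img2d_shape.s[2]
// image height = H                -> img2d_shape.s[1]
void Nhwc4ImageExtent(size_t h, size_t w, size_t c, size_t *img_w, size_t *img_h) {
  *img_w = w * UpDiv(c, 4);
  *img_h = h;
}

For example, H=16, W=8, C=10 gives a 24x16 image of 4-channel pixels, matching what GetImageSize pushes into img_size above.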
@@ -26,27 +26,25 @@ namespace mindspore::kernel {
class ActivationOpenClKernel : public OpenCLKernel {
public:
explicit ActivationOpenClKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs)
: OpenCLKernel(parameter, inputs, outputs) {
type_ = (reinterpret_cast<ActivationParameter *>(parameter))->type_;
alpha_ = (reinterpret_cast<ActivationParameter *>(parameter))->alpha_;
}
~ActivationOpenClKernel() override{};
ActivationOpenClKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs)
: OpenCLKernel(parameter, inputs, outputs),
type_(reinterpret_cast<ActivationParameter *>(parameter)->type_),
alpha_(reinterpret_cast<ActivationParameter *>(parameter)->alpha_) {}
~ActivationOpenClKernel() override = default;
int Init() override;
int Run() override;
int GetImageSize(size_t idx, std::vector<size_t> *img_size) override;
cl_int4 GetImg2dShape();
void InitBuffer() {}
private:
cl_int4 GetImg2dShape();
cl::Kernel kernel_;
int type_;
float alpha_;
int in_size_;
int out_size_;
size_t fp_size;
int in_size_{};
int out_size_{};
size_t fp_size{};
bool enable_fp16_{false};
std::vector<size_t> nhwc_shape_;
};

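The header edits here (and in the files that follow) are mechanical C++11 modernization with no behavior change. The idioms, illustrated on a made-up struct:

// illustrative only, not from the diff
struct Example {
  ~Example() = default;      // replaces an empty user-provided "~Example() {}"
  int in_size_{};            // value-initialized to 0; a plain "int in_size_;" was indeterminate
  bool enable_fp16_{false};  // default stated at the declaration
};

The redundant explicit on multi-parameter constructors is also dropped, and helpers such as GetImg2dShape() move under private:.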
@@ -34,79 +34,35 @@ using mindspore::schema::PrimitiveType_Eltwise;
namespace mindspore::kernel {
ArithmeticOpenCLKernel::~ArithmeticOpenCLKernel() {}
std::vector<size_t> ArithmeticOpenCLKernel::InitGlobalSize() const {
const size_t global_x = out_tensors_[0]->Width();
const size_t global_y = out_tensors_[0]->Height();
const size_t global_z = UP_ROUND_DIV(out_tensors_[0]->Channel(), 4);
std::vector<size_t> global = {global_x, global_y, global_z};
return global;
auto out_shape = out_tensors_[0]->shape();
if (out_shape.size() == 2) {
const size_t global_x = 1;
const size_t global_y = 1;
const size_t global_z = UP_ROUND_DIV(out_shape[1], C4NUM);
std::vector<size_t> global = {global_x, global_y, global_z};
return global;
} else {
const size_t global_x = out_shape[2];
const size_t global_y = out_shape[1];
const size_t global_z = UP_ROUND_DIV(out_shape[3], C4NUM);
std::vector<size_t> global = {global_x, global_y, global_z};
return global;
}
}
void ArithmeticOpenCLKernel::Image2dGetWorkGroupSize() {
local_size_ = {16, 16};
if (out_tensors_[0]->shape().size() == 2) {
size_t H = out_tensors_[0]->shape()[0];
size_t W = UP_DIV(out_tensors_[0]->shape()[1], C4NUM);
global_size_ = {W, H};
return;
}
if (out_tensors_[0]->GetFormat() == schema::Format_NC4HW4) {
size_t H = out_tensors_[0]->Batch() * out_tensors_[0]->Height() * UP_DIV(out_tensors_[0]->Channel(), C4NUM);
size_t W = out_tensors_[0]->Width();
global_size_ = {W, H};
} else if (out_tensors_[0]->GetFormat() == schema::Format_NHWC4) {
size_t H = out_tensors_[0]->Batch() * out_tensors_[0]->Height();
size_t W = out_tensors_[0]->Width() * UP_DIV(out_tensors_[0]->Channel(), C4NUM);
global_size_ = {W, H};
} else if (out_tensors_[0]->GetFormat() == schema::Format_NC4) {
size_t H = out_tensors_[0]->Batch();
size_t W = UP_DIV(out_tensors_[0]->Channel(), C4NUM);
auto out_shape = out_tensors_[0]->shape();
if (out_shape.size() == 2) {
size_t H = out_shape[0];
size_t W = UP_DIV(out_shape[1], C4NUM);
global_size_ = {W, H};
} else {
MS_LOG(ERROR) << "Unsupport data format " << out_tensors_[0]->GetFormat();
}
}
void ArithmeticOpenCLKernel::BufferGetWorkGroupSize() {
uint32_t element_num = out_tensors_[0]->ElementsC4Num();
global_size_ = {element_num};
}
int ArithmeticOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size) {
size_t im_dst_x, im_dst_y;
if (out_tensors_[0]->shape().size() == 2) {
im_dst_x = UP_DIV(out_tensors_[0]->shape()[1], C4NUM);
im_dst_y = out_tensors_[0]->shape()[0];
} else {
if (out_tensors_[0]->GetFormat() == schema::Format_NC4HW4) {
im_dst_x = out_tensors_[0]->Width();
im_dst_y = out_tensors_[0]->Batch() * out_tensors_[0]->Height() * UP_DIV(out_tensors_[0]->Channel(), C4NUM);
} else if (out_tensors_[0]->GetFormat() == schema::Format_NHWC4) {
im_dst_x = out_tensors_[0]->Width() * UP_DIV(out_tensors_[0]->Channel(), C4NUM);
im_dst_y = out_tensors_[0]->Batch() * out_tensors_[0]->Height();
} else if (out_tensors_[0]->GetFormat() == schema::Format_NC4) {
im_dst_y = out_tensors_[0]->Batch();
im_dst_x = UP_DIV(out_tensors_[0]->Channel(), C4NUM);
} else {
MS_LOG(ERROR) << "Unsupport data format " << out_tensors_[0]->GetFormat();
return RET_ERROR;
}
}
size_t img_dtype = CL_FLOAT;
if (in_tensors_[0]->data_type() == kNumberTypeFloat16) {
img_dtype = CL_HALF_FLOAT;
} else if (in_tensors_[0]->data_type() == kNumberTypeFloat32) {
img_dtype = CL_FLOAT;
} else {
MS_LOG(ERROR) << "Unsupport data type " << in_tensors_[0]->data_type();
size_t H = out_shape[0] * out_shape[1];
size_t W = out_shape[2] * UP_DIV(out_shape[3], C4NUM);
global_size_ = {W, H};
}
img_size->clear();
std::vector<size_t> vec{im_dst_x, im_dst_y, img_dtype};
*img_size = vec;
return RET_OK;
}
int ArithmeticOpenCLKernel::InitBuffer() {
@@ -119,7 +75,7 @@ int ArithmeticOpenCLKernel::InitBuffer() {
inputs_weight_ptrs_.push_back(nullptr);
} else {
auto allocator = ocl_runtime_->GetAllocator();
std::vector<size_t> img_size = GetImage2dShapeFromNHWC(nhwc_shape, op_format_);
std::vector<size_t> img_size = GetImage2dShapeFromNHWC(nhwc_shape, schema::Format_NHWC4);
int pack_weight_size = img_size[0] * img_size[1] * C4NUM;
int plane = nhwc_shape[1] * nhwc_shape[2];
int channel = nhwc_shape[3];
@@ -132,22 +88,12 @@ int ArithmeticOpenCLKernel::InitBuffer() {
return RET_ERROR;
}
memset(weight, 0x00, pack_weight_size * data_size);
if (op_format_ == schema::Format_NHWC4) {
if (in_tensor_->data_type() == kNumberTypeFloat32) {
std::function<float(float)> to_dtype = [](float x) -> float { return x; };
PackNHWCToNHWC4<float, float>(in_tensor_->data_c(), weight, batch, plane, channel, to_dtype);
} else if (in_tensor_->data_type() == kNumberTypeFloat16) {
std::function<float(float16_t)> to_dtype = [](float16_t x) -> float { return static_cast<float>(x); };
PackNHWCToNHWC4<float16_t, float>(in_tensor_->data_c(), weight, batch, plane, channel, to_dtype);
}
} else if (op_format_ == schema::Format_NC4HW4) {
if (in_tensor_->data_type() == kNumberTypeFloat32) {
std::function<float(float)> to_dtype = [](float x) -> float { return x; };
PackNHWCToNC4HW4<float, float>(in_tensor_->data_c(), weight, batch, plane, channel, to_dtype);
} else if (in_tensor_->data_type() == kNumberTypeFloat16) {
std::function<float(float16_t)> to_dtype = [](float16_t x) -> float { return static_cast<float>(x); };
PackNHWCToNC4HW4<float16_t, float>(in_tensor_->data_c(), weight, batch, plane, channel, to_dtype);
}
if (in_tensor_->data_type() == kNumberTypeFloat32) {
std::function<float(float)> to_dtype = [](float x) -> float { return x; };
PackNHWCToNHWC4<float, float>(in_tensor_->data_c(), weight, batch, plane, channel, to_dtype);
} else if (in_tensor_->data_type() == kNumberTypeFloat16) {
std::function<float(float16_t)> to_dtype = [](float16_t x) -> float { return static_cast<float>(x); };
PackNHWCToNHWC4<float16_t, float>(in_tensor_->data_c(), weight, batch, plane, channel, to_dtype);
}
if (batch * plane * channel == 1) {
// scalar
@@ -163,22 +109,12 @@ int ArithmeticOpenCLKernel::InitBuffer() {
return RET_ERROR;
}
memset(weight, 0x00, pack_weight_size * data_size);
if (op_format_ == schema::Format_NHWC4) {
if (in_tensor_->data_type() == kNumberTypeFloat32) {
std::function<float16_t(float)> to_dtype = [](float x) -> float16_t { return static_cast<float16_t>(x); };
PackNHWCToNHWC4<float, float16_t>(in_tensor_->data_c(), weight, batch, plane, channel, to_dtype);
} else if (in_tensor_->data_type() == kNumberTypeFloat16) {
std::function<float16_t(float16_t)> to_dtype = [](float16_t x) -> float16_t { return x; };
PackNHWCToNHWC4<float16_t, float16_t>(in_tensor_->data_c(), weight, batch, plane, channel, to_dtype);
}
} else if (op_format_ == schema::Format_NC4HW4) {
if (in_tensor_->data_type() == kNumberTypeFloat32) {
std::function<float16_t(float)> to_dtype = [](float x) -> float16_t { return static_cast<float16_t>(x); };
PackNHWCToNC4HW4<float, float16_t>(in_tensor_->data_c(), weight, batch, plane, channel, to_dtype);
} else if (in_tensor_->data_type() == kNumberTypeFloat16) {
std::function<float16_t(float16_t)> to_dtype = [](float16_t x) -> float16_t { return x; };
PackNHWCToNC4HW4<float16_t, float16_t>(in_tensor_->data_c(), weight, batch, plane, channel, to_dtype);
}
if (in_tensor_->data_type() == kNumberTypeFloat32) {
std::function<float16_t(float)> to_dtype = [](float x) -> float16_t { return static_cast<float16_t>(x); };
PackNHWCToNHWC4<float, float16_t>(in_tensor_->data_c(), weight, batch, plane, channel, to_dtype);
} else if (in_tensor_->data_type() == kNumberTypeFloat16) {
std::function<float16_t(float16_t)> to_dtype = [](float16_t x) -> float16_t { return x; };
PackNHWCToNHWC4<float16_t, float16_t>(in_tensor_->data_c(), weight, batch, plane, channel, to_dtype);
}
if (batch * plane * channel == 1) {
// scalar
@@ -195,18 +131,11 @@ int ArithmeticOpenCLKernel::InitBuffer() {
int ArithmeticOpenCLKernel::Init() {
std::string kernel_name;
const ArithmeticParameter *arithmetic_parameter = reinterpret_cast<const ArithmeticParameter *>(op_parameter_);
auto *arithmetic_parameter = reinterpret_cast<const ArithmeticParameter *>(op_parameter_);
if (arithmetic_parameter->broadcasting_) {
element_flag_ = false;
if (op_format_ == schema::Format_NHWC4) {
kernel_name = "BroadcastNHWC4";
} else {
kernel_name = "BroadcastNC4HW4";
MS_LOG(ERROR) << "Don't support BroadcastNC4HW4 yet";
return RET_ERROR;
}
kernel_name = "BroadcastNHWC4";
} else {
kernel_name = "Element";
}
@@ -302,17 +231,6 @@ int ArithmeticOpenCLKernel::Init() {
return error_code;
}
auto format = schema::Format::Format_NHWC4;
if (arithmetic_parameter->ndim_ == 2) {
format = schema::Format::Format_NC4;
}
in_ori_format_ = in_tensors_[0]->GetFormat();
out_ori_format_ = out_tensors_[0]->GetFormat();
in_tensors_[0]->SetFormat(format);
if (element_flag_ && in_tensors_[1]->category() != lite::Tensor::Category::CONST) {
in_tensors_[1]->SetFormat(format);
}
out_tensors_[0]->SetFormat(format);
Image2dGetWorkGroupSize();
InitBuffer();
MS_LOG(DEBUG) << kernel_name << " Init Done!";

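The InitBuffer hunks above delete the NC4HW4 packing branches outright, leaving only PackNHWCToNHWC4. A minimal sketch of what that packing does, assuming it mirrors nnacl's helper (the signature is reconstructed from the call sites above, so treat it as illustrative):

#include <functional>

template <typename SRC, typename DST>
void PackNHWCToNHWC4(const void *src_raw, void *dst_raw, int batch, int plane,
                     int channel, const std::function<DST(SRC)> &to_dtype) {
  auto src = static_cast<const SRC *>(src_raw);
  auto dst = static_cast<DST *>(dst_raw);
  const int c4 = ((channel + 3) / 4) * 4;  // channel rounded up to a multiple of 4
  for (int b = 0; b < batch; ++b) {
    for (int p = 0; p < plane; ++p) {
      for (int c = 0; c < channel; ++c) {
        dst[(b * plane + p) * c4 + c] = to_dtype(src[(b * plane + p) * channel + c]);
      }
      // channels [channel, c4) keep the zeros written by the preceding memset
    }
  }
}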
@@ -25,20 +25,18 @@ namespace mindspore::kernel {
class ArithmeticOpenCLKernel : public OpenCLKernel {
public:
explicit ArithmeticOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
ArithmeticOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
: OpenCLKernel(parameter, inputs, outputs) {}
~ArithmeticOpenCLKernel() override;
~ArithmeticOpenCLKernel() override = default;
int Init() override;
int Run() override;
int GetImageSize(size_t idx, std::vector<size_t> *img_size) override;
int InitBuffer() override;
private:
std::vector<size_t> InitGlobalSize() const;
void Image2dGetWorkGroupSize();
void BufferGetWorkGroupSize();
int InitBuffer();
cl::Kernel kernel_;
bool element_flag_{true};

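Several headers in this change turn InitBuffer into "int InitBuffer() override;", which implies the OpenCLKernel base class gained a virtual hook in a hunk not shown here; presumably something like:

// assumed addition to the OpenCLKernel base class (not shown in this diff)
virtual int InitBuffer() { return mindspore::lite::RET_OK; }  // default: nothing to pack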
@@ -41,38 +41,6 @@ using mindspore::schema::PrimitiveType_Square;
namespace mindspore::kernel {
int ArithmeticSelfOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size) {
auto out_shape = out_tensors_[0]->shape();
size_t CO4 = UP_DIV(out_tensors_[0]->Channel(), C4NUM);
size_t im_dst_x, im_dst_y;
if (in_tensors_[0]->GetFormat() == schema::Format_NHWC4) {
if (in_tensors_[0]->shape().size() == 4) {
im_dst_x = out_tensors_[0]->Width() * CO4;
im_dst_y = out_tensors_[0]->Height() * out_tensors_[0]->Batch();
} else {
im_dst_x = UP_DIV(out_shape[1], C4NUM);
im_dst_y = out_tensors_[0]->Batch();
}
} else {
if (in_tensors_[0]->shape().size() == 4) {
im_dst_y = out_tensors_[0]->Batch() * out_tensors_[0]->Height() * CO4;
im_dst_x = out_tensors_[0]->Width();
} else {
im_dst_y = out_tensors_[0]->Batch() * UP_DIV(out_shape[1], C4NUM);
im_dst_x = 1;
}
}
size_t img_dtype = CL_FLOAT;
auto enable_fp16_ = ocl_runtime_->GetFp16Enable();
if (enable_fp16_) {
img_dtype = CL_HALF_FLOAT;
}
img_size->clear();
std::vector<size_t> vec{im_dst_x, im_dst_y, img_dtype};
*img_size = vec;
return mindspore::lite::RET_OK;
}
void ArithmeticSelfOpenCLKernel::GetKernelName(std::string *kernel_name, ArithmeticSelfParameter *param) {
switch (param->op_parameter_.type_) {
case PrimitiveType_Abs:
@@ -126,24 +94,9 @@ int ArithmeticSelfOpenCLKernel::Init() {
}
auto param = reinterpret_cast<ArithmeticSelfParameter *>(this->op_parameter_);
auto in_format = op_format_;
if (in_format != schema::Format_NHWC4 && in_format != schema::Format_NC4HW4 && in_format != schema::Format_NC4) {
MS_LOG(ERROR) << "input format(" << in_format << ") "
<< "format not support!";
return mindspore::lite::RET_ERROR;
}
in_ori_format_ = in_tensors_[0]->GetFormat();
in_tensors_[0]->SetFormat(op_format_);
out_ori_format_ = out_tensors_[0]->GetFormat();
out_tensors_[0]->SetFormat(op_format_);
std::string kernel_name = "ArithmeticSelf";
GetKernelName(&kernel_name, param);
if (in_format == schema::Format_NC4HW4) {
kernel_name += "_NC4HW4";
} else if (in_format == schema::Format_NHWC4) {
kernel_name += "_NHWC4";
}
kernel_name += "_NHWC4";
MS_LOG(DEBUG) << "execute kernel name : " << kernel_name;
std::set<std::string> build_options;
std::string source = arithmeticself_source;
@@ -154,8 +107,6 @@ int ArithmeticSelfOpenCLKernel::Init() {
return mindspore::lite::RET_OK;
}
int ArithmeticSelfOpenCLKernel::ReSize() { return mindspore::lite::RET_OK; }
void ArithmeticSelfGetWorkGroup(const std::vector<size_t> &global, std::vector<size_t> *local, int max_size) {
const int max_divider = 8;
const int max_x = 4, max_y = 8;

@@ -26,23 +26,19 @@ namespace mindspore::kernel {
class ArithmeticSelfOpenCLKernel : public OpenCLKernel {
public:
explicit ArithmeticSelfOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs)
ArithmeticSelfOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs)
: OpenCLKernel(parameter, inputs, outputs) {}
~ArithmeticSelfOpenCLKernel() override{};
~ArithmeticSelfOpenCLKernel() override = default;
int Init() override;
int ReSize() override;
int Run() override;
int GetImageSize(size_t idx, std::vector<size_t> *img_size) override;
private:
void GetKernelName(std::string *kernel_name, ArithmeticSelfParameter *param);
private:
cl::Kernel kernel_;
};

@@ -32,18 +32,12 @@ using mindspore::schema::PrimitiveType_BatchToSpaceND;
namespace mindspore::kernel {
int BatchToSpaceNDOpenCLKernel::Init() {
std::string kernel_name = "batch_to_space_nd";
auto in_format = op_format_;
std::string kernel_name = "batch_to_space_nd_NHWC4";
if (in_tensors_[0]->shape().size() != 4 && out_tensors_[0]->shape().size() != 4) {
MS_LOG(ERROR) << "input/output shape size must be 4, actual: " << in_tensors_[0]->shape().size() << ", "
<< out_tensors_[0]->shape().size();
return RET_ERROR;
}
if (in_format != schema::Format_NHWC4 && in_format != schema::Format_NC4HW4) {
MS_LOG(ERROR) << "input format(" << in_format << ") "
<< "format not support!";
return RET_ERROR;
}
auto *param = reinterpret_cast<BatchToSpaceParameter *>(this->op_parameter_);
if (param->block_shape_[0] < 1 || param->block_shape_[1] < 1) {
MS_LOG(ERROR) << "block_sizes_ must > 1, actual " << param->block_shape_[0] << ", " << param->block_shape_[1];
@@ -55,18 +49,10 @@ int BatchToSpaceNDOpenCLKernel::Init() {
return RET_ERROR;
}
in_ori_format_ = in_tensors_[0]->GetFormat();
out_ori_format_ = out_tensors_[0]->GetFormat();
in_tensors_[0]->SetFormat(op_format_);
out_tensors_[0]->SetFormat(op_format_);
#ifdef PROGRAM_WITH_IL
kernel_ = ocl_runtime_->GetKernelFromBinary(kernel_name);
#else
if (in_format == schema::Format_NC4HW4) {
kernel_name += "_NC4HW4";
} else {
kernel_name += "_NHWC4";
}
std::set<std::string> build_options;
std::string source = batch_to_space_nd_source;
std::string program_name = "batch_to_space_nd";
@@ -76,28 +62,7 @@ int BatchToSpaceNDOpenCLKernel::Init() {
MS_LOG(DEBUG) << kernel_name << " Init Done!";
return RET_OK;
}
int BatchToSpaceNDOpenCLKernel::InitBuffer() { return RET_OK; }
int BatchToSpaceNDOpenCLKernel::ReSize() { return RET_OK; }
int BatchToSpaceNDOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size) {
size_t CO4 = UP_DIV(out_tensors_[0]->Channel(), C4NUM);
size_t im_dst_x, im_dst_y;
if (in_tensors_[0]->GetFormat() == schema::Format::Format_NHWC4) {
im_dst_x = out_tensors_[0]->Width() * CO4;
im_dst_y = out_tensors_[0]->Height() * out_tensors_[0]->Batch();
} else {
im_dst_y = out_tensors_[0]->Batch() * out_tensors_[0]->Height() * CO4;
im_dst_x = out_tensors_[0]->Width();
}
size_t img_dtype = CL_FLOAT;
auto enable_fp16_ = ocl_runtime_->GetFp16Enable();
if (enable_fp16_) {
img_dtype = CL_HALF_FLOAT;
}
img_size->clear();
std::vector<size_t> vec{im_dst_x, im_dst_y, img_dtype};
*img_size = std::move(vec);
return RET_OK;
}
int BatchToSpaceNDOpenCLKernel::Run() {
MS_LOG(DEBUG) << this->name() << " Running! ";
auto param = reinterpret_cast<BatchToSpaceParameter *>(this->op_parameter_);

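Note the pattern that repeats from here on: per-op GetImageSize bodies that branched on the tensor format are deleted wholesale. With every kernel pinned to NHWC4 there is only one image-size formula left, so a single shared implementation suffices (presumably hoisted into common code; the hunk adding it is not among the files shown).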
@@ -25,22 +25,16 @@ namespace mindspore::kernel {
class BatchToSpaceNDOpenCLKernel : public OpenCLKernel {
public:
explicit BatchToSpaceNDOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs)
BatchToSpaceNDOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs)
: OpenCLKernel(parameter, inputs, outputs) {}
~BatchToSpaceNDOpenCLKernel() override{};
~BatchToSpaceNDOpenCLKernel() override = default;
int Init() override;
int ReSize() override;
int Run() override;
int GetImageSize(size_t idx, std::vector<size_t> *img_size) override;
int InitBuffer();
private:
cl::Kernel kernel_;
};

@@ -30,45 +30,8 @@ using mindspore::schema::PrimitiveType_BatchNorm;
namespace mindspore::kernel {
int BatchNormOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size) {
size_t CO4 = UP_DIV(out_tensors_[0]->Channel(), C4NUM);
size_t im_dst_x, im_dst_y;
if (in_tensors_[0]->GetFormat() == schema::Format::Format_NHWC4) {
im_dst_x = out_tensors_[0]->Width() * CO4;
im_dst_y = out_tensors_[0]->Height();
} else {
im_dst_y = out_tensors_[0]->Batch() * out_tensors_[0]->Height() * CO4;
im_dst_x = out_tensors_[0]->Width();
}
size_t img_dtype = CL_FLOAT;
auto enable_fp16_ = ocl_runtime_->GetFp16Enable();
if (enable_fp16_) {
img_dtype = CL_HALF_FLOAT;
}
img_size->clear();
std::vector<size_t> vec{im_dst_x, im_dst_y, img_dtype};
*img_size = vec;
return RET_OK;
}
int BatchNormOpenCLKernel::Init() {
auto in_format = op_format_;
if (in_format != schema::Format_NHWC4 && in_format != schema::Format_NC4HW4) {
MS_LOG(ERROR) << "input format(" << in_format << ") "
<< "format not support!";
return RET_ERROR;
}
in_ori_format_ = in_tensors_[0]->GetFormat();
in_tensors_[0]->SetFormat(op_format_);
out_ori_format_ = out_tensors_[0]->GetFormat();
out_tensors_[0]->SetFormat(op_format_);
std::string kernel_name = "Batch_normalization";
if (in_format == schema::Format_NC4HW4) {
kernel_name += "_NC4HW4";
} else if (in_format == schema::Format_NHWC4) {
kernel_name += "_NHWC4";
}
std::string kernel_name = "Batch_normalization_NHWC4";
std::set<std::string> build_options;
std::string source = batchnorm_source;
std::string program_name = "Batch_normalization";
@@ -78,8 +41,6 @@ int BatchNormOpenCLKernel::Init() {
return RET_OK;
}
int BatchNormOpenCLKernel::ReSize() { return RET_OK; }
void BatchNormGetWorkGroup(const std::vector<size_t> &global, std::vector<size_t> *local, int max_size) {
const int max_divider = 8;
const int max_x = 4, max_y = 8;

@@ -25,20 +25,16 @@ namespace mindspore::kernel {
class BatchNormOpenCLKernel : public OpenCLKernel {
public:
explicit BatchNormOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs)
BatchNormOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs)
: OpenCLKernel(parameter, inputs, outputs) {}
~BatchNormOpenCLKernel() override{};
~BatchNormOpenCLKernel() override = default;
int Init() override;
int ReSize() override;
int Run() override;
int GetImageSize(size_t idx, std::vector<size_t> *img_size) override;
private:
cl::Kernel kernel_;
};

@@ -35,7 +35,7 @@ using mindspore::schema::PrimitiveType_BiasAdd;
namespace mindspore::kernel {
void BiasAddOpenCLKernel::InitBuffer() {
int BiasAddOpenCLKernel::InitBuffer() {
int C = in_tensors_[1]->shape()[0];
int div_ci = UP_DIV(C, C4NUM);
auto allocator = ocl_runtime_->GetAllocator();
@@ -49,6 +49,7 @@ void BiasAddOpenCLKernel::InitBuffer() {
memset(BiasAdd_, 0x00, div_ci * C4NUM * fp_size);
memcpy(BiasAdd_, in_tensors_[1]->data_c(), C * fp_size);
allocator->UnmapBuffer(BiasAdd_);
return RET_OK;
}
int BiasAddOpenCLKernel::Init() {
@@ -77,10 +78,6 @@ int BiasAddOpenCLKernel::Init() {
ocl_runtime_->LoadSource(program_name, source);
ocl_runtime_->BuildKernel(kernel_, program_name, kernel_name, build_options);
in_ori_format_ = in_tensors_[0]->GetFormat();
out_ori_format_ = out_tensors_[0]->GetFormat();
in_tensors_[0]->SetFormat(op_format_);
out_tensors_[0]->SetFormat(op_format_);
MS_LOG(DEBUG) << program_name << " Init Done!";
return mindspore::lite::RET_OK;
}
@@ -95,7 +92,7 @@ int BiasAddOpenCLKernel::Run() {
ocl_runtime_->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->data_c());
ocl_runtime_->SetKernelArg(kernel_, arg_idx++, input_shape_);
ocl_runtime_->SetKernelArg(kernel_, arg_idx++, BiasAdd_);
ocl_runtime_->SetKernelArg(kernel_, arg_idx++, data_type[op_format_]);
ocl_runtime_->SetKernelArg(kernel_, arg_idx++, data_type[schema::Format::Format_NHWC4]);
std::vector<size_t> local = {1, 1};
std::vector<size_t> global = {static_cast<size_t>(global_size.s[1]), static_cast<size_t>(global_size.s[2])};
auto ret = ocl_runtime_->RunKernel(kernel_, global, local, nullptr);
@@ -108,32 +105,10 @@ int BiasAddOpenCLKernel::Run() {
cl_int4 BiasAddOpenCLKernel::GetGlobalshape() {
cl_int4 global_shape = input_shape_;
if (op_format_ == schema::Format::Format_NC4) {
global_shape.s[1] = global_shape.s[2];
global_shape.s[2] = UP_DIV(global_shape.s[3], C4NUM);
}
if (op_format_ == schema::Format::Format_NC4HW4) {
global_shape.s[1] = UP_DIV(global_shape.s[3], C4NUM) * global_shape.s[1]; // c / 4 * H
}
if (op_format_ == schema::Format::Format_NHWC4) {
global_shape.s[2] = UP_DIV(global_shape.s[3], C4NUM) * global_shape.s[2];
}
global_shape.s[2] = UP_DIV(global_shape.s[3], C4NUM) * global_shape.s[2];
return global_shape;
}
int BiasAddOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size) {
cl_int4 img_shape = GetGlobalshape();
size_t img_dtype = CL_FLOAT;
if (enable_fp16_) {
img_dtype = CL_HALF_FLOAT;
}
img_size->clear();
img_size->push_back(img_shape.s[2]);
img_size->push_back(img_shape.s[1]);
img_size->push_back(img_dtype);
return mindspore::lite::RET_OK;
}
kernel::LiteKernel *OpenCLBiasAddKernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const lite::InnerContext *ctx, const kernel::KernelKey &desc,

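BiasAdd's GetGlobalshape collapses its three format cases into the NHWC4 one. A sketch of the surviving computation, assuming input_shape_ carries {N, H, W, C} and cl_int4 is the OpenCL vector type (the standalone function name is illustrative):

cl_int4 GetGlobalshapeNHWC4(cl_int4 shape) {
  // fold the channel slices into the width axis: W' = UP_DIV(C, 4) * W
  shape.s[2] = ((shape.s[3] + 3) / 4) * shape.s[2];
  return shape;
}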
@@ -28,25 +28,25 @@ namespace mindspore::kernel {
class BiasAddOpenCLKernel : public OpenCLKernel {
public:
explicit BiasAddOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs)
BiasAddOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs)
: OpenCLKernel(parameter, inputs, outputs) {}
~BiasAddOpenCLKernel() override{};
~BiasAddOpenCLKernel() override = default;
int Init() override;
int Run() override;
int GetImageSize(size_t idx, std::vector<size_t> *img_size) override;
void InitBuffer();
cl_int4 GetGlobalshape();
int InitBuffer() override;
private:
cl_int4 GetGlobalshape();
cl::Kernel kernel_;
void *BiasAdd_;
int in_size_;
int out_size_;
size_t fp_size;
cl_int4 input_shape_;
bool enable_fp16_{false};
void *BiasAdd_{nullptr};
int in_size_{};
int out_size_{};
size_t fp_size{};
cl_int4 input_shape_{};
bool enable_fp16_{};
};
} // namespace mindspore::kernel

@@ -30,27 +30,6 @@ using mindspore::schema::PrimitiveType_Cast;
namespace mindspore::kernel {
int CastOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size) {
size_t CO4 = UP_DIV(out_tensors_[0]->Channel(), C4NUM);
size_t im_dst_x, im_dst_y;
if (in_tensors_[0]->GetFormat() == schema::Format::Format_NHWC4) {
im_dst_x = out_tensors_[0]->Width() * CO4;
im_dst_y = out_tensors_[0]->Height();
} else {
im_dst_y = out_tensors_[0]->Batch() * out_tensors_[0]->Height() * CO4;
im_dst_x = out_tensors_[0]->Width();
}
size_t img_dtype = CL_FLOAT;
auto enable_fp16_ = ocl_runtime_->GetFp16Enable();
if (enable_fp16_) {
img_dtype = CL_HALF_FLOAT;
}
img_size->clear();
std::vector<size_t> vec{im_dst_x, im_dst_y, img_dtype};
*img_size = vec;
return RET_OK;
}
int CastOpenCLKernel::GetKernelName(std::string *kernel_name, CastParameter *param) {
if (param->src_type_ == kNumberTypeFloat32 && param->dst_type_ == kNumberTypeFloat16) {
kernel_name[0] += "_Fp32ToFp16";
@@ -65,23 +44,9 @@ int CastOpenCLKernel::GetKernelName(std::string *kernel_name, CastParameter *par
int CastOpenCLKernel::Init() {
auto param = reinterpret_cast<CastParameter *>(this->op_parameter_);
auto in_format = op_format_;
if (in_format != schema::Format_NHWC4 && in_format != schema::Format_NC4HW4) {
MS_LOG(ERROR) << "input format(" << in_format << ") "
<< "format not support!";
return RET_ERROR;
}
in_ori_format_ = in_tensors_[0]->GetFormat();
in_tensors_[0]->SetFormat(op_format_);
out_ori_format_ = out_tensors_[0]->GetFormat();
out_tensors_[0]->SetFormat(op_format_);
std::string kernel_name = "Cast";
GetKernelName(&kernel_name, param);
if (in_format == schema::Format_NC4HW4) {
kernel_name += "_NC4HW4";
} else if (in_format == schema::Format_NHWC4) {
kernel_name += "_NHWC4";
}
kernel_name += "_NHWC4";
std::set<std::string> build_options;
std::string source = cast_source;
std::string program_name = "cast";
@@ -91,8 +56,6 @@ int CastOpenCLKernel::Init() {
return RET_OK;
}
int CastOpenCLKernel::ReSize() { return RET_OK; }
void CastGetWorkGroup(const std::vector<size_t> &global, std::vector<size_t> *local, int max_size) {
const int max_divider = 8;
const int max_x = 4, max_y = 8;

@@ -26,23 +26,19 @@ namespace mindspore::kernel {
class CastOpenCLKernel : public OpenCLKernel {
public:
explicit CastOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs)
CastOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs)
: OpenCLKernel(parameter, inputs, outputs) {}
~CastOpenCLKernel() override{};
~CastOpenCLKernel() override = default;
int Init() override;
int ReSize() override;
int Run() override;
private:
int GetKernelName(std::string *kernel_name, CastParameter *param);
int GetImageSize(size_t idx, std::vector<size_t> *img_size) override;
private:
cl::Kernel kernel_;
};

@@ -31,27 +31,6 @@ using mindspore::schema::PrimitiveType_Concat;
namespace mindspore::kernel {
int ConcatOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size) {
size_t CO4 = UP_DIV(out_tensors_[0]->Channel(), C4NUM);
size_t im_dst_x, im_dst_y;
if (in_tensors_[0]->GetFormat() == schema::Format::Format_NHWC4) {
im_dst_x = out_tensors_[0]->Width() * CO4;
im_dst_y = out_tensors_[0]->Height() * out_tensors_[0]->Batch();
} else {
im_dst_y = out_tensors_[0]->Batch() * out_tensors_[0]->Height() * CO4;
im_dst_x = out_tensors_[0]->Width();
}
size_t img_dtype = CL_FLOAT;
auto enable_fp16_ = ocl_runtime_->GetFp16Enable();
if (enable_fp16_) {
img_dtype = CL_HALF_FLOAT;
}
img_size->clear();
std::vector<size_t> vec{im_dst_x, im_dst_y, img_dtype};
*img_size = vec;
return RET_OK;
}
int ConcatOpenCLKernel::RunAxis0() {
auto allocator_ = ocl_runtime_->GetAllocator();
std::vector<size_t> img_size;
@@ -85,39 +64,15 @@ int ConcatOpenCLKernel::Init() {
MS_LOG(ERROR) << " only support axis >= 0 and axis <= 3 ";
return RET_ERROR;
}
auto in_format = op_format_;
if (in_format != schema::Format_NHWC4 && in_format != schema::Format_NC4HW4) {
MS_LOG(ERROR) << "input format(" << in_format << ") "
<< "format not support!";
return RET_ERROR;
}
in_ori_format_ = in_tensors_[0]->GetFormat();
in_tensors_[0]->SetFormat(op_format_);
out_ori_format_ = out_tensors_[0]->GetFormat();
out_tensors_[0]->SetFormat(op_format_);
std::string kernel_name = "Concat";
if (in_tensors_.size() == 2) {
kernel_name += "2inputaxis";
kernel_name += std::to_string(param->axis_);
} else if (in_tensors_.size() == 3) {
kernel_name += "3inputaxis";
kernel_name += std::to_string(param->axis_);
} else if (in_tensors_.size() == 4) {
kernel_name += "4inputaxis";
kernel_name += std::to_string(param->axis_);
} else if (in_tensors_.size() == 6) {
kernel_name += "6inputaxis";
kernel_name += std::to_string(param->axis_);
if (in_tensors_.size() == 2 || in_tensors_.size() == 3 || in_tensors_.size() == 4 || in_tensors_.size() == 6) {
kernel_name += std::to_string(in_tensors_.size()) + "inputaxis" + std::to_string(param->axis_);
} else {
MS_LOG(ERROR) << "input count must be 2, 3, 4 or 6";
return RET_ERROR;
}
if (in_format == schema::Format_NC4HW4) {
kernel_name += "_NC4HW4";
} else if (in_format == schema::Format_NHWC4) {
kernel_name += "_NHWC4";
}
kernel_name += "_NHWC4";
MS_LOG(DEBUG) << "kernel_name=: " << kernel_name;
std::set<std::string> build_options;
std::string source = concat_source;
@@ -128,16 +83,13 @@ int ConcatOpenCLKernel::Init() {
return RET_OK;
}
int ConcatOpenCLKernel::ReSize() { return RET_OK; }
int ConcatOpenCLKernel::IntegraShapeToXYZ() {
auto in_format = op_format_;
if (out_tensors_[0]->shape().size() > 4 || out_tensors_[0]->shape().size() <= 0) {
if (out_tensors_[0]->shape().size() > 4 || out_tensors_[0]->shape().empty()) {
MS_LOG(ERROR) << "in_tensors_.shape() must between 0~4";
return RET_ERROR;
}
if (in_format == schema::Format_NHWC4 || in_format == schema::Format_NC4HW4) {
if (out_tensors_[0]->shape().size() == 4) {
for (int i = 0; i < in_tensors_.size(); ++i) {
cl_int4 temp_cl;
auto temp = in_tensors_[i]->shape();

@@ -25,28 +25,24 @@ namespace mindspore::kernel {
class ConcatOpenCLKernel : public OpenCLKernel {
public:
explicit ConcatOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs)
ConcatOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs)
: OpenCLKernel(parameter, inputs, outputs) {}
~ConcatOpenCLKernel() override{};
~ConcatOpenCLKernel() override = default;
int Init() override;
int ReSize() override;
int Run() override;
private:
int RunAxis0();
int GetImageSize(size_t idx, std::vector<size_t> *img_size) override;
int IntegraShapeToXYZ();
private:
cl::Kernel kernel_;
std::vector<cl_int3> XYZShape;
cl_int4 shape_nhwc;
cl_int4 shape_nhwc{};
};
} // namespace mindspore::kernel

@@ -38,8 +38,7 @@ int Conv2dTransposeOpenCLKernel::Init() {
MS_LOG(ERROR) << "only support kernel - stride == 2 * pad";
return RET_ERROR;
}
std::string kernel_name = "conv2d_transpose";
kernel_name += "_" + std::string(EnumNameFormat(op_format_));
std::string kernel_name = "conv2d_transpose_NHWC4";
enable_fp16_ = ocl_runtime_->GetFp16Enable();
#ifdef PROGRAM_WITH_IL
kernel_ = ocl_runtime_->GetKernelFromBinary(kernel_name);
@@ -51,20 +50,14 @@ int Conv2dTransposeOpenCLKernel::Init() {
ocl_runtime_->BuildKernel(kernel_, program_name, kernel_name, build_options);
#endif
PadWeight();
in_ori_format_ = in_tensors_[0]->GetFormat();
in_tensors_[0]->SetFormat(op_format_);
out_ori_format_ = out_tensors_[0]->GetFormat();
out_tensors_[0]->SetFormat(op_format_);
MS_LOG(DEBUG) << kernel_name << " Init Done!";
return mindspore::lite::RET_OK;
}
int Conv2dTransposeOpenCLKernel::ReSize() { return mindspore::lite::RET_OK; }
void Conv2dTransposeOpenCLKernel::PadWeight() {
ConvParameter *param = reinterpret_cast<ConvParameter *>(op_parameter_);
int ci = in_tensors_[0]->Channel();
int co = out_tensors_[0]->Channel();
int ci = in_tensors_[0]->shape()[3];
int co = out_tensors_[0]->shape()[3];
int kh = param->kernel_h_;
int kw = param->kernel_w_;
int div_ci = UP_DIV(ci, C4NUM);
@@ -147,32 +140,6 @@ void Conv2dTransposeOpenCLKernel::PadWeight() {
allocator->UnmapBuffer(bias_);
}
int Conv2dTransposeOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size) {
size_t im_dst_x, im_dst_y;
int n = out_tensors_[0]->shape()[0];
int h = out_tensors_[0]->shape()[1];
int w = out_tensors_[0]->shape()[2];
int c = out_tensors_[0]->shape()[3];
if (op_format_ == schema::Format::Format_NHWC4) {
im_dst_x = w * UP_DIV(c, C4NUM);
im_dst_y = n * h;
} else if (op_format_ == schema::Format::Format_NC4HW4) {
im_dst_x = w;
im_dst_y = n * UP_DIV(c, C4NUM) * h;
} else {
MS_LOG(ERROR) << "not support op format:" << EnumNameFormat(op_format_);
return mindspore::lite::RET_ERROR;
}
size_t img_dtype = CL_FLOAT;
if (enable_fp16_) {
img_dtype = CL_HALF_FLOAT;
}
img_size->clear();
std::vector<size_t> vec{im_dst_x, im_dst_y, img_dtype};
*img_size = vec;
return mindspore::lite::RET_OK;
}
int Conv2dTransposeOpenCLKernel::Run() {
MS_LOG(DEBUG) << this->name() << " Running!";
ConvParameter *param = reinterpret_cast<ConvParameter *>(op_parameter_);

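The switch from Channel() to shape()[3] in PadWeight follows from dropping the SetFormat calls: the tensors keep their original NHWC shapes through Init, so indexing shape()[3] reads the channel count directly instead of going through the format-aware Channel() accessor.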
@@ -27,21 +27,20 @@ namespace mindspore::kernel {
class Conv2dTransposeOpenCLKernel : public OpenCLKernel {
public:
explicit Conv2dTransposeOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs)
Conv2dTransposeOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs)
: OpenCLKernel(parameter, inputs, outputs) {}
~Conv2dTransposeOpenCLKernel() override{};
~Conv2dTransposeOpenCLKernel() override = default;
int Init() override;
int ReSize() override;
int Run() override;
void PadWeight();
int GetImageSize(size_t idx, std::vector<size_t> *img_size) override;
private:
void PadWeight();
cl::Kernel kernel_;
void *padWeight_;
void *bias_;
void *padWeight_{nullptr};
void *bias_{nullptr};
bool enable_fp16_{false};
};
} // namespace mindspore::kernel

@@ -29,8 +29,6 @@ using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_Conv2D;
using mindspore::schema::Format::Format_NC4HW4;
using mindspore::schema::Format::Format_NCHW;
using mindspore::schema::Format::Format_NHWC;
using mindspore::schema::Format::Format_NHWC4;
namespace mindspore::kernel {
@@ -46,14 +44,6 @@ int ConvolutionOpenCLKernel::Init() {
auto input_tensor = in_tensors_[0];
auto output_tensor = out_tensors_[0];
in_ori_format_ = input_tensor->GetFormat();
out_ori_format_ = output_tensor->GetFormat();
if (op_format_ != Format_NHWC4 && op_format_ != Format_NC4HW4) {
MS_LOG(ERROR) << "op_format_ " << op_format_ << " not support!";
return RET_ERROR;
}
input_tensor->SetFormat(op_format_);
output_tensor->SetFormat(op_format_);
batch_size_ = input_tensor->Batch();
CI_ = input_tensor->Channel();
@@ -112,7 +102,7 @@ int ConvolutionOpenCLKernel::Init() {
winograd_mem1_ = allocator->Malloc(size, {width, height, img_dtype});
}
this->InitBuffer();
InitBuffer();
MS_LOG(DEBUG) << "Convolution Init Done!";
return RET_OK;
@@ -251,30 +241,6 @@ int ConvolutionOpenCLKernel::InitBuffer() {
return RET_OK;
}
int ConvolutionOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size) {
size_t im_dst_x, im_dst_y;
if (in_tensors_[0]->GetFormat() == Format_NHWC4) {
if (OW_ * CO_SLICES_ <= MAX_IMAGE2D_SIZE) {
{
im_dst_y = batch_size_ * OH_;
im_dst_x = OW_ * CO_SLICES_;
}
} else {
im_dst_y = OW_;
im_dst_x = batch_size_ * OH_ * CO_SLICES_;
}
} else {
im_dst_y = batch_size_ * CO_SLICES_ * OH_;
im_dst_x = OW_;
}
size_t img_dtype = use_fp16_ ? CL_HALF_FLOAT : CL_FLOAT;
img_size->clear();
img_size->push_back(im_dst_x);
img_size->push_back(im_dst_y);
img_size->push_back(img_dtype);
return RET_OK;
}
int ConvolutionOpenCLKernel::Run() {
MS_LOG(DEBUG) << this->name() << " Running!";

@@ -23,22 +23,22 @@
#include "src/runtime/kernel/opencl/opencl_kernel.h"
#include "schema/model_generated.h"
#include "nnacl/conv_parameter.h"
#include "schema/ops_generated.h"
namespace mindspore::kernel {
class ConvolutionOpenCLKernel : public OpenCLKernel {
public:
explicit ConvolutionOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs)
ConvolutionOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs)
: OpenCLKernel(parameter, inputs, outputs) {}
~ConvolutionOpenCLKernel() override{};
~ConvolutionOpenCLKernel() override = default;
int Init() override;
int Run() override;
int GetImageSize(size_t idx, std::vector<size_t> *img_size) override;
int InitBuffer() override;
private:
int InitBuffer();
int InitWeight();
int InitBias();
int GenerateWinogradWeight();
@@ -92,7 +92,8 @@ class ConvolutionOpenCLKernel : public OpenCLKernel {
return code_id;
}
bool use_fp16_ = false;
bool use_fp16_{false};
const schema::Format op_format_{schema::Format_NHWC4};
int batch_size_{};
int CI_{};
@@ -105,16 +106,16 @@ class ConvolutionOpenCLKernel : public OpenCLKernel {
int CO_SLICES_{};
int KH_{};
int KW_{};
void *packed_weight_ = nullptr;
void *packed_bias_ = nullptr;
bool has_bias_ = false;
void *packed_weight_{nullptr};
void *packed_bias_{nullptr};
bool has_bias_{false};
bool use_winograd_ = false;
bool use_winograd_{false};
int TILES_X_{};
int TILES_Y_{};
int TILES_XY_{};
void *winograd_mem0_ = nullptr;
void *winograd_mem1_ = nullptr;
void *winograd_mem0_{nullptr};
void *winograd_mem1_{nullptr};
cl::Kernel kernel_4x4to36_;
cl::Kernel kernel_conv_;

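ConvolutionOpenCLKernel is the one kernel that keeps an op_format_ at all: its header now defines it as a private constant pinned to schema::Format_NHWC4, so the remaining op_format_ references in the convolution code still compile while the value is no longer configurable.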
@@ -42,26 +42,12 @@ namespace mindspore::kernel {
int DepthwiseConv2dOpenCLKernel::Init() {
std::string kernel_name = "DepthwiseConv2d";
auto in_format = op_format_;
in_ori_format_ = in_tensors_[0]->GetFormat();
out_ori_format_ = out_tensors_[0]->GetFormat();
if (in_format != schema::Format::Format_NHWC4 && in_format != schema::Format::Format_NC4HW4) {
MS_LOG(ERROR) << "input format(" << in_format << ") "
<< "format not support!";
return mindspore::lite::RET_ERROR;
}
in_tensors_[0]->SetFormat(in_format);
out_tensors_[0]->SetFormat(in_format);
if (out_mem_type_ == OpenCLMemType::BUF) {
kernel_name += "_BUF";
} else {
kernel_name += "_IMG";
}
if (in_format == schema::Format::Format_NC4HW4) {
kernel_name += "_NC4HW4";
} else if (in_format == schema::Format::Format_NHWC4) {
kernel_name += "_NHWC4";
}
kernel_name += "_NHWC4";
auto parameter = reinterpret_cast<ConvParameter *>(op_parameter_);
if (parameter->kernel_h_ == 1) {
kernel_name += "_1x1";
@@ -75,7 +61,7 @@ int DepthwiseConv2dOpenCLKernel::Init() {
ocl_runtime_->LoadSource(program_name, source);
ocl_runtime_->BuildKernel(kernel_, program_name, kernel_name, build_options);
#endif
this->InitBuffer();
InitBuffer();
MS_LOG(DEBUG) << kernel_name << " Init Done! mem type=" << static_cast<int>(out_mem_type_);
return mindspore::lite::RET_OK;
}
@@ -146,28 +132,6 @@ int DepthwiseConv2dOpenCLKernel::InitBuffer() {
return mindspore::lite::RET_OK;
}
int DepthwiseConv2dOpenCLKernel::ReSize() { return mindspore::lite::RET_OK; }
int DepthwiseConv2dOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size) {
size_t CO4 = UP_DIV(out_tensors_[0]->Channel(), C4NUM);
size_t im_dst_x, im_dst_y;
if (in_tensors_[0]->GetFormat() == schema::Format::Format_NHWC4) {
im_dst_x = out_tensors_[0]->Width() * CO4;
im_dst_y = out_tensors_[0]->Height();
} else {
im_dst_y = out_tensors_[0]->Height() * CO4;
im_dst_x = out_tensors_[0]->Width();
}
size_t img_dtype = CL_FLOAT;
if (ocl_runtime_->GetFp16Enable()) {
img_dtype = CL_HALF_FLOAT;
}
img_size->clear();
std::vector<size_t> vec{im_dst_x, im_dst_y, img_dtype};
*img_size = vec;
return mindspore::lite::RET_OK;
}
int DepthwiseConv2dOpenCLKernel::GetGlobalSize(size_t idx, std::vector<size_t> *global_size) {
size_t CO4 = UP_DIV(out_tensors_[0]->Channel(), C4NUM);
std::vector<size_t> global = {(size_t)out_tensors_[0]->Width(), (size_t)out_tensors_[0]->Height(), CO4};

@@ -25,27 +25,25 @@ namespace mindspore::kernel {
class DepthwiseConv2dOpenCLKernel : public OpenCLKernel {
public:
explicit DepthwiseConv2dOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs)
: OpenCLKernel(parameter, inputs, outputs), packed_weight_(nullptr), bias_data_(nullptr), kernel_(nullptr) {}
DepthwiseConv2dOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs)
: OpenCLKernel(parameter, inputs, outputs) {}
~DepthwiseConv2dOpenCLKernel() override{};
~DepthwiseConv2dOpenCLKernel() override = default;
int Init() override;
int ReSize() override;
int Run() override;
int InitBuffer();
int InitBuffer() override;
int GetImageSize(size_t idx, std::vector<size_t> *img_size) override;
int GetGlobalSize(size_t idx, std::vector<size_t> *global_size) override;
int GetLocalSize(size_t idx, const std::vector<size_t> &global_size, std::vector<size_t> *local_size) override;
private:
void *packed_weight_;
void *bias_data_;
void *packed_weight_{nullptr};
void *bias_data_{nullptr};
cl::Kernel kernel_;
};
} // namespace mindspore::kernel

@@ -33,8 +33,7 @@ using mindspore::schema::PrimitiveType_FullConnection;
namespace mindspore::kernel {
int FullConnectionOpenCLKernel::Init() {
std::string kernel_name = "FullConnection";
kernel_name += "_" + std::string(EnumNameFormat(op_format_));
std::string kernel_name = "FullConnection_NHWC4";
auto param = reinterpret_cast<MatMulParameter *>(op_parameter_);
transposeA = param->a_transpose_;
if (transposeA) {
@@ -77,16 +76,10 @@ int FullConnectionOpenCLKernel::Init() {
#endif
PadWeight();
in_ori_format_ = in_tensors_[0]->GetFormat();
out_ori_format_ = out_tensors_[0]->GetFormat();
in_tensors_[0]->SetFormat(op_format_);
out_tensors_[0]->SetFormat(op_format_);
MS_LOG(DEBUG) << kernel_name << " Init Done!";
return RET_OK;
}
int FullConnectionOpenCLKernel::ReSize() { return RET_OK; }
void FullConnectionOpenCLKernel::PadWeight() {
// ABMCI @ ABCICO = ABMCO
auto allocator = ocl_runtime_->GetAllocator();
@@ -177,39 +170,6 @@ void FullConnectionOpenCLKernel::PadWeight() {
allocator->UnmapBuffer(bias_);
}
int FullConnectionOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size) {
size_t im_dst_x, im_dst_y;
auto out_shape = out_tensors_[0]->shape();
int n = 1, h = 1, w = 1, c = 1;
if (out_tensors_[0]->shape().size() == 2) {
n = out_shape[0];
c = out_shape[1];
} else {
n = out_shape[0];
h = out_shape[1];
w = out_shape[2];
c = out_shape[3];
}
if (op_format_ == schema::Format_NHWC4) {
im_dst_x = w * UP_DIV(c, C4NUM);
im_dst_y = n * h;
} else if (op_format_ == schema::Format_NC4HW4) {
im_dst_x = w;
im_dst_y = n * UP_DIV(c, C4NUM) * h;
} else {
MS_LOG(ERROR) << "not support op format:" << EnumNameFormat(op_format_);
return RET_ERROR;
}
size_t img_dtype = CL_FLOAT;
if (enable_fp16_) {
img_dtype = CL_HALF_FLOAT;
}
img_size->clear();
std::vector<size_t> vec{im_dst_x, im_dst_y, img_dtype};
*img_size = vec;
return RET_OK;
}
int FullConnectionOpenCLKernel::Run() {
MS_LOG(DEBUG) << this->name() << " Running!";
// local size should less than MAX_GROUP_SIZE

@@ -26,21 +26,20 @@ namespace mindspore::kernel {
class FullConnectionOpenCLKernel : public OpenCLKernel {
public:
explicit FullConnectionOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs)
FullConnectionOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs)
: OpenCLKernel(parameter, inputs, outputs) {}
~FullConnectionOpenCLKernel() override{};
~FullConnectionOpenCLKernel() override = default;
int Init() override;
int ReSize() override;
int Run() override;
void PadWeight();
int GetImageSize(size_t idx, std::vector<size_t> *img_size) override;
private:
void PadWeight();
cl::Kernel kernel_;
void *padWeight_;
void *bias_;
void *padWeight_{nullptr};
void *bias_{nullptr};
bool enable_fp16_{false};
bool transposeA{false};
bool transposeB{true};

Some files were not shown because too many files have changed in this diff.