graphengine/ge/host_kernels/rsqrt_kernel.cc
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "host_kernels/rsqrt_kernel.h"
#include <cfloat>
#include <memory>
#include "common/debug/ge_log.h"
#include "common/debug/log.h"
#include "common/ge_inner_error_codes.h"
#include "common/op/ge_op_utils.h"
#include "framework/common/debug/ge_log.h"
#include "host_kernels/kernel_utils.h"
#include "inc/kernel_factory.h"
#include "common/math/math_util.h"
#include "framework/common/types.h"
namespace ge {
namespace {
const size_t kRsqrtInputSize = 1;
const size_t kRsqrtInputIndex0 = 0;
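// Per-type zero check for an element before 1 / sqrt(x) is computed; returns a failure status on invalid input.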
template <typename T>
Status ZeroCheck(T x, const DataType &data_type) {
  switch (data_type) {
    case DT_FLOAT16:
      FMK_FP16_ZEROCHECK(static_cast<double>(x))
      break;
    case DT_FLOAT:
      FMK_FLOAT_ZEROCHECK(static_cast<float>(x))
      break;
    case DT_DOUBLE:
      FMK_DOUBLE_ZEROCHECK(static_cast<double>(x))
      break;
    default:
      break;
  }
  return SUCCESS;
}
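// Expands to a switch case that invokes RsqrtCompute<TYPE> for the corresponding data type.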
#define SET_RSQRT_CASE(DTYPE, TYPE)                               \
  case (DTYPE):                                                   \
    ret = RsqrtKernel::RsqrtCompute<TYPE>(input_ptr, output_ptr); \
    break;
} // namespace
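// Computes 1 / sqrt(x) for each element of the input tensor on the host and writes the result to output_tensor_ptr.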
template <typename T>
Status RsqrtKernel::RsqrtCompute(ConstGeTensorPtr &input_tensor_ptr, GeTensorPtr &output_tensor_ptr) {
  GE_CHECK_NOTNULL(input_tensor_ptr);
  GE_CHECK_NOTNULL(output_tensor_ptr);
  size_t data_size = input_tensor_ptr->GetData().size();
  size_t data_count = data_size / sizeof(T);
  auto data_type = input_tensor_ptr->GetTensorDesc().GetDataType();
  if (data_count > 0) {
    std::unique_ptr<T[]> buf(new (std::nothrow) T[data_count]());
    if (buf == nullptr) {
      GELOGW("New buf failed");
      return NOT_CHANGED;
    }
    auto ptr = const_cast<T *>(reinterpret_cast<const T *>(input_tensor_ptr->GetData().data()));
    for (size_t i = 0; i < data_count; i++) {
      if (ZeroCheck(*(ptr + i), data_type) != SUCCESS) {
        GELOGW("Rsqrt: The input data cannot be less than or equal to zero, rsqrt folding failed.");
        return NOT_CHANGED;
      }
      switch (data_type) {
        case DT_FLOAT16: {
          double val = static_cast<double>(*(reinterpret_cast<const fp16_t *>(input_tensor_ptr->GetData().data()) + i));
          double rsqrt_val = 1.0 / std::sqrt(val);
          buf[i] = rsqrt_val;
          break;
        }
        case DT_FLOAT: {
          float denominator = std::sqrt(*(reinterpret_cast<const float *>(input_tensor_ptr->GetData().data()) + i));
          buf[i] = static_cast<float>(1 / denominator);
          break;
        }
        case DT_DOUBLE: {
          double denominator = std::sqrt(*(reinterpret_cast<const double *>(input_tensor_ptr->GetData().data()) + i));
          buf[i] = static_cast<double>(1 / denominator);
          break;
        }
        default:
          GELOGW("Input data type must be FP16, FP32 or DOUBLE.");
          return NOT_CHANGED;
      }
    }
    GE_IF_BOOL_EXEC(output_tensor_ptr->SetData(reinterpret_cast<uint8_t *>(buf.get()), data_size) != GRAPH_SUCCESS,
                    GELOGW("Set data failed"); return NOT_CHANGED);
    output_tensor_ptr->MutableTensorDesc().SetDataType(data_type);
    output_tensor_ptr->MutableTensorDesc().SetShape(input_tensor_ptr->GetTensorDesc().GetShape());
  }
  return SUCCESS;
}
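// Constant-folding entry point: validates the single input tensor and dispatches RsqrtCompute by data type.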
Status RsqrtKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<ConstGeTensorPtr> &input,
                            std::vector<GeTensorPtr> &v_output) {
  GELOGI("RsqrtKernel in.");
  GE_CHECK_NOTNULL(op_desc_ptr);
  // check input size
  if (input.size() != kRsqrtInputSize) {
    GELOGW("The number of inputs for rsqrt must be %zu.", kRsqrtInputSize);
    return NOT_CHANGED;
  }
  ConstGeTensorPtr input_ptr = input.at(kRsqrtInputIndex0);
  GE_CHECK_NOTNULL(input_ptr);
  // Index 0 always yields a GeTensorDesc object from any OpDescPtr.
  auto output_tensor_desc = op_desc_ptr->GetOutputDesc(0);
  GeTensorPtr output_ptr = MakeShared<GeTensor>(output_tensor_desc);
  if (output_ptr == nullptr) {
    GELOGW("MakeShared GeTensor failed, node name %s.", op_desc_ptr->GetName().c_str());
    return NOT_CHANGED;
  }
  Status ret = NOT_CHANGED;
  auto dtype = input_ptr->GetTensorDesc().GetDataType();
  switch (dtype) {
    SET_RSQRT_CASE(DT_FLOAT16, fp16_t)
    SET_RSQRT_CASE(DT_FLOAT, float)
    SET_RSQRT_CASE(DT_DOUBLE, double)
    default:
      GELOGW("Input data type must be FP16, FP32 or DOUBLE.");
      return NOT_CHANGED;
  }
  if (ret != SUCCESS) {
    GELOGW("Rsqrt folding failed.");
    return NOT_CHANGED;
  }
  v_output.push_back(output_ptr);
  GELOGI("RsqrtKernel success.");
  return SUCCESS;
}
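// Register this host kernel with the kernel factory under the RSQRT op type.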
REGISTER_KERNEL(RSQRT, RsqrtKernel);
} // namespace ge