fix fp16 bugs: correct inverted null check on bias in BiasCPUFp16Kernel::Init (was `bias != nullptr` guarding the error path) and set param->data_size_ from the input tensor in TransposeFp16CPUKernel::Run

pull/12024/head
lzk 4 years ago
parent 6d764fe66c
commit 9c5fad32f9

@ -82,7 +82,7 @@ int BiasCPUFp16Kernel::Init() {
return RET_NULL_PTR;
}
auto *bias = reinterpret_cast<float *>(bias_tensor->MutableData());
if (bias != nullptr) {
if (bias == nullptr) {
MS_LOG(ERROR) << "bias is nullptr!";
return RET_NULL_PTR;
}

@ -40,6 +40,7 @@ int TransposeFp16CPUKernel::Init() {
int TransposeFp16CPUKernel::Run() {
MS_ASSERT(in_tensors_.size() == 1 || in_tensors_.size() == 2);
TransposeParameter *param = reinterpret_cast<TransposeParameter *>(this->op_parameter_);
param->data_size_ = in_tensors_[0]->Size();
if (in_tensors_.size() == 2) {
auto input_perm = in_tensors_.at(1);
MS_ASSERT(input_perm != nullptr);

Loading…
Cancel
Save