@@ -16,7 +16,7 @@
 import numpy as np
 import mindspore.nn as nn
 import mindspore.common.dtype as mstype
-from mindspore import Tensor
+from mindspore import context, Tensor
 from mindspore.ops import operations as P
 from mindspore.ops import functional as F
 from mindspore.common.initializer import initializer
@@ -102,6 +102,7 @@ class RPN(nn.Cell):
         cfg_rpn = config
         self.dtype = np.float32
         self.ms_type = mstype.float32
+        self.device_type = "Ascend" if context.get_context("device_target") == "Ascend" else "Others"
         self.num_bboxes = cfg_rpn.num_bboxes
         self.slice_index = ()
         self.feature_anchor_shape = ()
@@ -180,9 +181,12 @@ class RPN(nn.Cell):
         bias_reg = initializer(0, shape=shp_bias_reg, dtype=self.ms_type).to_tensor()

         for i in range(num_layers):
-            rpn_layer.append(RpnRegClsBlock(in_channels, feat_channels, num_anchors, cls_out_channels, \
+            rpn_reg_cls_block = RpnRegClsBlock(in_channels, feat_channels, num_anchors, cls_out_channels, \
                                             weight_conv, bias_conv, weight_cls, \
-                                            bias_cls, weight_reg, bias_reg))
+                                            bias_cls, weight_reg, bias_reg)
+            if self.device_type == "Ascend":
+                rpn_reg_cls_block.to_float(mstype.float16)
+            rpn_layer.append(rpn_reg_cls_block)

         for i in range(1, num_layers):
             rpn_layer[i].rpn_conv.weight = rpn_layer[0].rpn_conv.weight
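# The hunks above key the RPN head's precision off the runtime device target:
# each RpnRegClsBlock is converted to float16 with to_float() only when the
# network is compiled for Ascend, and stays in float32 on other backends.
# A minimal standalone sketch of that pattern, using an illustrative
# SmallBlock cell (an assumption, not part of the patch):
import mindspore.nn as nn
import mindspore.common.dtype as mstype
from mindspore import context

class SmallBlock(nn.Cell):
    def __init__(self):
        super(SmallBlock, self).__init__()
        self.conv = nn.Conv2d(256, 256, kernel_size=3, pad_mode="same")
        self.relu = nn.ReLU()

    def construct(self, x):
        return self.relu(self.conv(x))

block = SmallBlock()
# get_context("device_target") returns the configured backend string, e.g.
# "Ascend", "GPU" or "CPU"; Cell.to_float(mstype.float16) makes the cell cast
# its inputs to float16 before running its operations.
if context.get_context("device_target") == "Ascend":
    block.to_float(mstype.float16)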
@@ -250,6 +254,7 @@ class RPN(nn.Cell):
                                                                   mstype.bool_),
                                                                   anchor_using_list, gt_valids_i)

+                bbox_target = self.cast(bbox_target, self.ms_type)
                 bbox_weight = self.cast(bbox_weight, self.ms_type)
                 label = self.cast(label, self.ms_type)
                 label_weight = self.cast(label_weight, self.ms_type)
@@ -286,8 +291,8 @@ class RPN(nn.Cell):
            label_ = F.stop_gradient(label_with_batchsize)
            label_weight_ = F.stop_gradient(label_weight_with_batchsize)

-            cls_score_i = rpn_cls_score[i]
-            reg_score_i = rpn_bbox_pred[i]
+            cls_score_i = self.cast(rpn_cls_score[i], self.ms_type)
+            reg_score_i = self.cast(rpn_bbox_pred[i], self.ms_type)

             loss_cls = self.loss_cls(cls_score_i, label_)
             loss_cls_item = loss_cls * label_weight_
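# With the blocks running in float16 on Ascend, their outputs and the sampled
# targets can come back as float16, so the last two hunks cast predictions and
# targets to self.ms_type (float32) before the loss is evaluated, keeping the
# loss math in full precision on every backend. A rough sketch of that idea,
# with illustrative shapes (assumptions, not taken from the patch):
import numpy as np
import mindspore.common.dtype as mstype
from mindspore import Tensor, context
from mindspore.ops import operations as P

context.set_context(mode=context.PYNATIVE_MODE)

cast = P.Cast()
loss_cls = P.SigmoidCrossEntropyWithLogits()

# Pretend this score came out of a float16 cell.
cls_score_i = Tensor(np.random.randn(8, 1).astype(np.float16))
label_ = Tensor(np.random.randint(0, 2, size=(8, 1)).astype(np.float32))

# Cast back to float32 so the loss runs in full precision.
cls_score_i = cast(cls_score_i, mstype.float32)
loss = loss_cls(cls_score_i, label_)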