@@ -1393,8 +1393,10 @@ def ssd_loss(location,
     # 3. Mining hard examples
     actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
     actual_shape.stop_gradient = True
+    # shape=(-1, 0) is set for compile-time, the correct shape is set by
+    # actual_shape in runtime.
     conf_loss = nn.reshape(
-        x=conf_loss, shape=(num, num_prior), actual_shape=actual_shape)
+        x=conf_loss, shape=(-1, 0), actual_shape=actual_shape)
     conf_loss.stop_gradient = True
     neg_indices = helper.create_variable_for_type_inference(dtype='int32')
     dtype = matched_indices.dtype
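
For context on the change above: in the fluid 1.x `reshape` API, a 0 in
`shape` copies the corresponding input dimension, -1 is inferred, and the
`actual_shape` tensor overrides both at runtime. A minimal sketch, assuming
the Paddle 1.x fluid API (the tensor names and sizes here are illustrative):

    import numpy
    import paddle.fluid as fluid

    # Placeholder with an unknown batch size: compile-time shape [-1, 8, 4].
    x = fluid.layers.data(name='x', shape=[8, 4], dtype='float32')

    # shape=(-1, 0): dim 1 is copied from the input, dim 0 is inferred;
    # the runtime tensor passed as actual_shape takes precedence when set.
    runtime_shape = fluid.layers.assign(numpy.array([4, 8]).astype('int32'))
    y = fluid.layers.reshape(x=x, shape=(-1, 0), actual_shape=runtime_shape)
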
@@ -1464,7 +1466,9 @@ def ssd_loss(location,
     # 5.3 Compute overall weighted loss.
     loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
     # reshape to [N, Np], N is the batch size and Np is the prior box number.
-    loss = nn.reshape(x=loss, shape=(num, num_prior), actual_shape=actual_shape)
+    # shape=(-1, 0) is set for compile-time, the correct shape is set by
+    # actual_shape in runtime.
+    loss = nn.reshape(x=loss, shape=(-1, 0), actual_shape=actual_shape)
     loss = nn.reduce_sum(loss, dim=1, keep_dim=True)
     if normalize:
         normalizer = nn.reduce_sum(target_loc_weight)
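
For reference, an illustrative numpy rendering of step 5.3 above (not the
fluid graph; the batch and prior counts are made up, and both loss weights
default to 1.0 in `ssd_loss`):

    import numpy as np

    N, Np = 2, 8732                        # hypothetical batch / prior counts
    conf_loss = np.random.rand(N * Np)     # stand-ins for the fluid tensors
    loc_loss = np.random.rand(N * Np)

    loss = 1.0 * conf_loss + 1.0 * loc_loss
    loss = loss.reshape(N, Np).sum(axis=1, keepdims=True)  # shape [N, 1]
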
@@ -1927,13 +1931,7 @@ def multi_box_head(inputs,
             stride=stride)

         mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1])
-        compile_shape = [
-            mbox_loc.shape[0], cpt.floor_division(
-                mbox_loc.shape[1] * mbox_loc.shape[2] * mbox_loc.shape[3], 4), 4
-        ]
-        run_shape = tensor.assign(numpy.array([0, -1, 4]).astype("int32"))
-        mbox_loc_flatten = nn.reshape(
-            mbox_loc, shape=compile_shape, actual_shape=run_shape)
+        mbox_loc_flatten = nn.flatten(mbox_loc, axis=1)
         mbox_locs.append(mbox_loc_flatten)

         # get conf
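
The replacement relies on `flatten(x, axis=1)`, which collapses every
dimension after the first into one, so the per-head compile-time division
by 4 (and by num_classes below) is no longer needed. A minimal sketch,
assuming the Paddle 1.x fluid API with illustrative sizes:

    import paddle.fluid as fluid

    # A transposed head output in NHWC layout; the batch size is unknown.
    head = fluid.layers.data(name='head', shape=[3, 3, 16], dtype='float32')

    # [N, 3, 3, 16] -> [N, 3*3*16]; grouping into [N, -1, 4] is deferred
    # until after all heads are concatenated (see the last hunk).
    flat = fluid.layers.flatten(head, axis=1)
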
@@ -1945,16 +1943,7 @@ def multi_box_head(inputs,
             padding=pad,
             stride=stride)
         conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
-        new_shape = [0, -1, num_classes]
-        compile_shape = [
-            conf_loc.shape[0],
-            cpt.floor_division(conf_loc.shape[1] * conf_loc.shape[2] *
-                               conf_loc.shape[3], num_classes), num_classes
-        ]
-        run_shape = tensor.assign(
-            numpy.array([0, -1, num_classes]).astype("int32"))
-        conf_loc_flatten = nn.reshape(
-            conf_loc, shape=compile_shape, actual_shape=run_shape)
+        conf_loc_flatten = nn.flatten(conf_loc, axis=1)
         mbox_confs.append(conf_loc_flatten)

     if len(box_results) == 1:
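
A quick numpy check (with hypothetical head sizes) that flattening each
head to 2-D and grouping once after concatenation, as the last hunk does,
yields the same layout as the removed per-head reshapes:

    import numpy as np

    N = 2
    heads = [np.random.rand(N, h, w, 4) for h, w in [(3, 3), (2, 2)]]

    flat_then_group = np.concatenate(
        [h.reshape(N, -1) for h in heads], axis=1).reshape(N, -1, 4)
    group_then_concat = np.concatenate(
        [h.reshape(N, -1, 4) for h in heads], axis=1)

    assert np.array_equal(flat_then_group, group_then_concat)
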
@@ -1972,7 +1961,10 @@ def multi_box_head(inputs,
         box = tensor.concat(reshaped_boxes)
         var = tensor.concat(reshaped_vars)
         mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
+        mbox_locs_concat = nn.reshape(mbox_locs_concat, shape=[0, -1, 4])
         mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
+        mbox_confs_concat = nn.reshape(
+            mbox_confs_concat, shape=[0, -1, num_classes])

     box.stop_gradient = True
     var.stop_gradient = True
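
The two reshapes added above use shape=[0, -1, 4] and
shape=[0, -1, num_classes]: the 0 keeps the batch dimension unchanged and
the -1 is inferred, so no actual_shape tensor is required here. A minimal
sketch, assuming the Paddle 1.x fluid API (sizes are illustrative):

    import paddle.fluid as fluid

    num_classes = 21
    confs = fluid.layers.data(
        name='confs', shape=[13 * num_classes], dtype='float32')

    # 0 copies the batch dimension, -1 is inferred from what remains:
    # [N, 13 * num_classes] -> [N, 13, num_classes].
    confs_3d = fluid.layers.reshape(confs, shape=[0, -1, num_classes])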