fix softmax CE time limit check failed (#19846)

* fix softmax CE time limit check failure. test=develop

* refine softmax calculation. test=develop
Kaipeng Deng committed 6 years ago (via GitHub)
parent a4919d3688
commit 3f021781a1
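For context on what the refined kernel computes: softmax is kept numerically stable by shifting each row by its maximum before exponentiating, then scaling by the reciprocal of the row sum; the shifted logits, their exponentials, and the final result are all written into the output tensor so no logits-sized temporary is allocated. Below is a minimal standalone sketch of that recipe, essentially the `num_remain == 1` fast path in the diff; the sizes and names are made up for illustration, it mirrors Paddle's row-major Eigen mapping, and it omits the kernel's ValueClip clamp:

// Standalone illustration (hypothetical names, not Paddle's kernel):
// numerically stable softmax over the last dimension.
#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  const int batch_size = 2, num_classes = 3;
  // Row-major to mirror Paddle's Eigen tensor wrappers.
  Eigen::Tensor<float, 2, Eigen::RowMajor> logits(batch_size, num_classes);
  logits.setValues({{1.f, 2.f, 3.f}, {1000.f, 1001.f, 1002.f}});

  Eigen::DSizes<int, 1> along_class(1);
  Eigen::DSizes<int, 2> batch_by_one(batch_size, 1);
  Eigen::DSizes<int, 2> one_by_class(1, num_classes);

  // Shift by the per-row maximum so exp() cannot overflow; the second
  // row would produce inf without the shift.
  Eigen::Tensor<float, 2, Eigen::RowMajor> shifted =
      logits - logits.maximum(along_class)
                   .eval()
                   .reshape(batch_by_one)
                   .broadcast(one_by_class);
  Eigen::Tensor<float, 2, Eigen::RowMajor> e = shifted.exp();

  // Normalize: multiply by the reciprocal of each row's sum.
  Eigen::Tensor<float, 2, Eigen::RowMajor> softmax =
      e * e.sum(along_class)
             .inverse()
             .eval()
             .reshape(batch_by_one)
             .broadcast(one_by_class);

  std::cout << softmax << "\n";  // each row sums to 1
}

Without the shift, exp(1000.f) overflows float to inf and the normalization turns the second row into NaN; with it, the largest shifted logit in each row is exactly 0.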

@@ -52,26 +52,45 @@ void SoftmaxEigen(const DeviceContext& context, const int axis_dim,
   Eigen::DSizes<int, 1> along_axis(kAxisDim);
   Eigen::DSizes<int, 2> batch_classes(batch_size, num_classes);
   Eigen::DSizes<int, 2> batch_by_one(batch_size, 1);
   Eigen::DSizes<int, 2> one_by_class(1, num_classes);
   Eigen::DSizes<int, 3> batch_one_remain(batch_size, 1, num_remain);
   Eigen::DSizes<int, 3> one_axis_one(1, axis_dim, 1);
+  Eigen::DSizes<int, 2> one_axis(1, axis_dim);
   Eigen::DSizes<int, 3> batch_axis_remain(batch_size, axis_dim, num_remain);
 
-  auto logits_reshape = logits.reshape(batch_axis_remain);
-  auto shifted_logits = (logits_reshape -
-                         logits_reshape.maximum(along_axis)
-                             .eval()
-                             .reshape(batch_one_remain)
-                             .broadcast(one_axis_one))
-                            .unaryExpr(ValueClip<T>());
-  auto exp = shifted_logits.exp();
-  softmax.device(*context.eigen_device()) = (exp *
-                                             exp.sum(along_axis)
-                                                 .inverse()
-                                                 .eval()
-                                                 .reshape(batch_one_remain)
-                                                 .broadcast(one_axis_one))
-                                                .reshape(batch_classes);
+  // For numerical stability, logits should be shifted by maximum number along
+  // axis, calculate shifted_logits into softmax tensor for memory reuse.
+  if (num_remain == 1) {
+    // axis == -1, axis and class in same dimension, calculate along
+    // class dimension directly for higher performance
+    softmax.device(*context.eigen_device()) = (logits -
+                                               logits.maximum(along_axis)
+                                                   .eval()
+                                                   .reshape(batch_by_one)
+                                                   .broadcast(one_by_class))
+                                                  .unaryExpr(ValueClip<T>());
+  } else {
+    // axis != -1, class dimension split into (axis, remain), max and sum
+    // should be calculated along axis dimension
+    softmax.device(*context.eigen_device()) =
+        (logits.reshape(batch_axis_remain) -
+         logits.reshape(batch_axis_remain)
+             .maximum(along_axis)
+             .eval()
+             .reshape(batch_one_remain)
+             .broadcast(one_axis_one)
+             .reshape(batch_classes))
+            .unaryExpr(ValueClip<T>());
+  }
+
+  softmax.device(*context.eigen_device()) = softmax.exp();
+  softmax.device(*context.eigen_device()) = (softmax *
+                                             softmax.reshape(batch_axis_remain)
+                                                 .sum(along_axis)
+                                                 .inverse()
+                                                 .eval()
+                                                 .broadcast(one_axis));
 }
 
 template <typename DeviceContext, typename T, bool is_test, typename Enable>
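The key to the axis != -1 branch is the shape algebra: num_classes factors as axis_dim * num_remain, so a (batch, classes) tensor can be viewed as (batch, axis, remain) with flat class index c = a * num_remain + r. Summing that view along the axis dimension yields a (batch, remain) tensor of normalizers, and because Eigen's broadcast indexes modulo the input size, tiling it by (1, axis_dim) places sums(b, c % num_remain) = sums(b, r) at every flat position c. That is why the new final statement gets away with a single broadcast(one_axis) where the old code reshaped to batch_one_remain, broadcast by one_axis_one, and reshaped back to batch_classes. A toy check of that index arithmetic, with made-up sizes rather than Paddle code:

// Toy check of the (batch, axis, remain) decomposition used above.
#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  const int batch_size = 1, axis_dim = 3, num_remain = 2;
  const int num_classes = axis_dim * num_remain;  // flat index c = a * num_remain + r

  // Row-major to mirror Paddle's Eigen tensor mapping.
  Eigen::Tensor<float, 2, Eigen::RowMajor> x(batch_size, num_classes);
  // The r == 0 slice holds {0, 1, 2}; the r == 1 slice holds {10, 20, 30}.
  x.setValues({{0.f, 10.f, 1.f, 20.f, 2.f, 30.f}});

  Eigen::DSizes<int, 1> along_axis(1);
  Eigen::DSizes<int, 3> batch_axis_remain(batch_size, axis_dim, num_remain);
  Eigen::DSizes<int, 2> one_axis(1, axis_dim);

  // Reduce the axis dimension of the 3-D view: result shape (batch, remain).
  Eigen::Tensor<float, 2, Eigen::RowMajor> sums =
      x.reshape(batch_axis_remain).sum(along_axis);
  std::cout << sums << "\n";  // prints: 3 60

  // Broadcast indexes modulo the input size, so flat position c receives
  // sums(b, c % num_remain), i.e. the reduction for its own r slice.
  Eigen::Tensor<float, 2, Eigen::RowMajor> tiled = sums.broadcast(one_axis);
  std::cout << tiled << "\n";  // prints: 3 60 3 60 3 60
}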
