@@ -115,18 +115,22 @@ class TeacherStudentSigmoidLossOpMaker
AddOutput("Y",
|
|
|
|
|
"(Tensor, default Tensor<float>), a 2-D tensor with shape "
|
|
|
|
|
"[N x 1]. The teacher student sigmoid loss.");
|
|
|
|
|
AddAttr<float>("soft_max_up_bound", "fp32, default 15.0").SetDefault(15.0);
|
|
|
|
|
AddAttr<float>("soft_max_lower_bound", "fp32, default -15.0")
|
|
|
|
|
    AddAttr<float>(
        "soft_max_up_bound",
        "fp32, if input > soft_max_up_bound, it will be clipped to this bound, default 15.0")
        .SetDefault(15.0);
    AddAttr<float>(
        "soft_max_lower_bound",
        "fp32, if input < soft_max_lower_bound, it will be clipped to this bound, default -15.0")
        .SetDefault(-15.0);
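    // Note: these bounds are used by the backward kernel below, which clamps
    // the input logit to [soft_max_lower_bound, soft_max_up_bound] before the
    // sigmoid and zeroes the gradient once the logit saturates at either bound.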
AddComment(R"DOC(
|
|
|
|
|
TeacherStudentSigmoidLoss Operator.
|
|
|
|
|
TeacherStudentSigmoidLoss Operator.
|
|
|
|
|
|
|
|
|
|
It's similarity to SigmoidCrossEntropyWithLogits Operator. The difference is that
|
|
|
|
|
we add another label(z') to original.
|
|
|
|
|
loss = max(x, 0) - x * z + log(1 + exp(-abs(x))) + max(x, 0) - x * z' + log(1 + exp(-abs(x)))
|
|
|
|
|
z is click or not
|
|
|
|
|
z' is value q of feed_fine
|
|
|
|
|
z' is teacher value
|
|
|
|
|
label = {-2, -1, [0, 2]}
|
|
|
|
|
when z' is not exist, clk = 0 : label = -2;
|
|
|
|
|
when z' is not exist, clk = 1 : label = -1;
|
|
|
|
@@ -137,104 +141,6 @@ we add another label (z') to the original one.
  }
};

// template <typename DeviceContext, typename T>
template <typename T>
class TeacherStudentSigmoidLossOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    PADDLE_ENFORCE(platform::is_cpu_place(context.GetPlace()),
                   "This kernel only runs on CPU.");

    Tensor* y = context.Output<Tensor>("Y");
    const Tensor* x = context.Input<Tensor>("X");
    const Tensor* labels = context.Input<Tensor>("Label");
    T* y_data = y->mutable_data<T>(context.GetPlace());
    const T* x_data = x->data<T>();
    const T* label_data = labels->data<T>();
    int64_t batch_size = x->dims()[0];
    // loss = max(x, 0) - x * z + log(1 + exp(-abs(x))) + max(x, 0) - x * z' +
    //        log(1 + exp(-abs(x)))
    // z is the click label (1 for click, 0 for no click)
    // z' is the value q of feed_fine (the teacher value)
    // label = {-2, -1, [0, 2]}
    // when z' does not exist and clk = 0 : label = -2;
    // when z' does not exist and clk = 1 : label = -1;
    // when z' exists and clk = 0 : label = 0 + z';
    // when z' exists and clk = 1 : label = 1 + z';
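    // The four branches below select the matching combination of the two
    // sigmoid cross-entropy terms:
    //   label < -1      : no click, no teacher score      -> single term with z = 0
    //   -1 <= label < 0 : click, no teacher score         -> single term with z = 1
    //   0 <= label < 1  : no click, teacher z' = label    -> both terms, with z = 0
    //   label >= 1      : click, teacher z' = label - 1   -> both terms, with z = 1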
    for (int i = 0; i < batch_size; ++i) {
      if (label_data[i] < -1.0) {
        y_data[i] = (x_data[i] > 0 ? x_data[i] : 0.0) +
                    log(1.0 + exp(-fabs(x_data[i])));
      } else if (label_data[i] < 0.0) {
        y_data[i] = (x_data[i] > 0 ? x_data[i] : 0.0) - x_data[i] +
                    log(1.0 + exp(-fabs(x_data[i])));
      } else if (label_data[i] < 1.0) {
        y_data[i] = (x_data[i] > 0 ? x_data[i] : 0.0) +
                    log(1.0 + exp(-fabs(x_data[i]))) +
                    (x_data[i] > 0 ? x_data[i] : 0.0) -
                    x_data[i] * label_data[i] +
                    log(1.0 + exp(-fabs(x_data[i])));
      } else {
        y_data[i] = (x_data[i] > 0 ? x_data[i] : 0.0) - x_data[i] +
                    log(1.0 + exp(-fabs(x_data[i]))) +
                    (x_data[i] > 0 ? x_data[i] : 0.0) -
                    x_data[i] * (label_data[i] - 1.0) +
                    log(1.0 + exp(-fabs(x_data[i])));
      }
    }
  }
};

template <typename T>
class TeacherStudentSigmoidLossGradOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const Tensor* x = context.Input<Tensor>("X");
    const T* x_data = x->data<T>();

    Tensor* dx = context.Output<Tensor>(framework::GradVarName("X"));
    T* dx_data = dx->mutable_data<T>(context.GetPlace());

    const Tensor* labels = context.Input<Tensor>("Label");
    const T* label_data = labels->data<T>();

    T soft_max_up_bound =
        static_cast<T>(context.Attr<float>("soft_max_up_bound"));
    T soft_max_lower_bound =
        static_cast<T>(context.Attr<float>("soft_max_lower_bound"));

    int64_t batch_size = x->dims()[0];

    const framework::Tensor* dOut =
        context.Input<framework::Tensor>(framework::GradVarName("Y"));

    const T* dout_data = dOut->data<T>();

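    // For each cross-entropy term, d/dx [max(x, 0) - x * z + log(1 + exp(-abs(x)))]
    // equals sigmoid(x) - z. The branches below store the *negative* of
    // dLoss/dx in dx_data; the final "dx_data[i] *= dout_data[i] * -1" flips
    // the sign back and applies the chain rule with the upstream gradient dOut.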
    for (int i = 0; i < batch_size; ++i) {
      T sum_val = x_data[i];
      if (sum_val > soft_max_up_bound) {
        sum_val = soft_max_up_bound;
      } else {
        if (sum_val < soft_max_lower_bound) {
          sum_val = soft_max_lower_bound;
        }
      }

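      // pred is the sigmoid of the clamped logit; when the logit hits either
      // bound, the gradient is zeroed out by the saturation check below.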
      T pred = 1.0 / (1.0 + exp(-sum_val));
      if (label_data[i] < -1.0) {
        dx_data[i] = 0.0 - pred;
      } else if (label_data[i] < 0.0) {
        dx_data[i] = 1.0 - pred;
      } else {
        dx_data[i] = label_data[i] - 2.0 * pred;
      }
      if (sum_val >= soft_max_up_bound || sum_val <= soft_max_lower_bound) {
        dx_data[i] = 0;
      }
      dx_data[i] *= dout_data[i] * -1;
    }
  }
};
}  // namespace operators
}  // namespace paddle