Paddle/paddle/fluid/operators/hierarchical_sigmoid_op.h

/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <iostream>
#include <set>
#include <vector>

#include "paddle/fluid/framework/mixed_vector.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/clip_op.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/matrix_bit_code.h"
#include "paddle/fluid/platform/transform.h"

namespace paddle {
namespace operators {

template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
using platform::Transform;
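
// Collects the distinct non-negative node ids that appear in the custom path
// table. The backward kernel uses this to build the row set of the sparse
// (SelectedRows) weight gradient when is_sparse is enabled.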
std::vector<int64_t> cal_rows(const framework::LoDTensor* path) {
  std::set<int64_t> tmp;
  std::vector<int64_t> rows;
  for (size_t i = 0; i < static_cast<size_t>(path->dims()[0]); i++) {
    for (size_t j = 0; j < static_cast<size_t>(path->dims()[1]); j++) {
      int64_t temp =
          path->data<int64_t>()[i * static_cast<size_t>(path->dims()[1]) + j];
      if (temp >= 0) {
        tmp.insert(temp);
      }
    }
  }
  rows.assign(tmp.begin(), tmp.end());
  return rows;
}
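
// Forward kernel of hierarchical sigmoid.
//
// For sample i and the j-th node on its path, pre_out(i, j) holds the node's
// binary logit z_ij = w_node . x_i (+ b_node when Bias is given), clipped to
// [-40, 40]. The loss written to Out is the summed binary cross entropy over
// the path,
//
//   Out[i] = sum_j softrelu(z_ij) - sum_{j : code bit == 1} z_ij,
//
// with softrelu(z) = log(1 + exp(z)). PreOut keeps softrelu(z_ij) so that the
// backward kernel can recover sigmoid(z_ij) without recomputing the logits.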
template <typename DeviceContext, typename T>
class HierarchicalSigmoidOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* in = ctx.Input<framework::LoDTensor>("X");
    auto* w = ctx.Input<framework::LoDTensor>("W");
    auto* path = ctx.Input<framework::LoDTensor>("PTable");
    auto* code = ctx.Input<framework::LoDTensor>("PCode");
    auto* label = ctx.Input<framework::LoDTensor>("Label");
    auto* bias = ctx.Input<framework::LoDTensor>("Bias");
    auto* out = ctx.Output<framework::LoDTensor>("Out");
    auto* pre_out = ctx.Output<framework::LoDTensor>("PreOut");

    size_t num_classes = static_cast<size_t>(ctx.Attr<int>("num_classes"));
    // A custom tree is in use when a path table is provided; otherwise the
    // default complete binary tree over num_classes leaves is used.
    bool is_custom = (path != nullptr);
    int64_t code_length =
        path ? path->dims()[1] : math::FindLastSet(num_classes - 1);
    int64_t batch_size = in->dims()[0];

    framework::LoDTensor sum;
    auto& dev_ctx = ctx.template device_context<DeviceContext>();
    auto* pre_out_data = pre_out->mutable_data<T>(
        framework::make_ddim({batch_size, code_length}), ctx.GetPlace());
    auto pre_out_mat = EigenMatrix<T>::From(*pre_out);
    // Not all class (leaf) nodes' path lengths equal code_length, so
    // initializing with zeros keeps the out-of-path entries from adding loss.
    math::SetConstant<DeviceContext, T> zero;
    zero(dev_ctx, pre_out, static_cast<T>(0.0));
    auto& place = *ctx.template device_context<DeviceContext>().eigen_device();
    math::RowwiseSum<DeviceContext, T> row_sum;

    std::unique_ptr<math::MatrixBitCodeFunctor<T>> bit_code;
    if (!is_custom) {
      bit_code.reset(new math::MatrixBitCodeFunctor<T>(num_classes,
                                                       label->data<int64_t>()));
    } else {
      bit_code.reset(new math::MatrixBitCodeFunctor<T>(path, code,
                                                       label->data<int64_t>()));
    }

    std::vector<int64_t> sum_dims({batch_size, 1UL});
    sum.mutable_data<T>(framework::make_ddim(sum_dims), ctx.GetPlace());
    auto sum_mat = EigenMatrix<T>::From(sum);
    out->mutable_data<T>(ctx.GetPlace());
    auto out_mat = framework::EigenVector<T>::Flatten(*out);
    if (bias) {
      bit_code->Add(pre_out, *bias);
    }
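
    // Accumulate the per-node logits along each sample's path via the
    // bit-code functor, then clip them so the exp() in the softrelu below
    // stays numerically stable.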
    bit_code->Mul(pre_out, *w, *in);
    // clip to [-40, 40]
    Transform<DeviceContext> trans;
    trans(ctx.template device_context<DeviceContext>(), pre_out_data,
          pre_out_data + pre_out->numel(), pre_out_data,
          ClipFunctor<T>(static_cast<T>(-40.0), static_cast<T>(40.0)));
    bit_code->Sum(*pre_out, out, static_cast<T>(-1));
    // use softrelu to calculate cross entropy
    pre_out_mat.device(place) = (static_cast<T>(1.0) + pre_out_mat.exp()).log();
    row_sum(dev_ctx, *pre_out, &sum);
    // TODO(guosheng): Subtract the loss contributed by out-of-path entries,
    // since not all class (leaf) nodes' path lengths equal code_length. This
    // does not break the gradient check, because both sides include the
    // out-of-path loss and it cancels out.
    out_mat.device(place) = sum_mat + out_mat;
  }
};
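
// Backward kernel of hierarchical sigmoid.
//
// PreOut stores softrelu(z) from the forward pass, so exp(PreOut) = 1 + exp(z)
// and 1 - 1 / exp(PreOut) = sigmoid(z). The gradient w.r.t. each per-node
// logit is (sigmoid(z) - code bit) * Out@GRAD, which the bit-code functor then
// propagates to X@GRAD, W@GRAD and Bias@GRAD.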
template <typename DeviceContext, typename T>
class HierarchicalSigmoidGradOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* in = ctx.Input<framework::LoDTensor>("X");
    auto* w = ctx.Input<framework::LoDTensor>("W");
    auto* path = ctx.Input<framework::LoDTensor>("PTable");
    auto* code = ctx.Input<framework::LoDTensor>("PCode");
    auto* in_grad =
        ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
    bool is_sparse = ctx.Attr<bool>("is_sparse");
    auto& dev_ctx = ctx.template device_context<DeviceContext>();
    math::SetConstant<DeviceContext, T> zero;
    auto* bias_grad =
        ctx.Output<framework::LoDTensor>(framework::GradVarName("Bias"));
    auto* label = ctx.Input<framework::LoDTensor>("Label");
    auto* pre_out = ctx.Input<framework::LoDTensor>("PreOut");
    auto* out_grad =
        ctx.Input<framework::LoDTensor>(framework::GradVarName("Out"));
    framework::LoDTensor pre_out_grad;

    pre_out_grad.mutable_data<T>(pre_out->dims(), ctx.GetPlace());
    in_grad->mutable_data<T>(ctx.GetPlace());
    zero(dev_ctx, in_grad, static_cast<T>(0.0));

    size_t num_classes = static_cast<size_t>(ctx.Attr<int>("num_classes"));
    // Same convention as the forward kernel: a path table means a custom tree.
    bool is_custom = (path != nullptr);

    std::unique_ptr<math::MatrixBitCodeFunctor<T>> bit_code;
    if (!is_custom) {
      bit_code.reset(new math::MatrixBitCodeFunctor<T>(num_classes,
                                                       label->data<int64_t>()));
    } else {
      bit_code.reset(new math::MatrixBitCodeFunctor<T>(path, code,
                                                       label->data<int64_t>()));
    }

    auto& place = *ctx.template device_context<DeviceContext>().eigen_device();
    auto pre_out_mat = EigenMatrix<T>::From(*pre_out);
    auto pre_out_grad_mat = EigenMatrix<T>::From(pre_out_grad);
    auto out_grad_mat = EigenMatrix<T>::From(*out_grad);
    Eigen::array<int, 2> bcast({{1, static_cast<int>(pre_out_grad.dims()[1])}});

    // softrelu derivative
    pre_out_grad_mat.device(place) =
        static_cast<T>(1.0) - static_cast<T>(1.0) / pre_out_mat.exp();
    bit_code->Sub(&pre_out_grad);  // the gradient of clip(w * x + b)
    pre_out_grad_mat.device(place) =
        pre_out_grad_mat * out_grad_mat.broadcast(bcast);
    // TODO(guosheng): multiply pre_out_grad with the subgradient of clipping
    // to be consistent with the clipping in the forward pass.
    if (bias_grad) {
      bias_grad->mutable_data<T>(ctx.GetPlace());
      zero(dev_ctx, bias_grad, static_cast<T>(0.0));
      bit_code->AddGrad(pre_out_grad, bias_grad);
    }
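
    // Weight gradient: with is_sparse off, W@GRAD is a dense LoDTensor of the
    // same shape as W; with it on, W@GRAD is a SelectedRows variable whose
    // rows are the distinct tree nodes touched by this batch (collected by
    // cal_rows from the path table), which keeps the update sparse.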
    if (!is_sparse) {
      auto* w_grad =
          ctx.Output<framework::LoDTensor>(framework::GradVarName("W"));
      w_grad->mutable_data<T>(ctx.GetPlace());
      zero(dev_ctx, w_grad, static_cast<T>(0.0));
      bit_code->MulGradWeight(pre_out_grad, w_grad, *in);
    } else {
      framework::Vector<int64_t> real_rows = cal_rows(path);
      auto* w_grad =
          ctx.Output<framework::SelectedRows>(framework::GradVarName("W"));
      w_grad->set_rows(real_rows);
      // build ids -> rows index map
      w_grad->SyncIndex();
      w_grad->set_height(w->dims()[0]);

      auto* w_grad_value = w_grad->mutable_value();
      framework::DDim temp_dim(w->dims());
      set(temp_dim, 0, real_rows.size());
      w_grad_value->mutable_data<T>(temp_dim, ctx.GetPlace());
      zero(dev_ctx, w_grad_value, static_cast<T>(0.0));
      bit_code->MulGradWeight(pre_out_grad, w_grad, *in);
    }
    bit_code->MulGradError(pre_out_grad, *w, in_grad);
  }
};
} // namespace operators
} // namespace paddle