@@ -22,36 +22,60 @@ namespace operators {
 using Tensor = framework::Tensor;
 using LoDTensor = framework::LoDTensor;
 
+template <typename T>
+void CvmComputeKernel(const bool use_cvm, const int64_t item_width, const T** X,
+                      T** Y) {
+  const auto cvm_offset = use_cvm ? 0 : 2;
+
+  std::memcpy(*Y, *X + cvm_offset, (item_width - cvm_offset) * sizeof(T));
+
+  if (use_cvm) {
+    (*Y)[0] = log((*Y)[0] + 1);
+    (*Y)[1] = log((*Y)[1] + 1) - (*Y)[0];
+  }
+
+  (*X) += item_width;
+  (*Y) += item_width - cvm_offset;
+}
+
+template <typename T>
+void CvmGradComputeKernel(const bool use_cvm, const int64_t item_width,
+                          const T& CVM, const T** DY, T** DX) {
+  const auto cvm_offset = use_cvm ? 0 : 2;
+
+  std::memcpy(*DX + cvm_offset, *DY, (item_width - cvm_offset) * sizeof(T));
+
+  (*DX)[0] = (&CVM)[0];
+  (*DX)[1] = (&CVM)[1];
+
+  (*DX) += item_width;
+  (*DY) += item_width - cvm_offset;
+}
+
 template <typename T>
 class CVMOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    const LoDTensor* x = context.Input<LoDTensor>("X");
+    const auto* x = context.Input<LoDTensor>("X");
     const T* x_data = x->data<T>();
-    auto lod = x->lod()[0];
-    int64_t item_size = x->numel() / x->dims()[0];
-    int offset = 2;
-    if (!context.Attr<bool>("use_cvm")) {
-      item_size -= offset;
-    }
-    LoDTensor* y = context.Output<LoDTensor>("Y");
+
+    auto batch_size = x->dims()[0];
+    auto item_size = x->numel() / batch_size;
+    auto use_cvm = context.Attr<bool>("use_cvm");
+
+    auto* y = context.Output<LoDTensor>("Y");
     T* y_data = y->mutable_data<T>(context.GetPlace());
 
-    int seq_num = static_cast<int>(lod.size()) - 1;
-    for (int i = 0; i < seq_num; ++i) {
-      int64_t seq_len = static_cast<int64_t>(lod[i + 1] - lod[i]);
-
-      for (int j = 0; j < seq_len; ++j) {
-        if (context.Attr<bool>("use_cvm")) {
-          std::memcpy(y_data, x_data, item_size * sizeof(T));
-          y_data[0] = log(y_data[0] + 1);
-          y_data[1] = log(y_data[1] + 1) - y_data[0];
-          x_data += item_size;
-          y_data += item_size;
-        } else {
-          std::memcpy(y_data, x_data + offset, item_size * sizeof(T));
-          x_data += item_size + offset;
-          y_data += item_size;
+    // for input X that does not have LoD information.
+    if (x->NumLevels() == 0) {
+      for (int i = 0; i < batch_size; i++) {
+        CvmComputeKernel(use_cvm, item_size, &x_data, &y_data);
+      }
+    } else {
+      auto lod = x->lod()[0];
+      for (int i = 0; i < lod.size() - 1; ++i) {
+        for (int j = 0; j < lod[i + 1] - lod[i]; ++j) {
+          CvmComputeKernel(use_cvm, item_size, &x_data, &y_data);
         }
       }
     }
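The forward kernel makes the per-row transform explicit: each item carries the two CVM slots (the show and click counters) at its front. With use_cvm the slots are kept but rewritten to log(show + 1) and log(click + 1) - log(show + 1); without use_cvm both slots are stripped, which is why Y is item_width - 2 wide in that case. Below is a minimal standalone sketch of the forward step on two dense rows, not part of the patch; it restates the kernel so it compiles on its own, and all values are made up:

    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    template <typename T>
    void CvmComputeKernel(const bool use_cvm, const int64_t item_width,
                          const T** X, T** Y) {
      const auto cvm_offset = use_cvm ? 0 : 2;
      std::memcpy(*Y, *X + cvm_offset, (item_width - cvm_offset) * sizeof(T));
      if (use_cvm) {
        (*Y)[0] = std::log((*Y)[0] + 1);            // show -> log(show + 1)
        (*Y)[1] = std::log((*Y)[1] + 1) - (*Y)[0];  // click -> log(click + 1) - log(show + 1)
      }
      (*X) += item_width;  // advance both cursors by one item
      (*Y) += item_width - cvm_offset;
    }

    int main() {
      // Two dense rows of [show, click, feature, feature]; made-up values.
      const float x[8] = {10.f, 3.f, 0.5f, 0.7f, 20.f, 5.f, 0.1f, 0.9f};
      float y[8] = {};
      const float* xp = x;
      float* yp = y;
      for (int i = 0; i < 2; ++i) {
        CvmComputeKernel(true, 4, &xp, &yp);  // use_cvm = true, item_width = 4
      }
      for (float v : y) std::printf("%f\n", v);
      return 0;
    }

This is exactly what the new NumLevels() == 0 branch does: with no LoD, it simply calls the kernel once per batch row instead of walking sequence offsets.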
@@ -62,42 +86,39 @@ template <typename T>
 class CVMGradOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    LoDTensor* dx = context.Output<LoDTensor>(framework::GradVarName("X"));
+    auto* dx = context.Output<LoDTensor>(framework::GradVarName("X"));
     T* dx_data = dx->mutable_data<T>(context.GetPlace());
 
     const Tensor* cvm = context.Input<Tensor>("CVM");
     const T* cvm_data = cvm->data<T>();
-    int offset = 2;
-    const framework::LoDTensor* dOut =
+
+    const auto* dOut =
         context.Input<framework::LoDTensor>(framework::GradVarName("Y"));
     const T* dout_data = dOut->data<T>();
 
-    auto lod = dx->lod()[0];
-    int64_t item_size = dx->numel() / dx->dims()[0];
-    if (!context.Attr<bool>("use_cvm")) {
-      item_size -= offset;
-    }
+    auto use_cvm = context.Attr<bool>("use_cvm");
 
-    int seq_num = static_cast<int>(lod.size()) - 1;
-    for (int i = 0; i < seq_num; ++i) {
-      int64_t seq_len = static_cast<int64_t>(lod[i + 1] - lod[i]);
-
-      for (int j = 0; j < seq_len; ++j) {
-        if (context.Attr<bool>("use_cvm")) {
-          std::memcpy(dx_data, dout_data, item_size * sizeof(T));
-          dx_data[0] = cvm_data[0];
-          dx_data[1] = cvm_data[1];
-          dx_data += item_size;
-          dout_data += item_size;
-        } else {
-          std::memcpy(dx_data + offset, dout_data, item_size * sizeof(T));
-          dx_data[0] = cvm_data[0];
-          dx_data[1] = cvm_data[1];
-          dx_data += item_size + offset;
-          dout_data += item_size;
+    auto offset = 2;
+    auto batch_size = dx->dims()[0];
+    auto item_size = dx->numel() / batch_size;
+
+    // for input X that does not have LoD information.
+    if (dx->NumLevels() == 0) {
+      for (int x = 0; x < batch_size; ++x) {
+        CvmGradComputeKernel(use_cvm, item_size, *cvm_data, &dout_data,
+                             &dx_data);
+        cvm_data += offset;
+      }
+    } else {
+      auto lod = dx->lod()[0];
+      int seq_num = static_cast<int>(lod.size()) - 1;
+      for (int i = 0; i < seq_num; ++i) {
+        for (int j = 0; j < lod[i + 1] - lod[i]; ++j) {
+          CvmGradComputeKernel(use_cvm, item_size, *cvm_data, &dout_data,
+                               &dx_data);
        }
+        cvm_data += offset;
       }
     }
   }
 };
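The backward kernel mirrors the forward one: DY is copied back into DX, shifted past the two CVM slots when use_cvm is false, and the first two entries of DX are overwritten from the CVM input rather than from dOut. The (&CVM)[0] / (&CVM)[1] idiom deserves a note: CVM is a reference to the first element of an adjacent [show, click] pair, so the caller passes *cvm_data and advances cvm_data by offset per instance in the dense branch, or per sequence in the LoD branch. A minimal standalone sketch of one backward step, not part of the patch, restating the kernel with made-up values:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    template <typename T>
    void CvmGradComputeKernel(const bool use_cvm, const int64_t item_width,
                              const T& CVM, const T** DY, T** DX) {
      const auto cvm_offset = use_cvm ? 0 : 2;
      std::memcpy(*DX + cvm_offset, *DY, (item_width - cvm_offset) * sizeof(T));
      (*DX)[0] = (&CVM)[0];  // show-slot gradient taken from the CVM input
      (*DX)[1] = (&CVM)[1];  // click-slot gradient likewise
      (*DX) += item_width;
      (*DY) += item_width - cvm_offset;
    }

    int main() {
      const float cvm[2] = {1.f, 0.f};   // one adjacent [show, click] pair
      const float dy[2] = {0.3f, 0.4f};  // upstream grad row, two slots narrower (use_cvm = false)
      float dx[4] = {};
      const float* dyp = dy;
      float* dxp = dx;
      CvmGradComputeKernel(false, 4, *cvm, &dyp, &dxp);
      for (float v : dx) std::printf("%f\n", v);  // prints 1.0 0.0 0.3 0.4
      return 0;
    }

Passing the pair through a single const T& and indexing past it works here because the two values are adjacent in the CVM tensor, but it is fragile; a const T* parameter would express the same contract more plainly.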