@@ -14,6 +14,7 @@ limitations under the License. */

 #pragma once

+#include <vector>
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"

@@ -50,47 +51,40 @@ class FakeChannelWiseDequantizeMaxAbsKernel : public framework::OpKernel<T> {
  public:
   virtual void Compute(const framework::ExecutionContext& ctx) const {
     auto* in = ctx.Input<framework::Tensor>("X");
-    auto* weight_scales = ctx.Input<framework::Tensor>("WeightScales");
+    auto scales = ctx.MultiInput<framework::Tensor>("Scales");
     auto* out = ctx.Output<framework::Tensor>("Out");

-    PADDLE_ENFORCE_EQ(weight_scales->numel(), in->dims()[0],
-                      "The weight uses the per-channel quantization type, so "
-                      "the number of weight scale values must be the same with "
+    PADDLE_ENFORCE_EQ(scales[0]->numel(), in->dims()[0],
+                      "The number of first scale values must be the same with "
                       "first dimension value of Input(X).");

-    int ativation_bits = ctx.Attr<int>("activation_bits");
-    int weight_bits = ctx.Attr<int>("weight_bits");
-    int range = std::pow(2, weight_bits - 1) - 1;
+    auto quant_bits = ctx.Attr<std::vector<int>>("quant_bits");
+    int max_range = std::pow(2, quant_bits[0] - 1) - 1;

     auto& dev_ctx = ctx.template device_context<DeviceContext>();
     out->mutable_data<T>(dev_ctx.GetPlace());

     auto dequant = DequantizeFunctor<DeviceContext, T>();
-    if (ctx.HasInput("ActivationScale")) {
-      auto* activation_scale = ctx.Input<framework::Tensor>("ActivationScale");
-      PADDLE_ENFORCE_EQ(activation_scale->numel(), 1,
-                        "The activation uses per-layer quantization type, so "
-                        "it must have only one value.");
-      framework::Tensor cpu_weigth_scales;
-      framework::TensorCopy(*weight_scales, platform::CPUPlace(),
-                            &cpu_weigth_scales);
-      dev_ctx.Wait();
-      const T* weight_scales_data = cpu_weigth_scales.data<T>();
-      range *= (std::pow(2, ativation_bits - 1) - 1);
+    if (scales.size() == 2) {
+      PADDLE_ENFORCE_EQ(
+          scales[1]->numel(), 1,
+          "The second scale tensor should only have one value at now.");
       for (int64_t i = 0; i < in->dims()[0]; i++) {
         framework::Tensor one_channel_in = in->Slice(i, i + 1);
         framework::Tensor one_channel_out = out->Slice(i, i + 1);
-        auto max_range = range / weight_scales_data[i];
-        dequant(dev_ctx, &one_channel_in, activation_scale,
+        framework::Tensor one_channel_scale = scales[0]->Slice(i, i + 1);
+        max_range *= (std::pow(2, quant_bits[1] - 1) - 1);
+        dequant(dev_ctx, &one_channel_in, &one_channel_scale,
                 static_cast<T>(max_range), &one_channel_out);
       }
+      dequant(dev_ctx, out, scales[1], static_cast<T>(1), out);
     } else {
       for (int64_t i = 0; i < in->dims()[0]; i++) {
         framework::Tensor one_channel_in = in->Slice(i, i + 1);
         framework::Tensor one_channel_out = out->Slice(i, i + 1);
-        framework::Tensor one_channel_scale = weight_scales->Slice(i, i + 1);
+        framework::Tensor one_channel_scale = scales[0]->Slice(i, i + 1);
         dequant(dev_ctx, &one_channel_in, &one_channel_scale,
-                static_cast<T>(range), &one_channel_out);
+                static_cast<T>(max_range), &one_channel_out);
       }
     }
   }
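Note on the arithmetic this change switches to: the per-channel path reduces to out = q * scale_c / max_range with max_range = 2^(quant_bits[0] - 1) - 1, and a minimal plain-C++ sketch of that computation follows, assuming DequantizeFunctor computes out = in * scale / max_range. The channel scales and quantized values below are made-up illustrative numbers, not values from this PR.

#include <cstddef>
#include <cstdio>
#include <vector>

// Minimal sketch (not Paddle code): per-channel dequantization as
// out = q * scale_c / max_range, with max_range = 2^(quant_bits[0]-1) - 1.
int main() {
  const int quant_bits0 = 8;
  const int max_range = (1 << (quant_bits0 - 1)) - 1;  // 127 for 8-bit weights

  // Hypothetical quantized values for two channels and their Scales[0] entries.
  std::vector<std::vector<float>> x = {{127.0f, -64.0f}, {32.0f, -127.0f}};
  std::vector<float> channel_scales = {0.5f, 2.0f};

  for (std::size_t c = 0; c < x.size(); ++c) {
    for (float q : x[c]) {
      // Same arithmetic the per-channel dequant call above is assumed to do.
      float dequantized = q * channel_scales[c] / static_cast<float>(max_range);
      std::printf("channel %zu: %8.3f -> %8.5f\n", c, q, dequantized);
    }
  }
  return 0;
}

When Scales holds a second tensor, the kernel rescales the whole output by that per-layer scale in a final pass, which is why the trailing dequant(dev_ctx, out, scales[1], static_cast<T>(1), out) call uses a max range of 1 (assuming the functor divides by the given max range).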