// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <vector>

#include "paddle/fluid/operators/math/math_function.h"
namespace paddle {
namespace operators {
namespace math {
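// CBlas<T> dispatches each templated call below to the matching
// single- or double-precision cblas_* routine (cblas_sgemm vs. cblas_dgemm,
// and so on). The primary template is left undefined so that element types
// without a specialization fail to compile.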
template <typename T>
struct CBlas;

template <>
struct CBlas<float> {
  template <typename... ARGS>
  static void GEMM(ARGS... args) {
    cblas_sgemm(args...);
  }

  template <typename... ARGS>
  static void AXPY(ARGS... args) {
    cblas_saxpy(args...);
  }

  template <typename... ARGS>
  static void GEMV(ARGS... args) {
    cblas_sgemv(args...);
  }

#ifdef PADDLE_WITH_MKLML
  template <typename... ARGS>
  static void GEMM_BATCH(ARGS... args) {
    cblas_sgemm_batch(args...);
  }
#endif
};

template <>
struct CBlas<double> {
  template <typename... ARGS>
  static void GEMM(ARGS... args) {
    cblas_dgemm(args...);
  }

  template <typename... ARGS>
  static void AXPY(ARGS... args) {
    cblas_daxpy(args...);
  }

  template <typename... ARGS>
  static void GEMV(ARGS... args) {
    cblas_dgemv(args...);
  }

#ifdef PADDLE_WITH_MKLML
  template <typename... ARGS>
  static void GEMM_BATCH(ARGS... args) {
    cblas_dgemm_batch(args...);
  }
#endif
};
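// There is no CPU BLAS routine for float16; this specialization exists only
// so that the wrapper compiles, and it throws if it is ever invoked.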
template <>
struct CBlas<platform::float16> {
  static void GEMM(...) { PADDLE_THROW("float16 GEMM not supported on CPU"); }
#ifdef PADDLE_WITH_MKLML
  static void GEMM_BATCH(...) {
    PADDLE_THROW("float16 GEMM_BATCH not supported on CPU");
  }
#endif
};
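// The CPU GEMM wrappers assume row-major storage: lda/ldb follow from whether
// each operand is transposed, and ldc is always N.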
template <>
template <typename T>
void Blas<platform::CPUDeviceContext>::GEMM(CBLAS_TRANSPOSE transA,
                                            CBLAS_TRANSPOSE transB, int M,
                                            int N, int K, T alpha, const T *A,
                                            const T *B, T beta, T *C) const {
  int lda = (transA == CblasNoTrans) ? K : M;
  int ldb = (transB == CblasNoTrans) ? N : K;
  int ldc = N;
  CBlas<T>::GEMM(CblasRowMajor, transA, transB, M, N, K, alpha, A, lda, B, ldb,
                 beta, C, ldc);
}

template <>
template <typename T>
void Blas<platform::CPUDeviceContext>::GEMM(bool transA, bool transB, int M,
                                            int N, int K, T alpha, const T *A,
                                            int lda, const T *B, int ldb,
                                            T beta, T *C, int ldc) const {
  CBlas<T>::GEMM(CblasRowMajor, transA == false ? CblasNoTrans : CblasTrans,
                 transB == false ? CblasNoTrans : CblasTrans, M, N, K, alpha, A,
                 lda, B, ldb, beta, C, ldc);
}
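// MatMul checks that all operands are 2-D tensors on the same place, derives
// M, N and K from the output and input shapes, and forwards to GEMM.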
template <typename DeviceContext>
template <typename T>
void Blas<DeviceContext>::MatMul(const framework::Tensor &mat_a, bool trans_a,
                                 const framework::Tensor &mat_b, bool trans_b,
                                 T alpha, framework::Tensor *mat_out,
                                 T beta) const {
  auto dim_a = mat_a.dims();
  auto dim_b = mat_b.dims();
  auto dim_out = mat_out->dims();
  PADDLE_ENFORCE(dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
                 "The input and output of matmul must be matrices");
  PADDLE_ENFORCE(
      mat_a.place() == mat_b.place() && mat_a.place() == mat_out->place(),
      "The places of matrices must be the same");

  int M = dim_out[0];
  int N = dim_out[1];
  int K = !trans_a ? dim_a[1] : dim_a[0];

  CBLAS_TRANSPOSE transA = !trans_a ? CblasNoTrans : CblasTrans;
  CBLAS_TRANSPOSE transB = !trans_b ? CblasNoTrans : CblasTrans;

  this->GEMM(transA, transB, M, N, K, alpha, mat_a.data<T>(), mat_b.data<T>(),
             beta, mat_out->data<T>());
}
template <>
template <typename T>
void Blas<platform::CPUDeviceContext>::AXPY(int n, T alpha, const T *x,
                                            T *y) const {
  CBlas<T>::AXPY(n, alpha, x, 1, y, 1);
}

template <>
template <typename T>
void Blas<platform::CPUDeviceContext>::GEMV(bool trans_a, int M, int N, T alpha,
                                            const T *A, const T *B, T beta,
                                            T *C) const {
  CBLAS_TRANSPOSE transA = !trans_a ? CblasNoTrans : CblasTrans;
  CBlas<T>::GEMV(CblasRowMajor, transA, M, N, alpha, A, N, B, 1, beta, C, 1);
}
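// BatchedGEMM uses MKL's grouped cblas_?gemm_batch when PADDLE_WITH_MKLML is
// defined; otherwise it falls back to a loop of single GEMM calls over the
// strided batch.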
template <>
template <typename T>
void Blas<platform::CPUDeviceContext>::BatchedGEMM(
    CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int M, int N, int K,
    T alpha, const T *A, const T *B, T beta, T *C, int batchCount,
    int64_t strideA, int64_t strideB) const {
#ifdef PADDLE_WITH_MKLML
  int lda = (transA == CblasNoTrans) ? K : M;
  int ldb = (transB == CblasNoTrans) ? N : K;
  int ldc = N;
  auto a_array = std::vector<const T *>(batchCount);
  auto b_array = std::vector<const T *>(batchCount);
  auto c_array = std::vector<T *>(batchCount);
  for (int k = 0; k < batchCount; ++k) {
    a_array[k] = &A[k * strideA];
    b_array[k] = &B[k * strideB];
    c_array[k] = &C[k * M * N];
  }

  CBlas<T>::GEMM_BATCH(CblasRowMajor, &transA, &transB, &M, &N, &K, &alpha,
                       a_array.data(), &lda, b_array.data(), &ldb, &beta,
                       c_array.data(), &ldc, 1 /* group_count */, &batchCount);
#else
  for (int k = 0; k < batchCount; ++k) {
    const T *Ak = &A[k * strideA];
    const T *Bk = &B[k * strideB];
    T *Ck = &C[k * M * N];
    this->template GEMM<T>(transA, transB, M, N, K, alpha, Ak, Bk, beta, Ck);
  }
#endif
}
}  // namespace math
}  // namespace operators
}  // namespace paddle