revert-3824-remove_grad_op_type
commit
b53f4e0be7
@ -0,0 +1,29 @@
|
||||
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#include "paddle/framework/op_info.h"
|
||||
|
||||
namespace paddle {
|
||||
namespace framework {
|
||||
|
||||
// Returns the process-wide operator-info registry.
//
// Uses a function-local static pointer (C++11 guarantees thread-safe
// initialization of local statics), fixing the race that the previous
// file-scope `g_op_info_map` lazy-init had when Instance() was first
// called from multiple threads.  The map is deliberately never deleted so
// it remains valid during static destruction at program exit.
OpInfoMap& OpInfoMap::Instance() {
  static OpInfoMap* g_op_info_map = new OpInfoMap();
  return *g_op_info_map;
}
|
||||
} // namespace framework
|
||||
} // namespace paddle
|
@ -0,0 +1,101 @@
|
||||
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#pragma once
|
||||
#include <functional>
|
||||
#include <map>
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
|
||||
#include "paddle/framework/attribute.h"
|
||||
|
||||
namespace paddle {
|
||||
namespace framework {
|
||||
// Forward declaration; this header only traffics in OperatorBase pointers.
class OperatorBase;
// Maps a parameter name to the list of variable names bound to it.
using VariableNameMap = std::map<std::string, std::vector<std::string>>;

// Factory signature that builds an operator from its type name, inputs,
// outputs and attributes.  Returns a raw OperatorBase pointer
// (ownership convention not visible in this header -- see callers).
using OpCreator = std::function<OperatorBase*(
    const std::string& /*type*/, const VariableNameMap& /*inputs*/,
    const VariableNameMap& /*outputs*/, const AttributeMap& /*attrs*/)>;
|
||||
|
||||
struct OpInfo {
|
||||
OpCreator creator_;
|
||||
std::string grad_op_type_;
|
||||
OpProto* proto_;
|
||||
OpAttrChecker* checker_;
|
||||
|
||||
bool HasOpProtoAndChecker() const {
|
||||
return proto_ != nullptr && checker_ != nullptr;
|
||||
}
|
||||
|
||||
const OpProto& Proto() const {
|
||||
PADDLE_ENFORCE_NOT_NULL(proto_, "Operator Proto has not been registered");
|
||||
PADDLE_ENFORCE(proto_->IsInitialized(),
|
||||
"Operator Proto must be initialized in op info");
|
||||
return *proto_;
|
||||
}
|
||||
|
||||
const OpAttrChecker& Checker() const {
|
||||
PADDLE_ENFORCE_NOT_NULL(checker_,
|
||||
"Operator Checker has not been registered");
|
||||
return *checker_;
|
||||
}
|
||||
|
||||
const OpCreator& Creator() const {
|
||||
PADDLE_ENFORCE_NOT_NULL(creator_,
|
||||
"Operator Creator has not been registered");
|
||||
return creator_;
|
||||
}
|
||||
|
||||
bool HasGradientOp() const { return !grad_op_type_.empty(); }
|
||||
};
|
||||
|
||||
class OpInfoMap {
|
||||
public:
|
||||
static OpInfoMap& Instance();
|
||||
|
||||
OpInfoMap(const OpInfoMap& o) = delete;
|
||||
OpInfoMap(OpInfoMap&& o) = delete;
|
||||
OpInfoMap& operator=(const OpInfoMap& o) = delete;
|
||||
OpInfoMap& operator=(OpInfoMap&& o) = delete;
|
||||
|
||||
bool Has(const std::string& op_type) const {
|
||||
return map_.find(op_type) != map_.end();
|
||||
}
|
||||
|
||||
void Insert(const std::string& type, const OpInfo& info) {
|
||||
PADDLE_ENFORCE(!Has(type), "Operator %s has been registered", type);
|
||||
map_.insert({type, info});
|
||||
}
|
||||
|
||||
const OpInfo& Get(const std::string& type) const {
|
||||
auto it = map_.find(type);
|
||||
PADDLE_ENFORCE(it != map_.end(), "Operator %s are not found", type);
|
||||
return it->second;
|
||||
}
|
||||
|
||||
template <typename Callback>
|
||||
void IterAllInfo(Callback callback) {
|
||||
for (auto& it : map_) {
|
||||
callback(it.first, it.second);
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
OpInfoMap() = default;
|
||||
std::unordered_map<std::string, const OpInfo> map_;
|
||||
};
|
||||
|
||||
} // namespace framework
|
||||
} // namespace paddle
|
@ -0,0 +1,91 @@
|
||||
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#include <glog/logging.h>
|
||||
#include "unsupported/Eigen/CXX11/Tensor"
|
||||
|
||||
namespace paddle {
|
||||
|
||||
template <class T>
|
||||
struct EigenBlasGemm {
|
||||
typedef Eigen::TensorMap<Eigen::Tensor<T, 2, Eigen::RowMajor, int>,
|
||||
Eigen::Aligned>
|
||||
Matrix;
|
||||
|
||||
static void compute(const bool transA,
|
||||
const bool transB,
|
||||
const int M,
|
||||
const int N,
|
||||
const int K,
|
||||
const T alpha,
|
||||
const T* A,
|
||||
const int lda,
|
||||
const T* B,
|
||||
const int ldb,
|
||||
const T beta,
|
||||
T* C,
|
||||
const int ldc) {
|
||||
Eigen::array<int, 2> sizeA;
|
||||
if (transA) {
|
||||
sizeA[0] = K;
|
||||
sizeA[1] = M;
|
||||
CHECK_EQ(M, lda);
|
||||
} else {
|
||||
sizeA[0] = M;
|
||||
sizeA[1] = K;
|
||||
CHECK_EQ(K, lda);
|
||||
}
|
||||
Eigen::array<int, 2> sizeB;
|
||||
if (transB) {
|
||||
sizeB[0] = N;
|
||||
sizeB[1] = K;
|
||||
CHECK_EQ(K, ldb);
|
||||
} else {
|
||||
sizeB[0] = K;
|
||||
sizeB[1] = N;
|
||||
CHECK_EQ(N, ldb);
|
||||
}
|
||||
Eigen::array<int, 2> sizeC;
|
||||
sizeC[0] = M;
|
||||
sizeC[1] = N;
|
||||
CHECK_EQ(N, ldc);
|
||||
|
||||
const Matrix a(const_cast<T*>(A), sizeA);
|
||||
const Matrix b(const_cast<T*>(B), sizeB);
|
||||
Matrix c(C, sizeC);
|
||||
|
||||
typedef typename Eigen::Tensor<T, 2>::DimensionPair DimPair;
|
||||
Eigen::array<DimPair, 1> dims;
|
||||
dims[0] = DimPair(1, 0);
|
||||
dims[0].first = transA ? 0 : 1;
|
||||
dims[0].second = transB ? 1 : 0;
|
||||
|
||||
Eigen::DefaultDevice device;
|
||||
if (alpha == T(1) && beta == T(0)) {
|
||||
c.device(device) = a.contract(b, dims);
|
||||
} else if (alpha == T(1) && beta == T(1)) {
|
||||
c.device(device) += a.contract(b, dims);
|
||||
} else {
|
||||
c.device(device) = alpha * a.contract(b, dims) + beta * c;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Explicitly instantiate for the floating-point precision this build of
// Paddle uses (double when PADDLE_TYPE_DOUBLE is defined, float otherwise).
#ifdef PADDLE_TYPE_DOUBLE
template class EigenBlasGemm<double>;
#else
template class EigenBlasGemm<float>;
#endif
|
||||
|
||||
} // namespace paddle
|
@ -0,0 +1,90 @@
|
||||
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#include "GemmFunctor.h"
|
||||
#include "paddle/math/MathFunctions.h"
|
||||
|
||||
namespace paddle {
|
||||
|
||||
template <class T>
|
||||
struct BlasGemm<DEVICE_TYPE_CPU, T> {
|
||||
static void compute(const bool transA,
|
||||
const bool transB,
|
||||
const int M,
|
||||
const int N,
|
||||
const int K,
|
||||
const T alpha,
|
||||
const T* A,
|
||||
const int lda,
|
||||
const T* B,
|
||||
const int ldb,
|
||||
const T beta,
|
||||
T* C,
|
||||
const int ldc) {
|
||||
#ifdef PADDLE_USE_EIGEN_FOR_BLAS
|
||||
EigenBlasGemm<T>::compute(
|
||||
transA, transB, M, N, K, alpha, A, lda, B, ldb, beta, C, ldc);
|
||||
#else
|
||||
gemm<T>(transA == false ? CblasNoTrans : CblasTrans,
|
||||
transB == false ? CblasNoTrans : CblasTrans,
|
||||
M,
|
||||
N,
|
||||
K,
|
||||
alpha,
|
||||
A,
|
||||
lda,
|
||||
B,
|
||||
ldb,
|
||||
beta,
|
||||
C,
|
||||
ldc);
|
||||
#endif
|
||||
}
|
||||
};
|
||||
|
||||
template <class T>
|
||||
struct BlasGemm<DEVICE_TYPE_GPU, T> {
|
||||
static void compute(const bool transA,
|
||||
const bool transB,
|
||||
const int M,
|
||||
const int N,
|
||||
const int K,
|
||||
const T alpha,
|
||||
const T* A,
|
||||
const int lda,
|
||||
const T* B,
|
||||
const int ldb,
|
||||
const T beta,
|
||||
T* C,
|
||||
const int ldc) {
|
||||
hl_matrix_mul((T*)A,
|
||||
transA == false ? HPPL_OP_N : HPPL_OP_T,
|
||||
(T*)B,
|
||||
transB == false ? HPPL_OP_N : HPPL_OP_T,
|
||||
C,
|
||||
M,
|
||||
N,
|
||||
K,
|
||||
alpha,
|
||||
beta,
|
||||
lda,
|
||||
ldb,
|
||||
ldc);
|
||||
}
|
||||
};
|
||||
|
||||
// Explicit instantiations for `real` (the build's configured float type).
template struct BlasGemm<DEVICE_TYPE_CPU, real>;
template struct BlasGemm<DEVICE_TYPE_GPU, real>;
|
||||
|
||||
} // namespace paddle
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue