Merge pull request #14962 from sneaxiy/rewrite_variable_type
Rewrite variable typerevert-15207-remove_op_handle_lock_and_fix_var
commit
c0bcff00dc
@ -0,0 +1,119 @@
|
||||
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "paddle/fluid/framework/var_type_traits.h"
|
||||
#include "paddle/fluid/framework/lod_rank_table.h"
|
||||
#include "paddle/fluid/framework/reader.h"
|
||||
#include "paddle/fluid/framework/scope.h"
|
||||
#include "paddle/fluid/framework/selected_rows.h"
|
||||
#include "paddle/fluid/operators/reader/lod_tensor_blocking_queue.h"
|
||||
#include "paddle/fluid/platform/macros.h"
|
||||
#ifdef PADDLE_WITH_CUDA
|
||||
#ifndef _WIN32
|
||||
#include "paddle/fluid/operators/nccl/nccl_gpu_common.h"
|
||||
#endif
|
||||
#include <cudnn.h>
|
||||
#include "paddle/fluid/operators/conv_cudnn_op_cache.h"
|
||||
#include "paddle/fluid/operators/cudnn_rnn_cache.h"
|
||||
#endif
|
||||
|
||||
namespace paddle {
|
||||
namespace framework {
|
||||
|
||||
// Besides registering variable type id, it is helpful to register a
|
||||
// var_id -> std::type_index map (for example, get type names according to id)
|
||||
namespace detail {
|
||||
|
||||
// Recursive compile-time loop over [kStart, kEnd) of the registered type
// list: each step registers the kStart-th type's (id, type_index) pair in
// both lookup maps and then recurses on kStart + 1. The kStop == true
// specialization below terminates the recursion.
template <int kStart, int kEnd, bool kStop>
struct VarIdToTypeIndexMapInitializerImpl {
  template <typename MapType1, typename MapType2>
  static void Init(MapType1 *id_to_type, MapType2 *type_to_id) {
    // The kStart-th type registered in VarTypeRegistry.
    using Type =
        typename std::tuple_element<kStart, VarTypeRegistry::ArgTuple>::type;
    static_assert(!std::is_same<Type, void>::value, "Type cannot be void");
    constexpr int kId = VarTypeTrait<Type>::kId;
    auto type = std::type_index(typeid(Type));
    // The two maps must form a bijection; fail loudly on any duplicate id
    // or duplicate type_index before inserting.
    PADDLE_ENFORCE(id_to_type->count(kId) == 0,
                   "Registered duplicate type id %d for type %s", kId,
                   type.name());
    PADDLE_ENFORCE(type_to_id->count(type) == 0,
                   "Registered duplicate type_index %s for id %d", type.name(),
                   kId);
    id_to_type->emplace(kId, type);
    type_to_id->emplace(type, kId);
    // Advance to the next registered type; stops once kStart + 1 == kEnd.
    VarIdToTypeIndexMapInitializerImpl<kStart + 1, kEnd,
                                       kStart + 1 == kEnd>::Init(id_to_type,
                                                                 type_to_id);
  }
};

// Recursion terminator (kStop == true): no registered types remain.
template <int kStart, int kEnd>
struct VarIdToTypeIndexMapInitializerImpl<kStart, kEnd, true> {
  template <typename MapType1, typename MapType2>
  static void Init(MapType1 *, MapType2 *) {}
};

// VarIdToTypeIndexMapInitializer is designed to initialize var_id ->
// std::type_index map and std::type_index -> var_id map
using VarIdToTypeIndexMapInitializer =
    VarIdToTypeIndexMapInitializerImpl<0, VarTypeRegistry::kRegisteredTypeNum,
                                       VarTypeRegistry::kRegisteredTypeNum ==
                                           0>;
|
||||
|
||||
struct VarIdToTypeIndexMapHolder {
|
||||
DISABLE_COPY_AND_ASSIGN(VarIdToTypeIndexMapHolder);
|
||||
|
||||
public:
|
||||
static const std::type_index &ToTypeIndex(int var_id) {
|
||||
auto it = Instance().id_to_type_map_.find(var_id);
|
||||
PADDLE_ENFORCE(it != Instance().id_to_type_map_.end(),
|
||||
"VarId %d is not registered.", var_id);
|
||||
return it->second;
|
||||
}
|
||||
|
||||
static int ToTypeId(const std::type_index &type) {
|
||||
auto it = Instance().type_to_id_map_.find(type);
|
||||
PADDLE_ENFORCE(it != Instance().type_to_id_map_.end(),
|
||||
"VarType %s is not registered.", type.name());
|
||||
return it->second;
|
||||
}
|
||||
|
||||
private:
|
||||
VarIdToTypeIndexMapHolder() {
|
||||
VarIdToTypeIndexMapInitializer::Init(&id_to_type_map_, &type_to_id_map_);
|
||||
}
|
||||
|
||||
static const VarIdToTypeIndexMapHolder &Instance() {
|
||||
static const VarIdToTypeIndexMapHolder instance;
|
||||
return instance;
|
||||
}
|
||||
|
||||
std::unordered_map<int, std::type_index> id_to_type_map_;
|
||||
std::unordered_map<std::type_index, int> type_to_id_map_;
|
||||
};
|
||||
|
||||
} // namespace detail
|
||||
|
||||
// Returns the std::type_index registered for var_id (see VarTypeTrait::kId).
const std::type_index &ToTypeIndex(int var_id) {
  return detail::VarIdToTypeIndexMapHolder::ToTypeIndex(var_id);
}

// Returns the implementation-defined name of the type registered for var_id
// (i.e. std::type_index::name() of that type).
const char *ToTypeName(int var_id) { return ToTypeIndex(var_id).name(); }

// Reverse lookup: returns the registered var_id for a std::type_index.
int ToTypeId(const std::type_index &type) {
  return detail::VarIdToTypeIndexMapHolder::ToTypeId(type);
}
|
||||
|
||||
} // namespace framework
|
||||
} // namespace paddle
|
@ -0,0 +1,195 @@
|
||||
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <map>
|
||||
#include <string>
|
||||
#include <tuple>
|
||||
#include <typeindex>
|
||||
#include <vector>
|
||||
#include "paddle/fluid/framework/framework.pb.h"
|
||||
#include "paddle/fluid/framework/lod_tensor_array.h"
|
||||
#include "paddle/fluid/platform/place.h"
|
||||
#ifdef PADDLE_WITH_CUDA
|
||||
#include <cudnn.h>
|
||||
#ifndef _WIN32
|
||||
#include <nccl.h>
|
||||
#endif
|
||||
#endif
|
||||
|
||||
// Users should add forward declarations here
|
||||
namespace paddle {
|
||||
|
||||
namespace platform {
|
||||
#ifdef PADDLE_WITH_CUDA
|
||||
#ifndef _WIN32
|
||||
class Communicator;
|
||||
#endif
|
||||
#endif
|
||||
} // namespace platform
|
||||
|
||||
namespace framework {
|
||||
class Tensor;
|
||||
class LoDTensor;
|
||||
class SelectedRows;
|
||||
class LoDRankTable;
|
||||
class ReaderHolder;
|
||||
class Scope;
|
||||
} // namespace framework
|
||||
|
||||
namespace operators {
|
||||
template <typename T>
|
||||
class AlgorithmsCache;
|
||||
|
||||
class CudnnRNNCache;
|
||||
|
||||
namespace reader {
|
||||
class LoDTensorBlockingQueueHolder;
|
||||
} // namespace reader
|
||||
} // namespace operators
|
||||
|
||||
} // namespace paddle
|
||||
|
||||
namespace paddle {
|
||||
namespace framework {
|
||||
|
||||
const char *ToTypeName(int var_id);
|
||||
const std::type_index &ToTypeIndex(int var_id);
|
||||
int ToTypeId(const std::type_index &type);
|
||||
|
||||
namespace detail {
|
||||
|
||||
// Head/tail recursion over the parameter pack: compare T against the head
// type U; on mismatch, search Rest... and shift any found index by one.
template <typename T, typename U, typename... Rest>
struct TypePosFinderImpl {
 private:
  static constexpr int kTailPos = TypePosFinderImpl<T, Rest...>::kPos;

 public:
  static constexpr int kPos =
      std::is_same<T, U>::value ? 0 : (kTailPos < 0 ? -1 : kTailPos + 1);
};

// Base case with a single remaining type: either the head matches
// (position 0) or T does not occur at all (-1).
template <typename T, typename U>
struct TypePosFinderImpl<T, U> {
  static constexpr int kPos = std::is_same<T, U>::value ? 0 : -1;
};

// TypePosFinder helps to find the position in which T is inside Args...
// If T is not inside Args..., kPos would be -1
template <typename T, typename... Args>
struct TypePosFinder {
  static constexpr int kPos = TypePosFinderImpl<T, Args...>::kPos;
};
|
||||
|
||||
template <typename... Args>
|
||||
struct VarTypeRegistryImpl {
|
||||
static constexpr size_t kRegisteredTypeNum = sizeof...(Args);
|
||||
using ArgTuple = std::tuple<Args...>;
|
||||
|
||||
// TypePos() returns the position in which T is inside Args...
|
||||
// If T is not inside Args..., return -1
|
||||
template <typename T>
|
||||
static constexpr int TypePos() {
|
||||
return TypePosFinder<T, Args...>::kPos;
|
||||
}
|
||||
|
||||
// IsRegistered() returns whether T is registered inside RegistryImpl
|
||||
template <typename T>
|
||||
static constexpr bool IsRegistered() {
|
||||
return TypePos<T>() >= 0;
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace detail
|
||||
|
||||
// Specializes VarTypeTrait<type> so that its kId is the fixed protobuf id
// from framework.proto rather than an auto-generated one; the type must
// already be listed in VarTypeRegistry (enforced by the static_assert).
#define REG_PROTO_VAR_TYPE_TRAIT(type, proto_id)           \
  template <>                                              \
  struct VarTypeTrait<type> {                              \
    static_assert(VarTypeRegistry::IsRegistered<type>(),   \
                  "Must be registered type");              \
    using Type = type;                                     \
    static constexpr int kId = static_cast<int>(proto_id); \
  }
|
||||
|
||||
/**
|
||||
* The following codes are designed to register variable types.
|
||||
* Only registered types can be stored in Variable.
|
||||
* This registry mechanism is designed to speed up Variable.
|
||||
*
|
||||
* Caution: If you want to add more var types, please consider carefully
|
||||
* whether you really need to add it.
|
||||
*/
|
||||
|
||||
// Users should add other variable types below.
// Paddle would generate unique Ids for each registered variable types.
// NOTE: an auto-generated kId is TypePos + 2 * proto::VarType::TUPLE (see
// VarTypeTrait below), so a type's position in this list determines its id.
// Append new types at the end to keep existing auto-generated ids stable;
// also note the CUDA-only section means auto ids can differ between CUDA
// and CPU builds.
using VarTypeRegistry = detail::VarTypeRegistryImpl<
    Tensor, LoDTensor, SelectedRows, std::vector<Scope *>, LoDRankTable,
    LoDTensorArray, platform::PlaceList, ReaderHolder, std::string, Scope *,
    std::map<size_t, Tensor>, operators::reader::LoDTensorBlockingQueueHolder,
#ifdef PADDLE_WITH_CUDA
#ifndef _WIN32
    ncclUniqueId, platform::Communicator,
#endif
    operators::AlgorithmsCache<cudnnConvolutionFwdAlgo_t>,
    operators::AlgorithmsCache<cudnnConvolutionBwdDataAlgo_t>,
    operators::AlgorithmsCache<cudnnConvolutionBwdFilterAlgo_t>,
    operators::CudnnRNNCache,
#endif
    int, float>;
|
||||
|
||||
template <typename T>
struct VarTypeTrait {
  static_assert(VarTypeRegistry::IsRegistered<T>(), "Must be registered type");
  using Type = T;
  /**
   * Unique VarType Id generation.
   *
   * The auto-generated id must not collide with any protobuf id defined in
   * framework.proto, so it is offset past the current maximum protobuf id
   * (proto::VarType::TUPLE). Because more protobuf ids may be added in the
   * future, the offset is twice that maximum, leaving headroom so this
   * generation algorithm rarely needs to change:
   *
   *   kId = TypePos<T>() + 2 * proto::VarType::TUPLE
   */
  static constexpr int kId = VarTypeRegistry::TypePos<T>() +
                             static_cast<int>(proto::VarType::TUPLE) * 2;
};
|
||||
|
||||
// Users should set some of variable type ids to be what is defined in
// framework.proto below
// (each line specializes VarTypeTrait so kId equals the protobuf id instead
// of an auto-generated one).
REG_PROTO_VAR_TYPE_TRAIT(LoDTensor, proto::VarType::LOD_TENSOR);
REG_PROTO_VAR_TYPE_TRAIT(SelectedRows, proto::VarType::SELECTED_ROWS);
REG_PROTO_VAR_TYPE_TRAIT(std::vector<Scope *>, proto::VarType::STEP_SCOPES);
REG_PROTO_VAR_TYPE_TRAIT(LoDRankTable, proto::VarType::LOD_RANK_TABLE);
REG_PROTO_VAR_TYPE_TRAIT(LoDTensorArray, proto::VarType::LOD_TENSOR_ARRAY);
REG_PROTO_VAR_TYPE_TRAIT(platform::PlaceList, proto::VarType::PLACE_LIST);
REG_PROTO_VAR_TYPE_TRAIT(ReaderHolder, proto::VarType::READER);
REG_PROTO_VAR_TYPE_TRAIT(int, proto::VarType::INT32);
REG_PROTO_VAR_TYPE_TRAIT(float, proto::VarType::FP32);
|
||||
|
||||
/** End of variable type registration */

// Convenience wrapper: whether T may be stored in a Variable (i.e. whether
// T appears in VarTypeRegistry).
template <typename T>
inline constexpr bool IsRegisteredVarType() {
  return VarTypeRegistry::IsRegistered<T>();
}

// The macro is only intended for the registrations above; keep it from
// leaking to includers.
#undef REG_PROTO_VAR_TYPE_TRAIT
|
||||
} // namespace framework
|
||||
} // namespace paddle
|
@ -0,0 +1,120 @@
|
||||
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include <cstdint>
|
||||
#include <iostream>
|
||||
#include <unordered_set>
|
||||
|
||||
#include "paddle/fluid/framework/lod_rank_table.h"
|
||||
#include "paddle/fluid/framework/reader.h"
|
||||
#include "paddle/fluid/framework/scope.h"
|
||||
#include "paddle/fluid/framework/selected_rows.h"
|
||||
#include "paddle/fluid/framework/var_type_traits.h"
|
||||
#include "paddle/fluid/operators/reader/lod_tensor_blocking_queue.h"
|
||||
#ifdef PADDLE_WITH_CUDA
|
||||
#ifndef _WIN32
|
||||
#include "paddle/fluid/operators/nccl/nccl_gpu_common.h"
|
||||
#endif
|
||||
#include "paddle/fluid/operators/conv_cudnn_op_cache.h"
|
||||
#include "paddle/fluid/operators/cudnn_rnn_cache.h"
|
||||
#endif
|
||||
|
||||
namespace paddle {
|
||||
namespace framework {
|
||||
|
||||
// Compile-time loop over all registered types in [kPos, kEnd): verifies
// that id <-> type_index conversions round-trip, and collects every id and
// type_index to prove global uniqueness. Terminated by the kStop == true
// specialization below.
template <int kPos, int kEnd, bool kStop>
struct TypeIndexChecker {
  template <typename SetType1, typename SetType2>
  static void Check(SetType1 *var_id_set, SetType2 *type_index_set) {
    // The kPos-th registered variable type.
    using Type =
        typename std::tuple_element<kPos, VarTypeRegistry::ArgTuple>::type;
    static_assert(std::is_same<typename VarTypeTrait<Type>::Type, Type>::value,
                  "Type must be the same");
    constexpr auto kId = VarTypeTrait<Type>::kId;
    std::type_index actual_type(typeid(Type));
    // Name/index/id conversions must all agree and round-trip both ways.
    EXPECT_EQ(std::string(ToTypeName(kId)), std::string(actual_type.name()));
    EXPECT_EQ(ToTypeIndex(kId), actual_type);
    EXPECT_EQ(ToTypeId(actual_type), kId);
    EXPECT_EQ(ToTypeIndex(ToTypeId(actual_type)), actual_type);
    EXPECT_EQ(ToTypeId(ToTypeIndex(kId)), kId);

    // No id and no type_index may appear twice across the registry.
    EXPECT_TRUE(var_id_set->count(kId) == 0);             // NOLINT
    EXPECT_TRUE(type_index_set->count(actual_type) == 0);  // NOLINT
    var_id_set->insert(kId);
    type_index_set->insert(std::type_index(typeid(Type)));
    TypeIndexChecker<kPos + 1, kEnd, kPos + 1 == kEnd>::Check(var_id_set,
                                                              type_index_set);
  }
};

// Recursion terminator: every registered type has been checked.
template <int kPos, int kEnd>
struct TypeIndexChecker<kPos, kEnd, true> {
  template <typename SetType1, typename SetType2>
  static void Check(SetType1 *, SetType2 *) {}
};
|
||||
|
||||
// Walks every type in VarTypeRegistry and checks that ids/type_indexes are
// globally unique and mutually consistent (see TypeIndexChecker above).
TEST(var_type_traits, check_no_duplicate_registry) {
  constexpr size_t kRegisteredNum = VarTypeRegistry::kRegisteredTypeNum;
  std::unordered_set<int> var_id_set;
  std::unordered_set<std::type_index> type_index_set;
  TypeIndexChecker<0, kRegisteredNum, kRegisteredNum == 0>::Check(
      &var_id_set, &type_index_set);
}
|
||||
|
||||
// Returns whether VarTypeTrait<T>::kId equals the expected protobuf id.
template <typename T>
bool CheckVarId(int proto_id) {
  static_assert(std::is_same<typename VarTypeTrait<T>::Type, T>::value,
                "Type must be the same");
  return VarTypeTrait<T>::kId == proto_id;
}
|
||||
|
||||
// Verifies that every protobuf-backed type's kId matches its framework.proto
// id, and that the proto::VarType_Type_* enumerators alias proto::VarType::*.
TEST(var_type_traits, check_proto_type_id) {
  ASSERT_TRUE(CheckVarId<LoDTensor>(proto::VarType::LOD_TENSOR));
  ASSERT_TRUE(CheckVarId<SelectedRows>(proto::VarType::SELECTED_ROWS));
  ASSERT_TRUE(CheckVarId<std::vector<Scope *>>(proto::VarType::STEP_SCOPES));
  ASSERT_TRUE(CheckVarId<LoDRankTable>(proto::VarType::LOD_RANK_TABLE));
  ASSERT_TRUE(CheckVarId<LoDTensorArray>(proto::VarType::LOD_TENSOR_ARRAY));
  ASSERT_TRUE(CheckVarId<platform::PlaceList>(proto::VarType::PLACE_LIST));
  ASSERT_TRUE(CheckVarId<ReaderHolder>(proto::VarType::READER));
  ASSERT_TRUE(CheckVarId<int>(proto::VarType::INT32));
  ASSERT_TRUE(CheckVarId<float>(proto::VarType::FP32));

  ASSERT_EQ(proto::VarType_Type_LOD_TENSOR, proto::VarType::LOD_TENSOR);
  ASSERT_EQ(proto::VarType_Type_SELECTED_ROWS, proto::VarType::SELECTED_ROWS);
  ASSERT_EQ(proto::VarType_Type_STEP_SCOPES, proto::VarType::STEP_SCOPES);
  ASSERT_EQ(proto::VarType_Type_LOD_RANK_TABLE, proto::VarType::LOD_RANK_TABLE);
  ASSERT_EQ(proto::VarType_Type_LOD_TENSOR_ARRAY,
            proto::VarType::LOD_TENSOR_ARRAY);
  ASSERT_EQ(proto::VarType_Type_PLACE_LIST, proto::VarType::PLACE_LIST);
  ASSERT_EQ(proto::VarType_Type_READER, proto::VarType::READER);
  ASSERT_EQ(proto::VarType_Type_FEED_MINIBATCH, proto::VarType::FEED_MINIBATCH);
  ASSERT_EQ(proto::VarType_Type_FETCH_LIST, proto::VarType::FETCH_LIST);
  ASSERT_EQ(proto::VarType_Type_RAW, proto::VarType::RAW);
  ASSERT_EQ(proto::VarType_Type_TUPLE, proto::VarType::TUPLE);
  ASSERT_EQ(proto::VarType_Type_INT32, proto::VarType::INT32);
  ASSERT_EQ(proto::VarType_Type_FP32, proto::VarType::FP32);
}
|
||||
|
||||
// Exercises VarTypeRegistryImpl on a tiny stand-alone registry that is
// independent of the global VarTypeRegistry.
TEST(var_type_traits, test_registry) {
  using SmallRegistry =
      detail::VarTypeRegistryImpl<int8_t, int32_t, size_t, double>;
  ASSERT_EQ(SmallRegistry::TypePos<int8_t>(), 0);
  ASSERT_EQ(SmallRegistry::TypePos<int32_t>(), 1);
  ASSERT_EQ(SmallRegistry::TypePos<size_t>(), 2);
  ASSERT_EQ(SmallRegistry::TypePos<double>(), 3);
  // Unregistered types report position -1.
  ASSERT_EQ(SmallRegistry::TypePos<float>(), -1);
}
|
||||
|
||||
} // namespace framework
|
||||
} // namespace paddle
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue