@@ -108,44 +108,16 @@ class Reducer {
  void AddDistHook(size_t var_index);

  // void MarkDenseVarReady(size_t var_index);

  // void MarkSparseVarReady(size_t var_index);

  void MarkVarReady(const size_t var_index, const bool is_used_var);

  void MarkGroupReady(size_t group_index);

  void FinalizeBackward();

  void ReleaseReducer();

  std::vector<std::vector<size_t>> RebuildGruops();

  inline bool NeedRebuildGroup() { return !has_rebuilt_group_; }

  // Reducer Singleton
  static std::shared_ptr<Reducer> SetInstance(
      const std::vector<std::shared_ptr<imperative::VarBase>>& vars,
      const std::vector<std::vector<size_t>>& group_indices,
      const std::vector<bool>& is_sparse_gradient,
      std::shared_ptr<imperative::ParallelContext> parallel_ctx,
      const std::vector<size_t>& group_size_limits, bool find_unused_vars) {
    if (NULL == s_instance_) {
      s_instance_.reset(new paddle::imperative::Reducer(
          vars, group_indices, is_sparse_gradient, parallel_ctx,
          group_size_limits, find_unused_vars));
    }
    return s_instance_;
  }

  static std::shared_ptr<Reducer> GetInstance() {
    PADDLE_ENFORCE_EQ(
        s_instance_ != NULL, true,
        platform::errors::InvalidArgument("Reducer is not initialized."));
    return s_instance_;
  }

 private:
  std::vector<std::shared_ptr<imperative::VarBase>> vars_;
  std::vector<std::vector<size_t>> group_indices_;
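
For reference, the SetInstance/GetInstance pair in the hunk above is a shared_ptr-based "set once, get everywhere" singleton. Below is a minimal, self-contained sketch of that pattern under simplified assumptions; the names used here (DummyReducer, NumVars, the int member) are illustrative only and are not part of the Paddle code base.

// Sketch of the shared_ptr singleton pattern shown in the diff above.
// Not Paddle code: DummyReducer stands in for imperative::Reducer.
#include <cassert>
#include <memory>
#include <utility>
#include <vector>

class DummyReducer {
 public:
  explicit DummyReducer(std::vector<int> vars) : vars_(std::move(vars)) {}

  // Creates the singleton on the first call; later calls ignore their
  // arguments and return the already-constructed instance.
  static std::shared_ptr<DummyReducer> SetInstance(std::vector<int> vars) {
    if (s_instance_ == nullptr) {
      s_instance_.reset(new DummyReducer(std::move(vars)));
    }
    return s_instance_;
  }

  // Fails loudly if SetInstance has not been called yet, mirroring the
  // PADDLE_ENFORCE_EQ check in the hunk above.
  static std::shared_ptr<DummyReducer> GetInstance() {
    assert(s_instance_ != nullptr && "Reducer is not initialized.");
    return s_instance_;
  }

  size_t NumVars() const { return vars_.size(); }

 private:
  std::vector<int> vars_;
  static std::shared_ptr<DummyReducer> s_instance_;
};

std::shared_ptr<DummyReducer> DummyReducer::s_instance_ = nullptr;

int main() {
  auto r = DummyReducer::SetInstance({1, 2, 3});
  assert(DummyReducer::GetInstance() == r);           // same object everywhere
  assert(DummyReducer::GetInstance()->NumVars() == 3);
  return 0;
}

One consequence of this design, visible in the sketch, is that every trainer process can hold at most one Reducer, and any later SetInstance call silently reuses the first instance.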