@@ -12,7 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 #include "paddle/fluid/framework/details/all_reduce_op_handle.h"
+
 #include <algorithm>
+
 #include "paddle/fluid/framework/details/container_cast.h"
 #include "paddle/fluid/framework/details/reduce_and_gather.h"
 #include "paddle/fluid/framework/details/variable_visitor.h"
@@ -34,14 +36,24 @@ AllReduceOpHandle::AllReduceOpHandle(ir::Node *node,
                                      const std::vector<platform::Place> &places,
                                      const platform::NCCLCommunicator *ctxs)
     : NCCLOpHandleBase(node, places, ctxs), local_scopes_(local_scopes) {
-  PADDLE_ENFORCE_EQ(places_.size(), local_scopes_.size());
+  PADDLE_ENFORCE_EQ(places_.size(), local_scopes_.size(),
+                    platform::errors::InvalidArgument(
+                        "The number of places and the number of local scopes "
+                        "should be equal, but got number of places is %d and "
+                        "number of local scopes is %d.",
+                        places_.size(), local_scopes_.size()));
 }
 #else
 AllReduceOpHandle::AllReduceOpHandle(ir::Node *node,
                                      const std::vector<Scope *> &local_scopes,
                                      const std::vector<platform::Place> &places)
     : OpHandleBase(node), local_scopes_(local_scopes), places_(places) {
-  PADDLE_ENFORCE_EQ(places_.size(), local_scopes_.size());
+  PADDLE_ENFORCE_EQ(places_.size(), local_scopes_.size(),
+                    platform::errors::InvalidArgument(
+                        "The number of places and the number of local scopes "
+                        "should be equal, but got number of places is %d and "
+                        "number of local scopes is %d.",
+                        places_.size(), local_scopes_.size()));
 }
 #endif
 
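Aside for reviewers: PADDLE_ENFORCE_EQ with a platform::errors payload throws a typed exception whose message is printf-formatted with the offending values, so a size mismatch now reports both counts instead of a bare failed predicate. Below is a minimal standalone sketch of that pattern, assuming nothing from Paddle: EnforceEq is a hypothetical helper and std::invalid_argument merely stands in for platform::errors::InvalidArgument.

// Sketch only: approximates the enforce-with-formatted-message pattern.
#include <cstdio>
#include <stdexcept>
#include <vector>

// Hypothetical analogue of PADDLE_ENFORCE_EQ(a, b, InvalidArgument(fmt, ...)).
template <typename T, typename... Args>
void EnforceEq(const T &a, const T &b, const char *fmt, Args... args) {
  if (a == b) return;                     // check passes, nothing to do
  char buf[512];
  std::snprintf(buf, sizeof(buf), fmt, args...);
  throw std::invalid_argument(buf);       // typed error, formatted message
}

int main() {
  std::vector<int> places(4), local_scopes(3);  // deliberately mismatched
  try {
    EnforceEq(places.size(), local_scopes.size(),
              "The number of places and the number of local scopes "
              "should be equal, but got number of places is %zu and "
              "number of local scopes is %zu.",
              places.size(), local_scopes.size());
  } catch (const std::invalid_argument &e) {
    std::printf("%s\n", e.what());        // prints both counts
  }
  return 0;
}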
@@ -60,13 +72,25 @@ void AllReduceOpHandle::AllReduceImpl(
     const std::vector<VarHandle *> &in_var_handles,
     const std::vector<VarHandle *> &out_var_handles) {
   size_t num_places = places_.size();
-  PADDLE_ENFORCE_EQ(
-      in_var_handles.size(), num_places,
-      "The NoDummyInputSize should be equal to the number of places.");
+  PADDLE_ENFORCE_EQ(in_var_handles.size(), num_places,
+                    platform::errors::InvalidArgument(
+                        "The NoDummyInputSize should be equal "
+                        "to the number of places, but got NoDummyInputSize is "
+                        "%d and the number of places is %d.",
+                        in_var_handles.size(), num_places));
   PADDLE_ENFORCE_EQ(
       in_var_handles.size(), out_var_handles.size(),
-      "The NoDummyInputSize and NoDummyOutputSize should be equal.");
-  PADDLE_ENFORCE_EQ(local_exec_scopes_.size(), num_places);
+      platform::errors::InvalidArgument(
+          "The NoDummyInputSize and NoDummyOutputSize should be "
+          "equal, but got NoDummyInputSize is %d and NoDummyOutputSize is %d.",
+          in_var_handles.size(), out_var_handles.size()));
+  PADDLE_ENFORCE_EQ(
+      local_exec_scopes_.size(), num_places,
+      platform::errors::InvalidArgument(
+          "The number of local scopes should be equal "
+          "to the number of places, but got the number of local scopes is "
+          "%d and the number of places is %d.",
+          local_exec_scopes_.size(), num_places));
 
   std::vector<const void *> lod_tensor_data;
   std::vector<platform::Place> places;
@@ -78,23 +102,36 @@ void AllReduceOpHandle::AllReduceImpl(
   for (size_t i = 0; i < local_exec_scopes_.size(); ++i) {
     auto &local_scope = local_exec_scopes_[i];
     auto var = local_scope->FindVar(in_var_handles[i]->name());
-    PADDLE_ENFORCE_NOT_NULL(var, "%s is not found int scope.",
-                            in_var_handles[i]->name());
+    PADDLE_ENFORCE_NOT_NULL(var, platform::errors::NotFound(
+                                     "Variable %s is not found in local scope.",
+                                     in_var_handles[i]->name()));
     auto &lod_tensor = var->Get<LoDTensor>();
 
     if (i == 0) {
       numel = static_cast<int64_t>(lod_tensor.numel());
       // only enforce place0, we will enforce other place numel == place0 numel
       PADDLE_ENFORCE_GT(
-          numel, 0, platform::errors::InvalidArgument(
-                        "The numel of tensos=[%s] must > 0. But now numel=[%d]",
-                        in_var_handles[i]->name(), numel));
+          numel, 0,
+          platform::errors::PreconditionNotMet(
+              "The numel of tensor %s should be > 0, but got numel is %d.",
+              in_var_handles[i]->name(), numel));
       dtype = lod_tensor.type();
       is_gpu_place = platform::is_gpu_place(lod_tensor.place());
     }
-    PADDLE_ENFORCE_EQ(numel, static_cast<int64_t>(lod_tensor.numel()));
-    PADDLE_ENFORCE_EQ(dtype, lod_tensor.type());
-    PADDLE_ENFORCE_EQ(is_gpu_place, platform::is_gpu_place(lod_tensor.place()));
+    PADDLE_ENFORCE_EQ(
+        numel, static_cast<int64_t>(lod_tensor.numel()),
+        platform::errors::PreconditionNotMet(
+            "The size of tensors of the same variable in different local "
+            "scopes should be equal."));
+    PADDLE_ENFORCE_EQ(
+        dtype, lod_tensor.type(),
+        platform::errors::PreconditionNotMet(
+            "The dtype of tensors of the same variable in different local "
+            "scopes should be equal."));
+    PADDLE_ENFORCE_EQ(is_gpu_place, platform::is_gpu_place(lod_tensor.place()),
+                      platform::errors::PreconditionNotMet(
+                          "The place type of tensors of the same variable "
+                          "in different local scopes should be equal."));
 
     lod_tensor_data.emplace_back(lod_tensor.data<void>());
     places.emplace_back(lod_tensor.place());
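Aside for reviewers: the loop above fixes numel, dtype, and placement from scope 0, then requires the tensor in every other local scope to match before the buffers are handed to the all-reduce. A standalone sketch of that consistency check follows; TensorMeta is a hypothetical stand-in for the LoDTensor metadata and nothing here is Paddle API.

// Sketch only: cross-scope consistency checks before an all-reduce.
#include <cstdio>
#include <stdexcept>
#include <vector>

struct TensorMeta {   // hypothetical stand-in for LoDTensor metadata
  long long numel;    // element count
  int dtype;          // dtype id
  bool is_gpu;        // place type
};

void CheckConsistent(const std::vector<TensorMeta> &per_scope) {
  if (per_scope.empty() || per_scope[0].numel <= 0)
    throw std::runtime_error("The numel of the tensor should be > 0.");
  const TensorMeta &ref = per_scope[0];   // scope 0 sets the reference
  for (const TensorMeta &t : per_scope) {
    if (t.numel != ref.numel)
      throw std::runtime_error("tensor sizes differ across local scopes");
    if (t.dtype != ref.dtype)
      throw std::runtime_error("tensor dtypes differ across local scopes");
    if (t.is_gpu != ref.is_gpu)
      throw std::runtime_error("tensor place types differ across local scopes");
  }
}

int main() {
  std::vector<TensorMeta> scopes = {{8, 5, true}, {8, 5, true}};
  CheckConsistent(scopes);                // all scopes agree, no throw
  std::puts("all scopes consistent");
  return 0;
}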
@@ -102,8 +139,12 @@ void AllReduceOpHandle::AllReduceImpl(
     VLOG(10) << "place:" << i << ", input_name:" << in_var_handles[i]->name()
              << ", out_name:" << out_var_handles[i]->name();
 
-    PADDLE_ENFORCE_EQ(in_var_handles[i]->name(), out_var_handles[i]->name(),
-                      "The name of input and output should be equal.");
+    PADDLE_ENFORCE_EQ(
+        in_var_handles[i]->name(), out_var_handles[i]->name(),
+        platform::errors::InvalidArgument(
+            "The name of input and output of all_reduce op should be equal, "
+            "but got input is %s and output is %s.",
+            in_var_handles[i]->name(), out_var_handles[i]->name()));
   }
 
   std::vector<std::string> grad_var_names;
@@ -122,7 +163,9 @@ void AllReduceOpHandle::AllReduceFunc(
     const std::vector<std::string> &out_var_names) {
   if (is_gpu_place(places[0])) {
 #if defined(PADDLE_WITH_NCCL)
-    PADDLE_ENFORCE_NOT_NULL(nccl_ctxs_, "nccl_ctxs should not be nullptr.");
+    PADDLE_ENFORCE_NOT_NULL(nccl_ctxs_,
+                            platform::errors::InvalidArgument(
+                                "The nccl context should not be NULL."));
     ncclDataType_t nccl_dtype = platform::ToNCCLDataType(dtype);
     std::vector<std::function<void()>> all_reduce_calls;
     for (size_t i = 0; i < local_exec_scopes_.size(); ++i) {
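Aside for reviewers: the NOT_NULL variant of the same pattern reports which pointer was missing at the check site instead of crashing on a later dereference. A minimal standalone analogue, with EnforceNotNull as a hypothetical helper:

// Sketch only: enforce-style null check with a descriptive message.
#include <stdexcept>
#include <string>

template <typename T>
T *EnforceNotNull(T *ptr, const std::string &what) {  // hypothetical helper
  if (ptr == nullptr)
    throw std::invalid_argument(what + " should not be NULL.");
  return ptr;
}

int main() {
  int x = 42;
  EnforceNotNull(&x, "The nccl context");              // passes, returns &x
  try {
    EnforceNotNull<int>(nullptr, "The nccl context");  // throws with context
  } catch (const std::invalid_argument &) {
  }
  return 0;
}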
@@ -134,7 +177,8 @@ void AllReduceOpHandle::AllReduceFunc(
     }
     NCCLAllReduceFunc(all_reduce_calls);
 #else
-    PADDLE_THROW("Not compiled with CUDA.");
+    PADDLE_THROW(
+        platform::errors::PreconditionNotMet("Not compiled with CUDA."));
 #endif
   } else {  // Specially handle CPU-only operators' gradients, e.g. CRF.
     auto &trg = *local_exec_scopes_[0]
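Aside for reviewers: the #else branch makes a CPU-only build fail fast with a typed precondition error when the GPU path is requested, rather than with an untyped string throw. A standalone sketch of the compile-guard pattern, with std::logic_error standing in for platform::errors::PreconditionNotMet; PADDLE_WITH_NCCL is the build flag used in the diff above.

// Sketch only: GPU-only code path guarded by a build flag.
#include <cstdio>
#include <stdexcept>

void AllReduceOnGpu() {
#if defined(PADDLE_WITH_NCCL)
  // ... issue the NCCL all-reduce calls here ...
#else
  // Analogue of PADDLE_THROW(platform::errors::PreconditionNotMet(...)).
  throw std::logic_error("Not compiled with CUDA.");
#endif
}

int main() {
  try {
    AllReduceOnGpu();
  } catch (const std::logic_error &e) {
    std::printf("precondition not met: %s\n", e.what());  // CPU-only build path
  }
  return 0;
}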