/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

#include <string>

#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_registry.h"

#if defined(PADDLE_WITH_NCCL)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/nccl_helper.h"
#endif
namespace paddle {
namespace operators {

enum ReduceType { kRedSum, kRedMax, kRedMin, kRedProd };
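// Operator definition shared by the c_allreduce kernels: the output keeps the
// input's shape, and the kernel is selected from the dtype and place of "X".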
class CAllReduceOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(
        OperatorWithKernel::IndicateVarDataType(ctx, "X"), ctx.GetPlace());
  }
};
template <ReduceType red_type, typename T>
class CAllReduceOpCPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    PADDLE_THROW("CAllReduce op does not support a CPU kernel for now.");
  }
};
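// CUDA kernel: performs the all-reduce through NCCL. Only available when
// Paddle is built with PADDLE_WITH_NCCL.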
template <ReduceType red_type, typename T>
class CAllReduceOpCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
#if defined(PADDLE_WITH_NCCL)
    auto in = ctx.Input<framework::Tensor>("X");
    auto out = ctx.Output<framework::Tensor>("Out");
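    // Gather the arguments for the NCCL call: target place, element type and
    // count, plus raw send/recv buffers (the output is resized to match X).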
    auto place = ctx.GetPlace();
    ncclDataType_t dtype = platform::ToNCCLDataType(in->type());
    int64_t numel = in->numel();
    const void* sendbuff = in->data<void>();
    out->Resize(in->dims());
    void* recvbuff = out->mutable_data<T>(place);
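    // Look up the NCCL communicator that was previously initialized for this
    // ring id on the current device.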
    int rid = ctx.Attr<int>("ring_id");
    auto comm = platform::NCCLCommContext::Instance().Get(rid, place);
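    // Choose the CUDA stream for the collective: either the computation
    // stream of the current device context or the communicator's own stream.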
    cudaStream_t stream = nullptr;
    if (ctx.Attr<bool>("use_calc_stream")) {
      auto dev_ctx = platform::DeviceContextPool::Instance().Get(place);
      stream = static_cast<platform::CUDADeviceContext*>(dev_ctx)->stream();
    } else {
      stream = comm->stream();
    }
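    // Map the framework-level reduce type onto the corresponding NCCL
    // reduction operation.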
    ncclRedOp_t nccl_red_type = ncclSum;
    switch (red_type) {
      case kRedSum:
        nccl_red_type = ncclSum;
        break;

      case kRedMax:
        nccl_red_type = ncclMax;
        break;

      case kRedMin:
        nccl_red_type = ncclMin;
        break;

      case kRedProd:
        nccl_red_type = ncclProd;
        break;

      default:
        PADDLE_THROW("Invalid reduce type: %d", red_type);
    }
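    // Launch the collective all-reduce on the selected stream; a failing NCCL
    // status is converted into a Paddle exception.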
    PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce(
        sendbuff, recvbuff, numel, dtype, nccl_red_type, comm->comm(),
        stream));
#else
    PADDLE_THROW("PaddlePaddle should be compiled with GPU support.");
#endif
  }
};
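// Proto maker shared by the c_allreduce ops; derived makers supply the
// concrete reduce-type name through GetName().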
class CAllReduceOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() {
    AddInput("X", "(Tensor), tensor to be allreduced.");
    AddOutput("Out", "(Tensor) the allreduced result.");
    AddAttr<int>("ring_id", "(int default 0) communication ring id.")
        .SetDefault(0);
    AddAttr<bool>(
        "use_calc_stream",
        "(bool default false) whether to run CUDA operations on the "
        "calculation stream.")
        .SetDefault(false);
    AddComment(string::Sprintf(R"DOC(
CAllReduce %s Operator

Call collective AllReduce with reduce type %s. If the input and output are
the same variable, in-place allreduce will be used.
Reference: https://docs.nvidia.com/deeplearning/sdk/nccl-developer-guide/docs/usage/operations.html#allreduce
)DOC",
                               GetName(), GetName()));
  }

 protected:
  virtual std::string GetName() const = 0;
};
}  // namespace operators
}  // namespace paddle