commit
0693b4148d
@ -0,0 +1,47 @@
|
|||||||
|
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include "paddle/fluid/framework/details/graph_builder_factory.h"
|
||||||
|
#include <fstream>
|
||||||
|
#include "paddle/fluid/framework/details/multi_devices_graph_builder.h"
|
||||||
|
#include "paddle/fluid/framework/details/ssa_graph_printer.h"
|
||||||
|
|
||||||
|
namespace paddle {
|
||||||
|
namespace framework {
|
||||||
|
namespace details {
|
||||||
|
std::unique_ptr<SSAGraphBuilder> SSAGraphBuilderFactory::Create() {
|
||||||
|
std::unique_ptr<SSAGraphBuilder> res(
|
||||||
|
#ifdef PADDLE_WITH_CUDA
|
||||||
|
new MultiDevSSAGraphBuilder(places_, loss_var_name_, param_names_,
|
||||||
|
local_scopes_, nccl_ctxs_, strategy_)
|
||||||
|
#else
|
||||||
|
new MultiDevSSAGraphBuilder(places_, loss_var_name_, param_names_,
|
||||||
|
local_scopes_, strategy_)
|
||||||
|
#endif
|
||||||
|
); // NOLINT
|
||||||
|
|
||||||
|
if (!strategy_.debug_graphviz_path_.empty()) {
|
||||||
|
std::unique_ptr<std::ostream> fout(
|
||||||
|
new std::ofstream(strategy_.debug_graphviz_path_));
|
||||||
|
PADDLE_ENFORCE(fout->good());
|
||||||
|
std::unique_ptr<GraphvizSSAGraphPrinter> graphviz_printer(
|
||||||
|
new GraphvizSSAGraphPrinter());
|
||||||
|
res.reset(new SSAGraghBuilderWithPrinter(
|
||||||
|
std::move(fout), std::move(graphviz_printer), std::move(res)));
|
||||||
|
}
|
||||||
|
return res;
|
||||||
|
}
|
||||||
|
} // namespace details
|
||||||
|
} // namespace framework
|
||||||
|
} // namespace paddle
|
@ -0,0 +1,67 @@
|
|||||||
|
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#pragma once
|
||||||
|
#include <memory>
#include <string>
#include <unordered_set>
#include <vector>

#include "paddle/fluid/framework/details/build_strategy.h"
#include "paddle/fluid/framework/details/ssa_graph_builder.h"
#include "paddle/fluid/platform/place.h"
|
||||||
|
|
||||||
|
#ifdef PADDLE_WITH_CUDA
|
||||||
|
#include "paddle/fluid/platform/nccl_helper.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
namespace paddle {
|
||||||
|
namespace framework {
|
||||||
|
class Scope;
|
||||||
|
namespace details {
|
||||||
|
|
||||||
|
class SSAGraphBuilderFactory {
|
||||||
|
public:
|
||||||
|
SSAGraphBuilderFactory(const std::vector<platform::Place>& places,
|
||||||
|
const std::string& loss_var_name,
|
||||||
|
const std::unordered_set<std::string>& param_names,
|
||||||
|
const std::vector<Scope*>& local_scopes,
|
||||||
|
const BuildStrategy& strategy)
|
||||||
|
: places_(places),
|
||||||
|
loss_var_name_(loss_var_name),
|
||||||
|
param_names_(param_names),
|
||||||
|
local_scopes_(local_scopes),
|
||||||
|
strategy_(strategy) {}
|
||||||
|
|
||||||
|
#ifdef PADDLE_WITH_CUDA
|
||||||
|
void SetNCCLContextMap(platform::NCCLContextMap* nccl_ctxs) {
|
||||||
|
nccl_ctxs_ = nccl_ctxs;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
std::unique_ptr<SSAGraphBuilder> Create();
|
||||||
|
|
||||||
|
private:
|
||||||
|
std::vector<platform::Place> places_;
|
||||||
|
std::string loss_var_name_;
|
||||||
|
std::unordered_set<std::string> param_names_;
|
||||||
|
std::vector<Scope*> local_scopes_;
|
||||||
|
BuildStrategy strategy_;
|
||||||
|
|
||||||
|
#ifdef PADDLE_WITH_CUDA
|
||||||
|
platform::NCCLContextMap* nccl_ctxs_;
|
||||||
|
#endif
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace details
|
||||||
|
} // namespace framework
|
||||||
|
} // namespace paddle
|
@ -0,0 +1,83 @@
|
|||||||
|
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include "paddle/fluid/framework/details/ssa_graph_printer.h"
|
||||||
|
#include <string>
|
||||||
|
#include "paddle/fluid/framework/details/ssa_graph.h"
|
||||||
|
|
||||||
|
namespace paddle {
|
||||||
|
namespace framework {
|
||||||
|
namespace details {
|
||||||
|
|
||||||
|
template <typename Callback>
|
||||||
|
static inline void IterAllVar(const SSAGraph &graph, Callback callback) {
|
||||||
|
for (auto &each : graph.vars_) {
|
||||||
|
for (auto &pair1 : each) {
|
||||||
|
for (auto &pair2 : pair1.second) {
|
||||||
|
callback(*pair2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (auto &var : graph.dep_vars_) {
|
||||||
|
callback(*var);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void GraphvizSSAGraphPrinter::Print(const SSAGraph &graph,
|
||||||
|
std::ostream &sout) const {
|
||||||
|
size_t var_id = 0;
|
||||||
|
std::unordered_map<const VarHandleBase *, size_t> vars;
|
||||||
|
|
||||||
|
sout << "digraph G {\n";
|
||||||
|
|
||||||
|
IterAllVar(graph, [&](const VarHandleBase &var) {
|
||||||
|
auto *var_ptr = &var;
|
||||||
|
auto *var_handle_ptr = dynamic_cast<const VarHandle *>(var_ptr);
|
||||||
|
auto *dummy_ptr = dynamic_cast<const DummyVarHandle *>(var_ptr);
|
||||||
|
|
||||||
|
size_t cur_var_id = var_id++;
|
||||||
|
vars[var_ptr] = cur_var_id;
|
||||||
|
|
||||||
|
if (var_handle_ptr) {
|
||||||
|
sout << "var_" << cur_var_id << " [label=\"" << var_handle_ptr->name_
|
||||||
|
<< "\\n"
|
||||||
|
<< var_handle_ptr->place_ << "\\n"
|
||||||
|
<< var_handle_ptr->version_ << "\"]" << std::endl;
|
||||||
|
} else if (dummy_ptr) {
|
||||||
|
sout << "var_" << cur_var_id << " [label=\"dummy\"]" << std::endl;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
size_t op_id = 0;
|
||||||
|
for (auto &op : graph.ops_) {
|
||||||
|
std::string op_name = "op_" + std::to_string(op_id++);
|
||||||
|
sout << op_name << " [label=\"" << op->Name() << "\", shape=rect]"
|
||||||
|
<< std::endl;
|
||||||
|
for (auto in : op->Inputs()) {
|
||||||
|
std::string var_name = "var_" + std::to_string(vars[in]);
|
||||||
|
sout << var_name << " -> " << op_name << std::endl;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (auto out : op->Outputs()) {
|
||||||
|
std::string var_name = "var_" + std::to_string(vars[out]);
|
||||||
|
sout << op_name << " -> " << var_name << std::endl;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sout << "}\n";
|
||||||
|
}
|
||||||
|
} // namespace details
|
||||||
|
} // namespace framework
|
||||||
|
} // namespace paddle
|
@ -0,0 +1,67 @@
|
|||||||
|
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <iosfwd>
#include <memory>
#include <utility>

#include "paddle/fluid/framework/details/ssa_graph_builder.h"
|
||||||
|
|
||||||
|
namespace paddle {
|
||||||
|
namespace framework {
|
||||||
|
namespace details {
|
||||||
|
class SSAGraph;
|
||||||
|
// Abstract interface for serializing an SSAGraph to an output stream.
class SSAGraphPrinter {
 public:
  virtual ~SSAGraphPrinter() {}
  // Writes a representation of `graph` to `sout`. Implementations choose
  // the output format.
  virtual void Print(const SSAGraph& graph, std::ostream& sout) const = 0;
};
|
||||||
|
|
||||||
|
// Prints an SSAGraph in graphviz dot format (implemented in
// ssa_graph_printer.cc).
class GraphvizSSAGraphPrinter : public SSAGraphPrinter {
 public:
  void Print(const SSAGraph& graph, std::ostream& sout) const override;
};
|
||||||
|
|
||||||
|
// Decorator around an SSAGraphBuilder: builds the graph with the wrapped
// builder, then prints it with the given printer before returning it.
// NOTE(review): the class name spells "Gragh"; it is the public name used
// by callers, so it is kept as-is here.
class SSAGraghBuilderWithPrinter : public SSAGraphBuilder {
 public:
  // Borrows an externally owned stream; the caller must keep `sout` alive
  // for the lifetime of this object (stream_ptr_ stays null).
  SSAGraghBuilderWithPrinter(std::ostream& sout,
                             std::unique_ptr<SSAGraphPrinter>&& printer,
                             std::unique_ptr<SSAGraphBuilder>&& builder)
      : printer_(std::move(printer)),
        builder_(std::move(builder)),
        stream_ref_(sout) {}

  // Takes ownership of the stream. stream_ptr_ is declared before
  // stream_ref_, so it is initialized first and stream_ref_ safely binds
  // to *stream_ptr_.
  SSAGraghBuilderWithPrinter(std::unique_ptr<std::ostream>&& sout,
                             std::unique_ptr<SSAGraphPrinter>&& printer,
                             std::unique_ptr<SSAGraphBuilder>&& builder)
      : printer_(std::move(printer)),
        builder_(std::move(builder)),
        stream_ptr_(std::move(sout)),
        stream_ref_(*stream_ptr_) {}

  // Delegates to the wrapped builder, then dumps the resulting graph.
  std::unique_ptr<SSAGraph> Build(const ProgramDesc& program) const override {
    auto graph = builder_->Build(program);
    printer_->Print(*graph, stream_ref_);
    return graph;
  }

 private:
  std::unique_ptr<SSAGraphPrinter> printer_;
  std::unique_ptr<SSAGraphBuilder> builder_;
  std::unique_ptr<std::ostream> stream_ptr_;  // null when the stream is borrowed
  std::ostream& stream_ref_;                  // always valid output target
};
|
||||||
|
|
||||||
|
} // namespace details
|
||||||
|
} // namespace framework
|
||||||
|
} // namespace paddle
|
@ -0,0 +1,107 @@
|
|||||||
|
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include "paddle/fluid/operators/reverse_op.h"
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
namespace paddle {
|
||||||
|
namespace operators {
|
||||||
|
|
||||||
|
class ReverseOp : public framework::OperatorWithKernel {
|
||||||
|
public:
|
||||||
|
using framework::OperatorWithKernel::OperatorWithKernel;
|
||||||
|
|
||||||
|
void InferShape(framework::InferShapeContext* ctx) const override {
|
||||||
|
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
|
||||||
|
PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null");
|
||||||
|
const auto& x_dims = ctx->GetInputDim("X");
|
||||||
|
const auto& axis = ctx->Attrs().Get<std::vector<int>>("axis");
|
||||||
|
PADDLE_ENFORCE(!axis.empty(), "'axis' can not be empty.");
|
||||||
|
for (int a : axis) {
|
||||||
|
PADDLE_ENFORCE_LT(a, x_dims.size(),
|
||||||
|
"The axis must be less than input tensor's rank.");
|
||||||
|
}
|
||||||
|
ctx->SetOutputDim("Out", x_dims);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Declares the inputs, outputs and attributes of the reverse op for the
// op registry. The AddComment text below is a runtime string shown in the
// generated op documentation and is kept verbatim.
class ReverseOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "The LoDTensor to be flipped.");
    AddOutput("Out", "The LoDTensor after flipping.");
    AddAttr<std::vector<int>>(
        "axis", "The axises that along which order of elements is reversed.");
    AddComment(R"DOC(
      Reverse Operator.

      Reverse the order of elements in the input LoDTensor along given axises.

      Case 1:
        Given
            X = [[1, 2, 3, 4, 5]
                 [6, 7, 8, 9, 10]
                 [11, 12, 13, 14, 15]],
        and
            axis = [0],
        we get:
            Out = [[11, 12, 13, 14, 15]
                   [6, 7, 8, 9, 10]
                   [1, 2, 3, 4, 5]].

      Case 2:
        Given
            X = [[[1, 2, 3, 4]
                  [5, 6, 7, 8]]
                 [[9, 10, 11, 12]
                  [13, 14, 15, 16]]],
        and
            axis = [0, 2],
        we get:
            Out = [[[12, 11, 10, 9]
                    [16, 15, 14, 13]]
                   [[4, 3, 2, 1]
                    [8, 7, 6, 5]]],
      )DOC");
  }
};
|
||||||
|
|
||||||
|
class ReverseGradMaker : public framework::SingleGradOpDescMaker {
|
||||||
|
public:
|
||||||
|
using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
|
||||||
|
|
||||||
|
std::unique_ptr<framework::OpDesc> Apply() const override {
|
||||||
|
auto* grad_op = new framework::OpDesc();
|
||||||
|
grad_op->SetType("reverse");
|
||||||
|
grad_op->SetInput("X", OutputGrad("Out"));
|
||||||
|
grad_op->SetOutput("Out", InputGrad("X"));
|
||||||
|
grad_op->SetAttr("axis", GetAttr("axis"));
|
||||||
|
return std::unique_ptr<framework::OpDesc>(grad_op);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace operators
|
||||||
|
} // namespace paddle
|
||||||
|
|
||||||
|
namespace ops = paddle::operators;
// Register the forward op with its custom grad maker; the grad op reuses
// ReverseOp since reverse is shape-preserving.
REGISTER_OPERATOR(reverse, ops::ReverseOp, ops::ReverseOpMaker,
                  ops::ReverseGradMaker);
REGISTER_OPERATOR(reverse_grad, ops::ReverseOp);
// CPU kernels for every supported element type.
REGISTER_OP_CPU_KERNEL(
    reverse, ops::ReverseKernel<paddle::platform::CPUDeviceContext, int>,
    ops::ReverseKernel<paddle::platform::CPUDeviceContext, uint8_t>,
    ops::ReverseKernel<paddle::platform::CPUDeviceContext, int64_t>,
    ops::ReverseKernel<paddle::platform::CPUDeviceContext, bool>,
    ops::ReverseKernel<paddle::platform::CPUDeviceContext, float>,
    ops::ReverseKernel<paddle::platform::CPUDeviceContext, double>)
|
@ -0,0 +1,24 @@
|
|||||||
|
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include "paddle/fluid/operators/reverse_op.h"
|
||||||
|
|
||||||
|
namespace ops = paddle::operators;
// CUDA kernels for the same element types as the CPU registration in
// reverse_op.cc.
REGISTER_OP_CUDA_KERNEL(
    reverse, ops::ReverseKernel<paddle::platform::CUDADeviceContext, int>,
    ops::ReverseKernel<paddle::platform::CUDADeviceContext, uint8_t>,
    ops::ReverseKernel<paddle::platform::CUDADeviceContext, int64_t>,
    ops::ReverseKernel<paddle::platform::CUDADeviceContext, bool>,
    ops::ReverseKernel<paddle::platform::CUDADeviceContext, float>,
    ops::ReverseKernel<paddle::platform::CUDADeviceContext, double>)
|
@ -0,0 +1,87 @@
|
|||||||
|
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#pragma once
|
||||||
|
#include <vector>
|
||||||
|
#include "paddle/fluid/framework/eigen.h"
|
||||||
|
#include "paddle/fluid/framework/op_registry.h"
|
||||||
|
|
||||||
|
namespace paddle {
|
||||||
|
namespace operators {
|
||||||
|
template <typename DeviceContext, typename T, int Rank>
|
||||||
|
struct ReverseFunctor {
|
||||||
|
void operator()(const DeviceContext& context, const framework::LoDTensor& in,
|
||||||
|
framework::LoDTensor* out, const std::vector<int>& axis) {
|
||||||
|
Eigen::array<bool, Rank> reverse_axis;
|
||||||
|
for (int i = 0; i < Rank; ++i) {
|
||||||
|
reverse_axis[i] = false;
|
||||||
|
}
|
||||||
|
for (int a : axis) {
|
||||||
|
reverse_axis[a] = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
auto in_eigen = framework::EigenTensor<T, Rank>::From(in);
|
||||||
|
auto out_eigen = framework::EigenTensor<T, Rank>::From(*out);
|
||||||
|
auto* dev = context.eigen_device();
|
||||||
|
|
||||||
|
out_eigen.device(*dev) = in_eigen.reverse(reverse_axis);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
template <typename DeviceContext, typename T>
|
||||||
|
// Computes Out = reverse(X, axis) by dispatching on the runtime rank to a
// rank-templated ReverseFunctor instantiation.
template <typename DeviceContext, typename T>
class ReverseKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* x = context.Input<framework::LoDTensor>("X");
    auto* out = context.Output<framework::LoDTensor>("Out");
    out->mutable_data<T>(context.GetPlace());
    const auto& axis = context.Attr<std::vector<int>>("axis");
    int rank = x->dims().size();
    auto& dev_ctx = context.template device_context<DeviceContext>();

    // Eigen tensors carry their rank as a template parameter, so each
    // supported rank needs its own functor instantiation. Case bodies are
    // braced to scope the case-local functor declarations.
    switch (rank) {
      case 1: {
        ReverseFunctor<DeviceContext, T, 1> functor1;
        functor1(dev_ctx, *x, out, axis);
        break;
      }
      case 2: {
        ReverseFunctor<DeviceContext, T, 2> functor2;
        functor2(dev_ctx, *x, out, axis);
        break;
      }
      case 3: {
        ReverseFunctor<DeviceContext, T, 3> functor3;
        functor3(dev_ctx, *x, out, axis);
        break;
      }
      case 4: {
        ReverseFunctor<DeviceContext, T, 4> functor4;
        functor4(dev_ctx, *x, out, axis);
        break;
      }
      case 5: {
        ReverseFunctor<DeviceContext, T, 5> functor5;
        functor5(dev_ctx, *x, out, axis);
        break;
      }
      case 6: {
        ReverseFunctor<DeviceContext, T, 6> functor6;
        functor6(dev_ctx, *x, out, axis);
        break;
      }
      default:
        // Error message corrected: the original misspelled the op name as
        // "Reserve" and read "doesn't supports".
        PADDLE_THROW(
            "Reverse operator doesn't support tensors whose ranks are "
            "greater than 6.");
    }
  }
};
|
||||||
|
|
||||||
|
} // namespace operators
|
||||||
|
} // namespace paddle
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue