Rewrite feed/fetch op (#4815)

* Feed/Fetch op is a plain operator, not an OperatorWithKernel.
* Do not register an OpInfoMaker, since Feed/Fetch will never be configured by end users directly.
* Feed/Fetch op has an empty gradient.
* Feed/Fetch op does not hard-code `feed_variable` / `fetch_variable` as its input and output; the fed and fetched variables are passed in as plain operator inputs/outputs.

branch revert-4814-Add_sequence_project_op
parent 440ad999b7
commit 4df6cf4d16
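The last bullet is the interface change that matters to callers: which variable holds the fed data and which variable receives it is now ordinary operator configuration. Below is a minimal sketch of wiring a feed op under the new scheme; the variable names "feed_holder" and "x", and the use of OpRegistry::CreateOp as the construction helper, are assumptions for illustration and are not part of this patch.

#include "paddle/framework/op_registry.h"
#include "paddle/framework/operator.h"

// Sketch only: build a feed op whose input/output variables are named
// explicitly instead of being hard-coded inside the operator.
void BuildFeedOpSketch() {
  namespace fw = paddle::framework;
  // "feed_holder" is the (hypothetical) variable holding the FeedFetchList;
  // "x" is the (hypothetical) destination LoDTensor variable.
  fw::VariableNameMap inputs{{"Input", {"feed_holder"}}};
  fw::VariableNameMap outputs{{"Out", {"x"}}};
  fw::AttributeMap attrs;
  attrs["col"] = 0;  // take column 0 of the feed list

  // Assumed helper: OpRegistry::CreateOp(type, inputs, outputs, attrs).
  auto feed_op = fw::OpRegistry::CreateOp("feed", inputs, outputs, attrs);
  (void)feed_op;  // the executor would call feed_op->Run(scope, dev_ctx)
}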
paddle/framework/feed_fetch_type.h (new file)
@@ -0,0 +1,24 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <vector>
#include "paddle/framework/lod_tensor.h"

namespace paddle {
namespace framework {
using FeedFetchType = LoDTensor;
using FeedFetchList = std::vector<FeedFetchType>;
}  // namespace framework
}  // namespace paddle
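Since FeedFetchList is just a std::vector of LoDTensor, the code that hands data to a feed op stays trivial. A hedged sketch of populating one feed column follows; the variable name "feed_holder" is hypothetical, and Scope::Var is assumed to be the call that creates or finds the variable in the scope.

#include "paddle/framework/feed_fetch_type.h"
#include "paddle/framework/scope.h"

// Sketch only: place one input tensor into column 0 of the feed list.
void SetFeedColumn(paddle::framework::Scope* scope,
                   const paddle::framework::LoDTensor& input) {
  // Scope::Var("feed_holder") is assumed; GetMutable<FeedFetchList>() is the
  // same accessor the operators below use.
  auto* feed_list = scope->Var("feed_holder")
                        ->GetMutable<paddle::framework::FeedFetchList>();
  if (feed_list->size() < 1) {
    feed_list->resize(1);
  }
  feed_list->at(0) = input;  // FeedOp copies this out when Attr("col") == 0
}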
paddle/operators/feed_op.cc
@@ -1,59 +1,57 @@
 /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
 
     http://www.apache.org/licenses/LICENSE-2.0
 
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include "paddle/operators/feed_op.h"
+#include "paddle/framework/feed_fetch_type.h"
+#include "paddle/framework/op_registry.h"
+#include "paddle/framework/operator.h"
 
 namespace paddle {
 namespace operators {
 
-class FeedOp : public framework::OperatorWithKernel {
+class FeedOp : public framework::OperatorBase {
  public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
- protected:
-  void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output should be not null.");
-    auto& shape = ctx->Attrs().Get<std::vector<int>>("dims");
-    std::vector<int64_t> shape_int64(shape.size(), 0);
-    std::transform(shape.begin(), shape.end(), shape_int64.begin(),
-                   [](int a) { return static_cast<int64_t>(a); });
-    ctx->SetOutputDim("Out", framework::make_ddim(shape_int64));
-    // TODO(qijun): need to handle LodTensor later
-  }
-
-  framework::DataType IndicateDataType(
-      const framework::ExecutionContext& ctx) const override {
-    return static_cast<framework::DataType>(Attr<int>("dataType"));
-  }
-};
-
-class FeedOpMaker : public framework::OpProtoAndCheckerMaker {
- public:
-  FeedOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddAttr<int>("dataType", "output data type")
-        .SetDefault(framework::DataType::FP32);
-    AddAttr<int>("col", "The col in global feed variable").SetDefault(0);
-    AddAttr<std::vector<int>>("dims", "The dimension of feed tensor.");
-    AddOutput("Out", "The output of feed op.");
-    AddComment(R"DOC(Feed data from global feed variable)DOC");
+  FeedOp(const std::string &type, const framework::VariableNameMap &inputs,
+         const framework::VariableNameMap &outputs,
+         const framework::AttributeMap &attrs)
+      : OperatorBase(type, inputs, outputs, attrs) {}
+  void Run(const framework::Scope &scope,
+           const platform::DeviceContext &dev_ctx) const override {
+    auto feed_var_name = Input("Input");
+    auto *feed_var = scope.FindVar(feed_var_name);
+    PADDLE_ENFORCE(feed_var != nullptr,
+                   "Cannot find feed_var in scope, feed_var_name is %s",
+                   feed_var_name);
+
+    auto out_name = this->Output("Out");
+    auto *out_var = scope.FindVar(out_name);
+    PADDLE_ENFORCE(out_var != nullptr,
+                   "Cannot find out_var in scope, out_var_name is %s",
+                   out_name);
+
+    auto col = Attr<int>("col");
+
+    auto &feed_list = feed_var->Get<framework::FeedFetchList>();
+    auto &feed_item = feed_list.at(static_cast<size_t>(col));
+    auto *out_item = out_var->GetMutable<framework::FeedFetchType>();
+    out_item->CopyFromTensor(feed_item, dev_ctx.GetPlace(), dev_ctx);
+    out_item->set_lod(feed_item.lod());
   }
 };
 
 }  // namespace operators
 }  // namespace paddle
 
-namespace ops = paddle::operators;
-REGISTER_OP_WITHOUT_GRADIENT(feed, ops::FeedOp, ops::FeedOpMaker);
-REGISTER_OP_CPU_KERNEL(feed, ops::FeedKernel<float>);
+// We do not need to register OpInfoMaker,
+// since feed operator will not be used by end users directly
+REGISTER_OPERATOR(feed, paddle::operators::FeedOp,
+                  paddle::framework::EmptyGradOpMaker);
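For completeness, running the rewritten feed op against a scope prepared as in the earlier sketch would look roughly as follows. CPUDeviceContext and the device_context.h header path are assumptions; Run, FindVar, and Get<FeedFetchType>() are exactly the calls the operator itself uses above.

#include "paddle/framework/feed_fetch_type.h"
#include "paddle/framework/operator.h"
#include "paddle/framework/scope.h"
#include "paddle/platform/device_context.h"

// Sketch only: run an already-constructed feed op and read back its output.
void RunFeedOpSketch(const paddle::framework::OperatorBase& feed_op,
                     const paddle::framework::Scope& scope) {
  paddle::platform::CPUDeviceContext dev_ctx;  // assumed concrete context type
  feed_op.Run(scope, dev_ctx);

  // "x" matches the Output("Out") name the op was built with in the sketch above.
  auto& x = scope.FindVar("x")->Get<paddle::framework::FeedFetchType>();
  (void)x;  // x now holds a copy (data and LoD) of feed column `col`
}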
paddle/operators/feed_op.cu (deleted)
@@ -1,18 +0,0 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/feed_op.h"

namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(feed, ops::FeedKernel<float>);
paddle/operators/feed_op.h (deleted)
@@ -1,42 +0,0 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"

namespace paddle {
namespace operators {

template <typename T>
class FeedKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    framework::Tensor* out = ctx.Output<framework::Tensor>("Out");
    out->mutable_data<T>(ctx.GetPlace());
    framework::Variable* g_feed_variable =
        framework::GetGlobalScope().FindVar("feed_value");
    const auto& tensors =
        g_feed_variable->Get<std::vector<framework::Tensor>>();
    int col = ctx.template Attr<int>("col");
    PADDLE_ENFORCE_GT(tensors.size(), static_cast<size_t>(col));
    // TODO(qijun):
    // check tensors[col].dims() with attribute,
    // except the first dimenson.
    out->CopyFrom<T>(tensors[col], ctx.GetPlace(), ctx.device_context());
  }
};

}  // namespace operators
}  // namespace paddle
paddle/operators/fetch_op.cc
@@ -1,52 +1,64 @@
 /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
 
     http://www.apache.org/licenses/LICENSE-2.0
 
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include "paddle/operators/fetch_op.h"
+#include "paddle/framework/feed_fetch_type.h"
+#include "paddle/framework/op_registry.h"
 
 namespace paddle {
 namespace operators {
 
-class FetchOp : public framework::OperatorWithKernel {
+class FetchOp : public framework::OperatorBase {
  public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
- protected:
-  void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Input"), "Input should be not null.");
-  }
-
-  framework::DataType IndicateDataType(
-      const framework::ExecutionContext& ctx) const override {
-    return static_cast<framework::DataType>(Attr<int>("dataType"));
-  }
-};
-
-class FetchOpMaker : public framework::OpProtoAndCheckerMaker {
- public:
-  FetchOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddAttr<int>("dataType", "output data type")
-        .SetDefault(framework::DataType::FP32);
-    AddAttr<int>("col", "The col in global fetch variable").SetDefault(0);
-    AddInput("Input", "The output of fetch op.");
-    AddComment(R"DOC(Fetch data to global fetch variable)DOC");
+  FetchOp(const std::string &type, const framework::VariableNameMap &inputs,
+          const framework::VariableNameMap &outputs,
+          const framework::AttributeMap &attrs)
+      : OperatorBase(type, inputs, outputs, attrs) {}
+
+  void Run(const framework::Scope &scope,
+           const platform::DeviceContext &dev_ctx) const override {
+    auto fetch_var_name = Input("Input");
+    auto *fetch_var = scope.FindVar(fetch_var_name);
+    PADDLE_ENFORCE(fetch_var != nullptr,
+                   "Cannot find fetch variable in scope, fetch_var_name is %s",
+                   fetch_var_name);
+
+    auto out_name = this->Output("Out");
+    auto *out_var = scope.FindVar(out_name);
+    PADDLE_ENFORCE(out_var != nullptr,
+                   "Cannot find out_var in scope, out_var_name is %s",
+                   out_name);
+
+    auto col = static_cast<size_t>(Attr<int>("col"));
+
+    auto *fetch_list = out_var->GetMutable<framework::FeedFetchList>();
+    auto &src_item = fetch_var->Get<framework::FeedFetchType>();
+
+    if (col >= fetch_list->size()) {
+      fetch_list->resize(col + 1);
+    }
+    auto &dst_item = fetch_list->at(col);
+
+    // FIXME(yuyang18): Should we assume the fetch operator always generate
+    // CPU outputs?
+    dst_item.CopyFromTensor(src_item, platform::CPUPlace(), dev_ctx);
   }
 };
 
 }  // namespace operators
 }  // namespace paddle
 
-namespace ops = paddle::operators;
-REGISTER_OP_WITHOUT_GRADIENT(fetch, ops::FetchOp, ops::FetchOpMaker);
-REGISTER_OP_CPU_KERNEL(fetch, ops::FetchKernel<float>);
+// We do not need to register OpInfoMaker,
+// since fetch operator will not be used by end users directly
+REGISTER_OPERATOR(fetch, paddle::operators::FetchOp,
+                  paddle::framework::EmptyGradOpMaker);
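The fetch side is symmetric: after the fetch op runs, the result sits in the list variable named as its Output("Out"), already copied to the CPU (see the FIXME above), so reading it back is just indexing the list. A short sketch, with "fetch_holder" as an assumed variable name:

#include "paddle/framework/feed_fetch_type.h"
#include "paddle/framework/scope.h"

// Sketch only: read back fetch column `col` after the fetch op has run.
// "fetch_holder" is a hypothetical name for the variable the fetch op was
// given as Output("Out").
const paddle::framework::LoDTensor& GetFetchResult(
    const paddle::framework::Scope& scope, size_t col) {
  auto& fetch_list =
      scope.FindVar("fetch_holder")->Get<paddle::framework::FeedFetchList>();
  // FetchOp has already resized the list and copied the tensor to CPUPlace.
  return fetch_list.at(col);
}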
paddle/operators/fetch_op.cu (deleted)
@@ -1,18 +0,0 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/fetch_op.h"

namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(fetch, ops::FetchKernel<float>);
paddle/operators/fetch_op.h (deleted)
@@ -1,45 +0,0 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"

namespace paddle {
namespace operators {

template <typename T>
class FetchKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    const framework::Tensor* input = ctx.Input<framework::Tensor>("Input");
    framework::Variable* g_fetch_variable =
        framework::GetGlobalScope().FindVar("fetch_value");
    auto* tensors =
        g_fetch_variable->GetMutable<std::vector<framework::Tensor>>();
    int col = ctx.template Attr<int>("col");
    if (tensors->size() < static_cast<size_t>(col + 1)) {
      tensors->resize(col + 1);
    }
    PADDLE_ENFORCE_GT(tensors->size(), static_cast<size_t>(col));
    (*tensors)[col].Resize(input->dims());
    (*tensors)[col].mutable_data<T>(platform::CPUPlace());
    (*tensors)[col].CopyFrom<T>(*input, platform::CPUPlace(),
                                ctx.device_context());
    // TODO(qijun): need to handle LodTensor later
  }
};

}  // namespace operators
}  // namespace paddle