parent 03a38b3d55
commit b943874f16
@@ -0,0 +1,157 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/rnn/recurrent_op_utils.h"

namespace paddle {
namespace operators {
namespace rnn {

namespace fmw = paddle::framework;

void SegmentInputs(const std::vector<Scope*>& step_scopes,
                   const std::vector<Link>& inlinks, const size_t seq_len,
                   bool infer_shape_mode) {
  PADDLE_ENFORCE(!inlinks.empty(), "no inlinks are provided.");
  for (size_t i = 0; i < inlinks.size(); ++i) {
    auto input_var = step_scopes[0]->FindVar(inlinks[i].external);
    PADDLE_ENFORCE(input_var != nullptr, "input link [%s] is not in scope.",
                   inlinks[i].external);

    Tensor* input = input_var->GetMutable<Tensor>();
    fmw::DDim dims = input->dims();
    PADDLE_ENFORCE(static_cast<size_t>(dims[0]) == seq_len,
                   "all the inlinks must have the same length");
    fmw::DDim step_dims = slice_ddim(dims, 1, dims.size());
    for (size_t j = 0; j < seq_len; j++) {
      Tensor* step_input =
          step_scopes[j]->NewVar(inlinks[i].internal)->GetMutable<Tensor>();
      if (!infer_shape_mode) {
        *step_input = input->Slice<float>(j, j + 1);
      }
      step_input->Resize(step_dims);
    }
  }
}
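
// A minimal sketch of the slicing above, with illustrative (assumed) dims:
// for an inlink of shape [seq_len, batch, dim], say {10, 32, 64},
//   input->dims()                    == {10, 32, 64}
//   slice_ddim(dims, 1, dims.size()) == {32, 64}    // per-step shape
//   input->Slice<float>(j, j + 1)    -> a {1, 32, 64} slice for step j
//   step_input->Resize(step_dims)    -> viewed as {32, 64} in step scope j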

void ConcatOutputs(const std::vector<Scope*>& step_scopes,
                   const std::vector<Link>& outlinks, const size_t seq_len,
                   bool infer_shape_mode) {
  for (size_t i = 0; i < outlinks.size(); i++) {
    auto output_var = step_scopes[0]->FindVar(outlinks[i].external);
    PADDLE_ENFORCE(output_var != nullptr, "output link [%s] is not in scope.",
                   outlinks[i].external);
    Tensor* output = output_var->GetMutable<Tensor>();
    if (infer_shape_mode) {
      fmw::DDim step_dims = step_scopes[0]
                                ->FindVar(outlinks[i].internal)
                                ->GetMutable<Tensor>()
                                ->dims();
      std::vector<int> dims_vec = vectorize(step_dims);
      dims_vec.insert(dims_vec.begin(), seq_len);
      output->Resize(fmw::make_ddim(dims_vec));
    } else {
      output->mutable_data<float>(platform::CPUPlace());
      for (size_t j = 0; j < seq_len; j++) {
        Tensor* step_output =
            step_scopes[j]->FindVar(outlinks[i].internal)->GetMutable<Tensor>();
        // TODO(luotao02) the data type and platform::DeviceContext() should be
        // set correctly
        (output->Slice<float>(j, j + 1))
            .CopyFrom<float>(*step_output, platform::CPUPlace());
      }
    }
  }
}
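
// The inverse of SegmentInputs, sketched with the same assumed dims: in
// infer-shape mode only the merged dims are computed, in run mode the data
// is copied slice by slice.
//   per-step outlink dims           == {32, 64}
//   vectorize(step_dims)            -> {32, 64}
//   dims_vec.insert(begin, seq_len) -> {10, 32, 64}
//   output->Slice<float>(j, j + 1)  <- step j's tensor, via CopyFrom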

void LinkMemories(const std::vector<Scope*>& scopes,
                  const std::vector<rnn::MemoryAttr>& memories,
                  const size_t step_id, const int offset,
                  bool infer_shape_mode) {
  PADDLE_ENFORCE(step_id < scopes.size(),
                 "step [%d] is out of range of step scopes' size [%d]", step_id,
                 scopes.size());
  PADDLE_ENFORCE(static_cast<int>(step_id) + offset >= 0,
                 "offset [%d] must be larger than -[%d]", offset, step_id);
  PADDLE_ENFORCE(step_id + offset < scopes.size(),
                 "offset [%d] is out of range, it must be less than (%d - %d)",
                 offset, scopes.size(), step_id);
  auto scope = scopes[step_id];
  auto linked_scope = scopes[step_id + offset];
  for (auto& attr : memories) {
    auto mem = scope->FindVar(attr.pre_var)->GetMutable<Tensor>();
    auto linked_mem = linked_scope->FindVar(attr.var)->GetMutable<Tensor>();
    if (infer_shape_mode) {
      mem->Resize(linked_mem->dims());
    } else {
      mem->ShareDataWith<float>(*linked_mem);
    }
  }
}
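
// A sketch of the assumed caller pattern (illustrative, not part of this
// diff): in a forward pass, offset -1 links each step's pre-state to the
// previous step's state.
//
//   for (size_t step_id = 1; step_id < step_scopes.size(); ++step_id) {
//     rnn::LinkMemories(step_scopes, memories, step_id, -1 /*offset*/,
//                       infer_shape_mode);
//   }
//
// A backward pass would link in the other direction with offset +1.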

void InitArgument(const ArgumentName& name, Argument* arg,
                  const OperatorBase& op) {
  arg->step_net = op.Input(name.step_net);
  arg->step_scopes = op.Output(name.step_scopes);

  auto inlinks = op.Inputs(name.inlinks);
  auto inlink_alias = op.GetAttr<std::vector<std::string>>(name.inlink_alias);
  PADDLE_ENFORCE(inlinks.size() == inlink_alias.size(),
                 "the sizes of inlinks and inlink_alias don't match: %d, %d",
                 inlinks.size(), inlink_alias.size());
  for (size_t i = 0; i < inlinks.size(); ++i) {
    rnn::Link link;
    link.external = inlinks[i];
    link.internal = inlink_alias[i];
    (arg->inlinks).push_back(link);
  }

  auto outlinks = op.Outputs(name.outlinks);
  auto outlink_alias = op.GetAttr<std::vector<std::string>>(name.outlink_alias);
  PADDLE_ENFORCE(outlinks.size() == outlink_alias.size(),
                 "the sizes of outlinks and outlink_alias don't match: %d, %d",
                 outlinks.size(), outlink_alias.size());
  for (size_t i = 0; i < outlinks.size(); ++i) {
    rnn::Link link;
    link.external = outlinks[i];
    link.internal = outlink_alias[i];
    (arg->outlinks).push_back(link);
  }

  auto boot_memories = op.Inputs(name.boot_memories);

  // attributes
  auto memories = op.GetAttr<std::vector<std::string>>(name.memories);
  auto pre_memories = op.GetAttr<std::vector<std::string>>(name.pre_memories);

  PADDLE_ENFORCE(memories.size() == boot_memories.size(),
                 "the sizes of memories and boot_memories don't match: %d, %d",
                 memories.size(), boot_memories.size());
  PADDLE_ENFORCE(pre_memories.size() == boot_memories.size(),
                 "the sizes of pre_memories and boot_memories don't match: %d, %d",
                 pre_memories.size(), boot_memories.size());
  PADDLE_ENFORCE(memories.size() > 0, "at least one memory should be set");

  for (size_t i = 0; i < memories.size(); ++i) {
    rnn::MemoryAttr mem_attr;
    mem_attr.var = memories[i];
    mem_attr.pre_var = pre_memories[i];
    mem_attr.boot_var = boot_memories[i];
    (arg->memories).push_back(mem_attr);
  }
}

}  // namespace rnn
}  // namespace operators
}  // namespace paddle
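
Taken together, these utilities are meant to bracket a step-by-step execution
loop. A minimal sketch of the assumed driver (the step-net call, `dev_ctx`,
and variable names are illustrative assumptions, not part of this diff):

  // Assumed driver loop (illustrative sketch only):
  rnn::SegmentInputs(step_scopes, arg.inlinks, seq_len, infer_shape_mode);
  for (size_t step_id = 0; step_id < seq_len; ++step_id) {
    if (step_id > 0) {
      rnn::LinkMemories(step_scopes, arg.memories, step_id, -1,
                        infer_shape_mode);
    }
    step_net->Run(*step_scopes[step_id], dev_ctx);  // hypothetical call
  }
  rnn::ConcatOutputs(step_scopes, arg.outlinks, seq_len, infer_shape_mode);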

@@ -0,0 +1,93 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <string>

#include "paddle/framework/operator.h"
#include "paddle/operators/type_alias.h"

namespace paddle {
namespace operators {
namespace rnn {

/**
 * Memory of an RNN (same as the role of `Memory` in PaddlePaddle).
 *
 * Memory attributes cached by this op; dims will be inferred from the boot
 * memories in the parent scope. Other attributes are copied from the Op's
 * proto attributes.
 */
struct MemoryAttr {
  // name of current state variable
  std::string var;
  // name of previous step's state variable
  std::string pre_var;
  // name of the variables used to init this memory (same role as `boot_layer`
  // in PaddlePaddle), which is stored in the parent scope.
  std::string boot_var;
};

struct Link {
  // name of the link variable inside the step scopes; an alias used to avoid
  // duplicate keys in the scopes.
  std::string internal;
  // name of the input or output link variable in the parent scope.
  std::string external;
};

struct Argument {
  std::string step_net;
  std::string step_scopes;
  std::vector<Link> inlinks;
  std::vector<Link> outlinks;
  std::vector<rnn::MemoryAttr> memories;
};

struct ArgumentName {
  std::string step_net;
  std::string step_scopes;
  std::string inlinks;
  std::string outlinks;
  std::string inlink_alias;   // the alias of inlinks in the step net.
  std::string outlink_alias;  // the alias of outlinks in the step net.
  std::string memories;       // the memory name
  std::string pre_memories;   // the previous memory name
  std::string boot_memories;  // the boot memory name
};

/**
 * Prepare inputs for each step net.
 */
void SegmentInputs(const std::vector<Scope*>& step_scopes,
                   const std::vector<Link>& inlinks, const size_t seq_len,
                   bool infer_shape_mode);

/**
 * Process outputs of step nets and merge them into the outlink variables.
 */
void ConcatOutputs(const std::vector<Scope*>& step_scopes,
                   const std::vector<Link>& outlinks, const size_t seq_len,
                   bool infer_shape_mode);

void LinkMemories(const std::vector<Scope*>& step_scopes,
                  const std::vector<MemoryAttr>& memories, const size_t step_id,
                  const int offset, bool infer_shape_mode);

void InitArgument(const ArgumentName& name, Argument* arg,
                  const OperatorBase& op);

}  // namespace rnn
}  // namespace operators
}  // namespace paddle
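
For reference, a hedged sketch of how an operator might populate these structs
through InitArgument; every key string below is an illustrative assumption,
not something this diff defines:

  // Hypothetical name table wiring proto keys to Argument fields
  // (initializer order follows the ArgumentName member order):
  const rnn::ArgumentName kArgName{
      "step_net",     "step_scopes",   "inlinks",
      "outlinks",     "inlink_alias",  "outlink_alias",
      "memories",     "pre_memories",  "boot_memories"};

  rnn::Argument arg;
  rnn::InitArgument(kArgName, &arg, op);  // op: some OperatorBase instance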