RecurrentOp implementation (#2890)
* add rnn op interfaces
* add Run
* rename state -> memory
* change state -> memory
* make compilable
* add .cc
* init test
* add op fake implementation
* add CreateStepNet and CreateScopes implementation.
* add TODO list
* init memory attributes.
* add LinkMemories
* add PlainNet fake implementation
* Use std::shared_ptr<Scope> in the OpRunContext.
* add test
* disable mutable_data
* finish SegmentInput function
* enable mutable_data with a trick
* RNNOp test.
* enable LinkMemories with mutable_data
* update SegmentInput function with comments
* finish ConcatOutput function
* reformat inputs and attributes boot_memories
* Refine unit test.
* Refine unit test.
* modify inlinks.
* add OpDesc to Net
* fix bug and update unit test.
* move step scopes from inputs to outputs
* fix merge conflict, update SegmentInput function
* add RecurrentOpProtoAndCheckerMaker.
* clean the codes
* Abstract GetStepScopes and GetMaxSeqLen function
* refine LinkMemories
* Refine code and add some comments.
* add backward core
* update for develop branch.
* add forward core
* add forward algorithm
* Add RecurrentGradientAlgorithm implementation.
* use CopyFrom and Slice function in RecurrentOp
* add unit test for LinkMemories.
* fix unit test.
* use the latest tensor.h, solve conflict
* add maker
* move SegmentInput and ConcatOutput to details namespace
* unit test for RecurrentGradientAlgorithm.
* apply OperatorBase
* apply net operator.
* move memories to attributes
* add RecurrentGradientOp
* open unit test in recurrent_network_op_test.
* revert some files.
* add RecurrentArgument and Link struct to simplify member variables.
* rename.
* move recurrent_op from framework to operators
* add RecurrentGradientOp Init
* fix name
* fix Link.internal/external name
* use namespace operators instead of framework
* clean the code
* use the latest add_op and mul_op, don't test backward now
* Remove ScopePtr and OperatorPtr
* add get_net to pybind
* add test_recurrent_op.py
* add random into gen_tensor
* update to develop branch and refine some code.
* add some comments.
parent ca8275d0e3
commit aee0d3ec5f
@@ -0,0 +1,216 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/framework/operator.h"

namespace paddle {
namespace operators {

using namespace paddle::framework;

namespace rnn {

/**
 * Memory of an RNN (same as the role of `Memory` in PaddlePaddle).
 *
 * Memory attributes are cached by this op; dims will be inferred from the
 * boot memories in the parent scope. Other attributes are copied from the
 * Op's proto attributes.
 */
struct MemoryAttr {
  // name of current state variable
  std::string var;
  // name of previous step's state variable
  std::string pre_var;
  // name of the variable to init this memory (same role as `boot_layer` in
  // PaddlePaddle), which is stored in the parent scope.
  std::string boot_var;
};

struct Link {
  // input or output link name.
  std::string internal;
  // alias to avoid duplicate keys in scopes.
  std::string external;
};

struct Argument {
  std::string step_net;
  std::string step_scopes;
  std::vector<Link> inlinks;
  std::vector<Link> outlinks;
  std::vector<rnn::MemoryAttr> memories;
};

struct ArgumentName {
  std::string step_net;
  std::string step_scopes;
  std::string inlinks;
  std::string outlinks;
  std::string inlink_alias;   // the alias of inlinks in the step net.
  std::string outlink_alias;  // the alias of outlinks in the step net.
  std::string memories;       // the memory name
  std::string pre_memories;   // the previous memory name
  std::string boot_memories;  // the boot memory name
};

/**
 * Prepare inputs for each step net.
 */
void SegmentInputs(std::vector<std::shared_ptr<Scope>>& step_scopes,
                   const std::vector<Link>& inlinks,
                   const size_t seq_len);

/**
 * Process outputs of step nets and merge them into the output variables.
 */
void ConcatOutputs(std::vector<std::shared_ptr<Scope>>& step_scopes,
                   const std::vector<Link>& outlinks,
                   const size_t seq_len);

void LinkMemories(std::vector<std::shared_ptr<Scope>>& step_scopes,
                  const std::vector<MemoryAttr>& memories,
                  size_t step_id,
                  int offset);

void InitArgument(const ArgumentName& name, Argument* arg);

}  // namespace rnn
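To make the `MemoryAttr{var, pre_var, boot_var}` convention concrete: at step 0 the "previous" state is taken from the boot memory in the parent scope, and at step t it aliases step t-1's state variable. Below is a minimal, framework-free Python sketch of that linking (scopes modelled as plain dicts, variable names taken from the unit test); it is an illustration of the idea, not the actual C++ `LinkMemories`.

```python
import numpy as np

def link_memories(parent_scope, step_scopes, mem, step_id):
    """Sketch of rnn::LinkMemories for one memory attribute (offset = -1)."""
    if step_id == 0:
        # the first step reads its "previous" state from the boot memory
        step_scopes[0][mem["pre_var"]] = parent_scope[mem["boot_var"]]
    else:
        # later steps alias the previous step's state variable
        step_scopes[step_id][mem["pre_var"]] = step_scopes[step_id - 1][mem["var"]]

# usage: 3 steps, batch 2, hidden dim 4
mem = {"var": "h@alias", "pre_var": "h@pre", "boot_var": "h_boot"}
parent_scope = {"h_boot": np.zeros((2, 4))}
step_scopes = [{} for _ in range(3)]
for t in range(3):
    link_memories(parent_scope, step_scopes, mem, t)
    step_scopes[t]["h@alias"] = np.tanh(step_scopes[t]["h@pre"])  # stand-in for the step net
```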
// The sequence format in RecurrentOp is Tensor<seq_len, batch_size, dim> now.
// TODO:
// 1. No-padding computing for sequences with indefinite length in one batch.
// 2. Hierarchical RNN for sequences with sub-sequences.
// 3. Internal Memory.
// 4. More complex RNN architectures, such as Gated Feedback RNN.
//    Refer to: https://arxiv.org/pdf/1502.02367.pdf
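Given that Tensor<seq_len, batch_size, dim> layout, `SegmentInputs` and `ConcatOutputs` amount to slicing along the time axis and stacking the per-step results back. A hedged NumPy sketch of that behavior (names are illustrative, not the C++ implementation):

```python
import numpy as np

def segment_inputs(x, seq_len):
    """Split a [seq_len, batch, dim] input into one [batch, dim] slice per step."""
    assert x.shape[0] == seq_len
    return [x[t] for t in range(seq_len)]

def concat_outputs(step_outputs):
    """Merge per-step [batch, dim] outputs back into [seq_len, batch, dim]."""
    return np.stack(step_outputs, axis=0)

x = np.random.random((6, 50, 30))   # seq_len=6, batch=50, dim=30
steps = segment_inputs(x, seq_len=6)
y = concat_outputs(steps)
assert y.shape == (6, 50, 30)
```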
class RecurrentAlgorithm {
 public:
  void Run(const std::shared_ptr<Scope>& scope,
           const platform::DeviceContext& dev_ctx) const;

  void Init(std::unique_ptr<rnn::Argument> arg) { arg_ = std::move(arg); }

  /**
   * InferShape must be called before Run.
   */
  void InferShape(const std::shared_ptr<Scope>& scope) const;

 protected:
  /*
   * The step scopes will be stored in the parent scope as a variable.
   *
   * NOTE the scopes are reused in both the forward and the backward pass, so
   * they are created once and expanded if more steps are needed.
   */
  void CreateScopes(std::shared_ptr<Scope> scope) const;

  inline const std::vector<std::shared_ptr<Scope>>& GetStepScopes(
      std::shared_ptr<Scope> scope) const {
    return *(scope->GetVariable(arg_->step_scopes))
                ->GetMutable<std::vector<std::shared_ptr<Scope>>>();
  }

  void InitMemories(std::shared_ptr<Scope> step_scopes) const;

 private:
  std::unique_ptr<rnn::Argument> arg_;
  mutable size_t seq_len_;
};
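Putting the pieces together, the forward pass is conceptually: segment the inlinks, then for each step link the memories and run the step net inside that step's scope, and finally concatenate the outlinks. A hedged Python sketch of that control flow (the actual C++ `Run` additionally handles scope creation and device contexts):

```python
import numpy as np

def recurrent_forward(x, h_boot, step_net):
    """Conceptual forward pass over a [seq_len, batch, dim] input."""
    outputs, h_pre = [], h_boot            # InitMemories: step 0 starts from the boot memory
    for x_t in x:                          # SegmentInputs: one slice per step scope
        h = step_net(x_t, h_pre)           # run the step net in this step's scope
        outputs.append(h)
        h_pre = h                          # LinkMemories: the next step sees this state
    return np.stack(outputs, axis=0)       # ConcatOutputs: merge per-step outputs

# tiny usage with a stand-in step net
out = recurrent_forward(np.random.random((6, 2, 4)), np.zeros((2, 4)),
                        lambda x_t, h_pre: np.tanh(x_t + h_pre))
assert out.shape == (6, 2, 4)
```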
/**
 * RNN's backward algorithm.
 *
 * To accelerate the development of RecurrentGradientOp, we decouple the RNN
 * algorithm from the `OperatorBase` implementation: the former contains the
 * core implementation of an RNN and stays stable even if the framework
 * changes a lot, while the latter is a wrapper that acts as an adapter to
 * make the RNN an operator.
 */
class RecurrentGradientAlgorithm {
 public:
  void Init(std::unique_ptr<rnn::Argument> arg) { arg_ = std::move(arg); }

  void Run(const std::shared_ptr<Scope>& scope,
           const platform::DeviceContext& dev_ctx) const;

  void LinkBootMemoryGradients(std::shared_ptr<Scope> step_scopes) const;

  /**
   * InferShape must be called before Run.
   */
  void InferShape(const std::shared_ptr<Scope>& scope) const;

 protected:
  inline const std::vector<std::shared_ptr<Scope>>& GetStepScopes(
      std::shared_ptr<Scope> scope) const {
    return *(scope->GetVariable(arg_->step_scopes))
                ->GetMutable<std::vector<std::shared_ptr<Scope>>>();
  }

 private:
  std::unique_ptr<rnn::Argument> arg_;
  mutable size_t seq_len_;
};

class RecurrentOp final : public OperatorBase {
 public:
  void Init() override;

  /**
   * InferShape must be called before Run.
   */
  void InferShape(const std::shared_ptr<Scope>& scope) const override {
    alg_.InferShape(scope);
  }

  void Run(const std::shared_ptr<Scope>& scope,
           const platform::DeviceContext& dev_ctx) const override {
    alg_.Run(scope, dev_ctx);
  }

  static const rnn::ArgumentName kArgName;

 private:
  RecurrentAlgorithm alg_;
};

class RecurrentGradientOp final : public OperatorBase {
 public:
  void Init() override;

  /**
   * InferShape must be called before Run.
   */
  void InferShape(const std::shared_ptr<Scope>& scope) const override {
    alg_.InferShape(scope);
  }

  void Run(const std::shared_ptr<Scope>& scope,
           const platform::DeviceContext& dev_ctx) const override {
    alg_.Run(scope, dev_ctx);
  }

  static const rnn::ArgumentName kArgName;

 private:
  RecurrentGradientAlgorithm alg_;
};

}  // namespace operators
}  // namespace paddle
@@ -1,2 +1,2 @@
 cc_library(paddle_pybind SHARED SRCS pybind.cc DEPS pybind python
-add_op fc_op sgd_op cross_entropy_op)
+add_op fc_op sgd_op cross_entropy_op recurrent_network_op)
@@ -0,0 +1,92 @@
import paddle.v2.framework.core as core
import unittest
import numpy as np
import paddle.v2.framework.create_op_creation_methods as creation

ops = creation.op_creations


def create_tensor(scope, name, shape):
    tensor = scope.create_var(name).get_tensor()
    tensor.set_dims(shape)
    tensor.alloc_float()
    tensor.set(np.random.random(shape))
    return tensor


class TestRNN(unittest.TestCase):
    '''
    Test RNNOp

    equation:
        h_t = \sigma (W x_t + U h_{t-1})
    weights:
        - W
        - U
    vars:
        - x
    memories:
        - h
    outputs:
        - h
    '''

    def init(self):
        input_dim = 30
        batch_size = 50
        weight_dim = 15

        self.scope = core.Scope(None)

        # create vars
        create_tensor(self.scope, "x", [batch_size, input_dim])
        create_tensor(self.scope, "W", [input_dim, weight_dim])
        create_tensor(self.scope, "U", [weight_dim, weight_dim])
        create_tensor(self.scope, "h_boot", [batch_size, weight_dim])

        x_alias = "x@alias"
        y_alias = "y@alias"
        memory = "h@alias"
        prememory = "h@pre"
        output = "rnn_out"
        output_alias = "rnn_out@alias"

        # create step net
        stepnet_var = self.scope.create_var("stepnet")
        stepnet = stepnet_var.get_net()
        # stepnet = core.Net.create()
        x_fc_op = ops.fc(X=x_alias, W="W", Y="Wx")
        h_fc_op = ops.fc(X=prememory, W="U", Y="Uh")
        sum_op = ops.add_two(X="Wx", Y="Uh", Out="sum")
        sig_op = ops.sigmoid(X="sum", Y=memory)
        stepnet.add_op(x_fc_op)
        stepnet.add_op(h_fc_op)
        stepnet.add_op(sum_op)
        stepnet.add_op(sig_op)
        stepnet.complete_add_op(True)

        # create RNNOp
        rnnop = ops.recurrent_op(
            # inputs
            inlinks=["x"],
            boot_memories=["h_boot"],
            step_net="stepnet",
            # outputs
            outlinks=[output],
            step_scopes="step_scopes",
            # attributes
            inlink_alias=["x@alias"],
            outlink_alias=[output_alias],
            pre_memories=[prememory],
            memories=[memory])

        ctx = core.DeviceContext.cpu_context()
        rnnop.infer_shape(self.scope)
        rnnop.run(self.scope, ctx)

    def test_recurrent(self):
        self.init()


if __name__ == '__main__':
    unittest.main()
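For reference, the recurrence the test exercises, h_t = sigma(W x_t + U h_{t-1}), can be reproduced in plain NumPy. The sketch below uses shapes based on `TestRNN.init` (input_dim=30, batch_size=50, weight_dim=15) plus an illustrative seq_len dimension, following the Tensor<seq_len, batch_size, dim> format described in the header; it is a standalone reference computation, not part of the test file, and the test above does not yet compare values against it.

```python
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def rnn_reference(x, h_boot, W, U):
    """h_t = sigmoid(W x_t + U h_{t-1}); x is [seq_len, batch, input_dim]."""
    h, hs = h_boot, []
    for t in range(x.shape[0]):
        h = sigmoid(x[t].dot(W) + h.dot(U))
        hs.append(h)
    return np.stack(hs, axis=0)  # [seq_len, batch, weight_dim]

# shapes mirroring TestRNN.init, with an arbitrary seq_len
seq_len, batch_size, input_dim, weight_dim = 4, 50, 30, 15
x = np.random.random((seq_len, batch_size, input_dim)).astype("float32")
h_boot = np.random.random((batch_size, weight_dim)).astype("float32")
W = np.random.random((input_dim, weight_dim)).astype("float32")
U = np.random.random((weight_dim, weight_dim)).astype("float32")
out = rnn_reference(x, h_boot, W, U)
assert out.shape == (seq_len, batch_size, weight_dim)
```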