@@ -34,41 +34,40 @@ protected:
   virtual void TearDown() override {}
 
   void CreateGlobalVariables() {
-    scope_ = std::make_shared<Scope>();
     // create input, and init content
     LOG(INFO) << "create global variable x";
     for (auto inlink : std::vector<std::string>{"x", "x0", "x1", "h"}) {
-      Variable* x = scope_->NewVar(inlink);
+      Variable* x = scope_.NewVar(inlink);
       DDim dims = make_ddim(std::vector<int>{
           10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/});
       x->GetMutable<Tensor>()->mutable_data<float>(dims, platform::CPUPlace());
     }
     // create output alias just for test
     for (auto inlink : std::vector<std::string>{"h@alias"}) {
-      Variable* x = scope_->NewVar(inlink);
+      Variable* x = scope_.NewVar(inlink);
       DDim dims =
           make_ddim(std::vector<int>{20 /*batch size*/, 30 /*input dim*/});
       x->GetMutable<Tensor>()->mutable_data<float>(dims, platform::CPUPlace());
     }
 
     LOG(INFO) << "create global variable w";
-    Variable* w = scope_->NewVar("rnn/w");
+    Variable* w = scope_.NewVar("rnn/w");
     w->GetMutable<Tensor>()->mutable_data<float>(
         make_ddim(std::vector<int>{30, 30}), platform::CPUPlace());
 
     for (auto boot : std::vector<std::string>{"x_boot", "h_boot"}) {
       LOG(INFO) << "create global variable " << boot;
-      Variable* h_boot = scope_->NewVar(boot);
+      Variable* h_boot = scope_.NewVar(boot);
       h_boot->GetMutable<Tensor>()->mutable_data<float>(
           make_ddim(std::vector<int>{20 /*batch size*/, 30 /*input dim*/}),
           platform::CPUPlace());
     }
 
     LOG(INFO) << "create variable step_scopes";
-    scope_->NewVar("step_scopes");
+    scope_.NewVar("step_scopes");
 
     LOG(INFO) << "create variable h";
-    scope_->NewVar("h");
+    scope_.NewVar("h");
   }
 
   void CreateRNNOp() {
@@ -150,7 +149,7 @@ protected:
 
   void CreateStepNet() {
     LOG(INFO) << "create variable step_net";
-    Variable* var = scope_->NewVar("step_net");
+    Variable* var = scope_.NewVar("step_net");
     auto net = var->GetMutable<NetOp>();
     // rnn/s is net's input or output?
     net->inputs_ = {"rnn/h@pre", "rnn/w", "rnn/x"};
@@ -164,7 +163,7 @@ protected:
   }
 
   // father scope
-  std::shared_ptr<Scope> scope_;
+  Scope scope_;
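+  // scope_ is now a plain value member; CreateGlobalVariables() no longer
+  // allocates it with std::make_shared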
   std::shared_ptr<OperatorBase> rnn_op_;
 };
 
@@ -191,66 +190,64 @@ protected:
   virtual void TearDown() override {}
 
   void CreateGlobalVariables() {
-    scope_ = std::make_shared<Scope>();
     // inputs: x
     LOG(INFO) << "create global variable x";
-    Variable* x = scope_->NewVar("x");
+    Variable* x = scope_.NewVar("x");
     DDim dims =
         make_ddim({10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/});
     x->GetMutable<Tensor>()->mutable_data<float>(dims, platform::CPUPlace());
     // inputs: h_boot
     LOG(INFO) << "create global variable h_boot";
-    Variable* h_boot = scope_->NewVar("h_boot");
+    Variable* h_boot = scope_.NewVar("h_boot");
     h_boot->GetMutable<Tensor>()->mutable_data<float>(
         make_ddim({20 /*batch size*/, 30 /*input dim*/}), platform::CPUPlace());
     // inputs: w
     LOG(INFO) << "create global variable w";
-    Variable* w = scope_->NewVar("rnn/w");
+    Variable* w = scope_.NewVar("rnn/w");
     w->GetMutable<Tensor>()->mutable_data<float>(make_ddim({30, 30}),
                                                  platform::CPUPlace());
     // inputs: h_grad
     LOG(INFO) << "create variable h_grad";
-    Variable* dh = scope_->NewVar("h_grad");
+    Variable* dh = scope_.NewVar("h_grad");
     dh->GetMutable<Tensor>()->mutable_data<float>(make_ddim({10, 20, 30}),
                                                   platform::CPUPlace());
     // inputs: step_scopes
     LOG(INFO) << "create variable step_scopes";
-    scope_->NewVar("step_scopes");
+    scope_.NewVar("step_scopes");
     // inputs: step_net
     LOG(INFO) << "create variable step_net";
-    scope_->NewVar("step_net");
+    scope_.NewVar("step_net");
     // outputs: w_grad
     LOG(INFO) << "create global variable w_grad";
-    scope_->NewVar("rnn/w_grad");
+    scope_.NewVar("rnn/w_grad");
     // outputs: x_grad
     LOG(INFO) << "create global variable x_grad";
-    scope_->NewVar("x_grad");
+    scope_.NewVar("x_grad");
     // outputs: h_boot_grad
     LOG(INFO) << "create global variable h_boot_grad";
-    scope_->NewVar("h_boot_grad");
+    scope_.NewVar("h_boot_grad");
   }
 
   void CreateStepScopes() {
-    std::vector<std::shared_ptr<Scope>>* step_scopes =
-        scope_->FindVar("step_scopes")
-            ->GetMutable<std::vector<std::shared_ptr<Scope>>>();
+    auto step_scopes =
+        scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
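+    // the step-scope list now holds raw Scope* (created below via NewScope)
+    // instead of shared_ptr<Scope>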
     for (int i = 0; i < 10; ++i) {
-      auto scope = std::make_shared<Scope>(scope_);
-      auto pre_t = scope->NewVar("rnn/pre_h")->GetMutable<Tensor>();
-      pre_t->mutable_data<float>(make_ddim({20, 30}), platform::CPUPlace());
-      auto tensor = scope->NewVar("rnn/h")->GetMutable<Tensor>();
-      tensor->mutable_data<float>(make_ddim({20, 30}), platform::CPUPlace());
+      auto& scope = scope_.NewScope();
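+      // NewScope() returns a reference to a child scope that scope_ creates
+      // and keeps ownership of, so no shared_ptr bookkeeping is needed here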
+      auto pre_t = scope.NewVar("rnn/pre_h")->GetMutable<Tensor>();
+      pre_t->mutable_data<float>({20, 30}, platform::CPUPlace());
+      auto tensor = scope.NewVar("rnn/h")->GetMutable<Tensor>();
+      tensor->mutable_data<float>({20, 30}, platform::CPUPlace());
 
       // for unit test of ConcatOutputs
-      auto xg = scope->NewVar("rnn/x_grad")->GetMutable<Tensor>();
-      xg->mutable_data<float>(make_ddim({20, 30}), platform::CPUPlace());
+      auto xg = scope.NewVar("rnn/x_grad")->GetMutable<Tensor>();
+      xg->mutable_data<float>({20, 30}, platform::CPUPlace());
 
-      step_scopes->push_back(scope);
+      step_scopes->emplace_back(&scope);
     }
 
     // last time step
     auto g = (*step_scopes)[9]->NewVar("rnn/h_pre_grad")->GetMutable<Tensor>();
-    g->mutable_data<float>(make_ddim({20, 30}), platform::CPUPlace());
+    g->mutable_data<float>({20, 30}, platform::CPUPlace());
   }
 
   void CreateRNNGradientAlgorithm() {
@@ -278,7 +275,7 @@ protected:
 
   void CreateStepNet() {
     LOG(INFO) << "create variable step_net";
-    Variable* var = scope_->NewVar("step_net");
+    Variable* var = scope_.NewVar("step_net");
     auto net = var->GetMutable<NetOp>();
     net->AddOp(OpRegistry::CreateOp("mul",
                                     {"rnn/h_pre", "rnn/w", "rnn/s_grad"},
@@ -298,9 +295,8 @@ protected:
     rnn::Link inlink;
     inlink.external = "x";
     inlink.internal = "rnn/x";
-    std::vector<std::shared_ptr<Scope>>* step_scopes =
-        scope_->FindVar("step_scopes")
-            ->GetMutable<std::vector<std::shared_ptr<Scope>>>();
+    auto step_scopes =
+        scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
     rnn::SegmentInputs(*step_scopes, std::vector<rnn::Link>{inlink}, 10);
   }
 
@@ -312,15 +308,14 @@ protected:
     mem_attr.boot_var = "boot_h";
     std::vector<rnn::MemoryAttr> memories;
     memories.push_back(mem_attr);
-    std::vector<std::shared_ptr<Scope>>* step_scopes =
-        scope_->FindVar("step_scopes")
-            ->GetMutable<std::vector<std::shared_ptr<Scope>>>();
+    auto step_scopes =
+        scope_.FindVar("step_scopes")->GetMutable<std::vector<Scope*>>();
     for (int i = 1; i < 10; ++i) {
       rnn::LinkMemories(*step_scopes, memories, i, -1);
     }
   }
 
-  std::shared_ptr<Scope> scope_;
+  Scope scope_;
   RecurrentGradientAlgorithm rnn_grad_algo_;
 };
 
@@ -339,14 +334,14 @@ TEST(RecurrentOp, LinkMemories) {
 
   // create and init step scopes
   int len = 10;
-  std::vector<std::shared_ptr<Scope>> step_scopes;
+  std::vector<Scope*> step_scopes;
   for (int i = 0; i < len; ++i) {
-    auto scope = std::make_shared<Scope>();
+    auto scope = new Scope();
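+    // heap-allocated directly; released by the delete loop added at the end
+    // of the test (see the last hunk)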
     scope->NewVar("pre_h");
     auto tensor = scope->NewVar("h")->GetMutable<Tensor>();
-    float* data = tensor->mutable_data<float>(make_ddim({15, 20}), CPUPlace());
-    for (int i = 0; i < 15 * 20; ++i) {
-      data[i] = rand() * (1. / (double)RAND_MAX);
+    float* data = tensor->mutable_data<float>({15, 20}, CPUPlace());
+    for (int j = 0; j < 15 * 20; ++j) {
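+      // 'j' avoids shadowing the outer loop variable 'i'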
+      data[j] = rand() * (1. / (double)RAND_MAX);
     }
     step_scopes.push_back(scope);
   }
@@ -388,7 +383,17 @@ TEST(RecurrentOp, LinkMemories) {
       ASSERT_FLOAT_EQ(a[i], b[i]);
     }
   }
+
+  for (auto s : step_scopes) {
+    delete s;
+  }
 }
 
 USE_OP(add_two);
 USE_OP(mul);
+
+// int main() {
+//  //! TODO(yuyang18): Temporary disable this unit-test because implementation
+//  //! error.
+//  return 0;
+//}
|