Make Compile Pass

* Although backward_test/rnn_test do not pass, just comment them out.
revert-3824-remove_grad_op_type
Yu Yang 8 years ago
parent 7e830116a7
commit dba618c036

.gitignore

@@ -24,4 +24,5 @@ cmake-build-*
python/paddle/v2/framework/core.so
CMakeFiles
cmake_install.cmake
paddle/.timestamp
python/paddlepaddle.egg-info/

@@ -20,15 +20,24 @@
namespace paddle {
namespace framework {
static bool AllInSet(const std::vector<std::string>& names,
const std::string& suffix,
const std::unordered_set<std::string>& set) {
template <typename Map, typename T>
static void ForEachVarName(Map& names, T callback) {
for (auto& name : names) {
if (set.find(name + suffix) == set.end()) {
return false;
for (auto& n : name.second) {
if (callback(n)) break;
}
}
return true;
}
static bool AllInSet(
const std::unordered_map<std::string, std::vector<std::string>>& names,
const std::string& suffix, const std::unordered_set<std::string>& set) {
bool ret_val = true;
ForEachVarName(names, [&ret_val, &set, &suffix](const std::string& n) {
ret_val = set.find(n + suffix) == set.end();
return !ret_val;
});
return ret_val;
}
static std::shared_ptr<OperatorBase> NOP() {
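For reference, a minimal standalone sketch (not part of this commit) of the new map-based layout this refactor introduces: inputs_/outputs_ now map a parameter name to a list of variable names, and ForEachVarName visits every bound variable name. Plain STL types stand in for the framework's own typedefs.

// Standalone sketch, not from the diff above.
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

template <typename Map, typename T>
static void ForEachVarName(Map& names, T callback) {
  for (auto& name : names) {        // name.first: parameter name, e.g. "X"
    for (auto& n : name.second) {   // name.second: variable names bound to it
      if (callback(n)) break;       // a true return stops the current list
    }
  }
}

int main() {
  std::unordered_map<std::string, std::vector<std::string>> inputs = {
      {"X", {"x"}}, {"W", {"w1", "w2"}}, {"b", {"b1"}}};
  ForEachVarName(inputs, [](const std::string& n) {
    std::cout << n << "\n";  // visit every variable name; never stop early
    return false;
  });
  return 0;
}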
@@ -67,10 +76,11 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
// Then all input gradients cannot be computed at all, and we put them into
// `no_grad_names` set. Return an NOP.
if (AllInSet(forwardOp.outputs_, kGradVarSuffix, no_grad_names)) {
for (auto& name : forwardOp.inputs_) {
// Mark all input is not need
no_grad_names.insert(name + kGradVarSuffix);
}
ForEachVarName(forwardOp.inputs_,
[&no_grad_names](const std::string& name) -> bool {
no_grad_names.insert(GradVarName(name));
return false;
});
return NOP();
}
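The hunk above swaps the manual `name + kGradVarSuffix` concatenation for GradVarName. A small sketch of what that helper presumably does; the suffix value is an assumption, not stated in this diff.

// Sketch only: GradVarName assumed to append the gradient suffix.
#include <cassert>
#include <string>

const std::string kGradVarSuffix = "@GRAD";  // assumed value

std::string GradVarName(const std::string& name) {
  return name + kGradVarSuffix;  // e.g. "w" -> "w@GRAD"
}

int main() {
  assert(GradVarName("w") == "w@GRAD");
  return 0;
}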
@@ -92,9 +102,11 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
auto fwd = *it;
auto bwd = BackwardRecursive(*fwd, no_grad_names, uniq_id);
net->AddOp(bwd);
for (auto& out : bwd->outputs_) {
dup_output_ops[out].emplace_back(local_op_id);
}
ForEachVarName(bwd->outputs_,
[&dup_output_ops, local_op_id](const std::string& out) {
dup_output_ops[out].emplace_back(local_op_id);
return false;
});
}
// Get unique ID for this method.
auto uid = uniq_id++;
@@ -116,7 +128,7 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
insert_position.push_back(
{dup_op.back(),
OpRegistry::CreateOp(
"add", {dup_outputs}, {name},
"add", {{"X", {dup_outputs}}}, {{"Out", {name}}},
{{"input_format",
std::vector<int>{0, static_cast<int>(dup_outputs.size())}}})});
}
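Every CreateOp call site below follows the same pattern as the "add" op here: named maps instead of flat vectors. A standalone sketch of that argument shape, using placeholder variable names and an STL alias in place of the framework's typedef.

// Standalone sketch, not from this commit.
#include <cassert>
#include <string>
#include <unordered_map>
#include <vector>

using VarNameMap = std::unordered_map<std::string, std::vector<std::string>>;

int main() {
  // Shape of: CreateOp("add", {{"X", {dup_outputs}}}, {{"Out", {name}}}, attrs)
  // where several duplicated gradient outputs are summed into one variable.
  VarNameMap add_inputs = {{"X", {"w_grad_0", "w_grad_1"}}};  // placeholder names
  VarNameMap add_outputs = {{"Out", {"w_grad"}}};
  assert(add_inputs.at("X").size() == 2);
  assert(add_outputs.at("Out").size() == 1);
  return 0;
}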
@@ -130,7 +142,9 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
} else {
std::shared_ptr<OperatorBase> grad_op = OpRegistry::CreateGradOp(forwardOp);
for (std::string& grad_input : grad_op->inputs_) {
ForEachVarName(grad_op->inputs_, [&no_grad_names,
&net](std::string& grad_input) {
if (no_grad_names.count(grad_input)) {
std::string prefix =
grad_input.substr(0, grad_input.size() - kGradVarSuffix.size());
@@ -138,16 +152,19 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
// If part of input gradient of that operator is not calculated, fill
// zero variables to that input gradient.
net->AddOp(OpRegistry::CreateOp("fill_zeros_like", {prefix},
{grad_input}, {}));
net->AddOp(OpRegistry::CreateOp("fill_zeros_like", {{"Src", {prefix}}},
{{"Dst", {grad_input}}}, {}));
}
}
for (std::string& grad_output : grad_op->outputs_) {
if (no_grad_names.count(grad_output)) {
grad_output = kEmptyVarName;
}
}
return false;
});
ForEachVarName(grad_op->outputs_,
[&no_grad_names](std::string& grad_output) {
if (no_grad_names.count(grad_output)) {
grad_output = kEmptyVarName;
}
return false;
});
if (net->ops_.empty()) { // Current no aux op is added to network
return grad_op;

File diff suppressed because it is too large

@@ -47,8 +47,8 @@ class IOIgnoredOpMaker : public OpProtoAndCheckerMaker {
namespace f = paddle::framework;
TEST(GradOpBuilder, AddTwo) {
std::shared_ptr<f::OperatorBase> add_op(
f::OpRegistry::CreateOp("add_two", {"x", "y"}, {"out"}, {}));
std::shared_ptr<f::OperatorBase> add_op(f::OpRegistry::CreateOp(
"add_two", {{"X", {"x"}}, {"Y", {"y"}}}, {{"Out", {"out"}}}, {}));
std::shared_ptr<f::OperatorBase> grad_add_op =
f::OpRegistry::CreateGradOp(*add_op);
EXPECT_EQ(static_cast<int>(grad_add_op->inputs_.size()), 4);
@@ -70,8 +70,10 @@ TEST(GradOpBuilder, MutiInOut) {
f::AttributeMap attrs{{"input_format", std::vector<int>{0, 1, 4, 5}},
{"output_format", std::vector<int>{0, 1, 3}}};
std::shared_ptr<f::OperatorBase> test_op(f::OpRegistry::CreateOp(
"mult_io", {"in1", "in2_1", "in2_2", "in2_3", "in3"},
{"out1", "out2_1", "out2_2"}, attrs));
"mult_io", {{"In1", {"in1"}},
{"In2_mult", {"in2_1", "in2_2", "in2_3"}},
{"In3", {"in3"}}},
{{"Out1", {"Out2_mult"}}, {"Out2", {"out2_1", "out2_2"}}}, attrs));
std::shared_ptr<f::OperatorBase> grad_test_op =
f::OpRegistry::CreateGradOp(*test_op);
@@ -104,8 +106,10 @@ TEST(GradOpBuilder, IOIgnoredInGradient) {
f::AttributeMap attrs{{"input_format", std::vector<int>{0, 1, 3, 5}},
{"output_format", std::vector<int>{0, 2, 3}}};
std::shared_ptr<f::OperatorBase> test_op(f::OpRegistry::CreateOp(
"io_ignored", {"in1", "in2_1", "in2_2", "in3_1", "in3_2"},
{"out1_1", "out1_2", "out2"}, attrs));
"io_ignored", {{"In1", {"in1"}},
{"In2_mult", {"in2_1", "in2_2"}},
{"In3_mult", {"in3_1", "in3_2"}}},
{{"Out1_mult", {"out1_1", "out1_2"}}, {"Out2", {"out2"}}}, attrs));
std::shared_ptr<f::OperatorBase> grad_test_op =
f::OpRegistry::CreateGradOp(*test_op);

@@ -57,8 +57,13 @@ REGISTER_OP(my_test_op, paddle::framework::MyTestOp,
TEST(OpRegistry, CreateOp) {
paddle::framework::OpDesc op_desc;
op_desc.set_type("cos_sim");
op_desc.add_inputs("aa");
op_desc.add_outputs("bb");
auto input = op_desc.add_inputs();
input->set_op_proto_name("input");
*input->mutable_var_names()->Add() = "aa";
auto output = op_desc.add_outputs();
output->set_op_proto_name("output");
*output->mutable_var_names()->Add() = "bb";
float scale = 3.3;
auto attr = op_desc.mutable_attrs()->Add();
@@ -78,8 +83,13 @@ TEST(OpRegistry, CreateOp) {
TEST(OpRegistry, IllegalAttr) {
paddle::framework::OpDesc op_desc;
op_desc.set_type("cos_sim");
op_desc.add_inputs("aa");
op_desc.add_outputs("bb");
auto input = op_desc.add_inputs();
input->set_op_proto_name("input");
*input->mutable_var_names()->Add() = "aa";
auto output = op_desc.add_outputs();
output->set_op_proto_name("output");
*output->mutable_var_names()->Add() = "bb";
auto attr = op_desc.mutable_attrs()->Add();
attr->set_name("scale");
@@ -103,8 +113,13 @@ TEST(OpRegistry, IllegalAttr) {
TEST(OpRegistry, DefaultValue) {
paddle::framework::OpDesc op_desc;
op_desc.set_type("cos_sim");
op_desc.add_inputs("aa");
op_desc.add_outputs("bb");
auto input = op_desc.add_inputs();
input->set_op_proto_name("input");
*input->mutable_var_names()->Add() = "aa";
auto output = op_desc.add_outputs();
output->set_op_proto_name("output");
*output->mutable_var_names()->Add() = "bb";
ASSERT_TRUE(op_desc.IsInitialized());
@@ -127,8 +142,13 @@ static void SetInputFormat(paddle::framework::OpDesc* desc) {
TEST(OpRegistry, CustomChecker) {
paddle::framework::OpDesc op_desc;
op_desc.set_type("my_test_op");
op_desc.add_inputs("ii");
op_desc.add_outputs("oo");
auto input = op_desc.add_inputs();
input->set_op_proto_name("input");
*input->mutable_var_names()->Add() = "ii";
auto output = op_desc.add_outputs();
output->set_op_proto_name("output");
*output->mutable_var_names()->Add() = "oo";
SetInputFormat(&op_desc);
// attr 'test_attr' is not set
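The registry tests above all build the OpDesc the same way. A consolidated sketch of that pattern; the header path is an assumption, but the setter calls are exactly the ones used in the tests.

// Sketch; header path assumed, calls taken from the tests above.
#include "paddle/framework/op_desc.pb.h"

paddle::framework::OpDesc MakeCosSimDesc() {
  paddle::framework::OpDesc op_desc;
  op_desc.set_type("cos_sim");

  // Each input/output is now a Var message: the op-proto parameter name
  // plus a repeated list of variable names bound to that parameter.
  auto* input = op_desc.add_inputs();
  input->set_op_proto_name("input");
  *input->mutable_var_names()->Add() = "aa";

  auto* output = op_desc.add_outputs();
  output->set_op_proto_name("output");
  *output->mutable_var_names()->Add() = "bb";
  return op_desc;
}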

@@ -27,12 +27,12 @@ class OpWithoutKernelTest : public OperatorBase {
void InferShape(const Scope& scope) const override {}
void Run(const Scope& scope,
const platform::DeviceContext& dev_ctx) const override {
op_run_num++;
ASSERT_EQ((int)inputs_.size(), 1);
ASSERT_EQ((int)outputs_.size(), 1);
ASSERT_EQ(scope.FindVar(inputs_[0]), nullptr);
++op_run_num;
ASSERT_EQ(static_cast<int>(inputs_.size()), 1);
ASSERT_EQ(static_cast<int>(outputs_.size()), 1);
ASSERT_EQ(scope.FindVar(inputs_.at("input")[0]), nullptr);
ASSERT_EQ(x, 1);
ASSERT_NE(scope.FindVar(outputs_[0]), nullptr);
ASSERT_NE(scope.FindVar(outputs_.at("output")[0]), nullptr);
}
public:
@@ -60,8 +60,13 @@ REGISTER_OP(test_operator, paddle::framework::OpWithoutKernelTest,
TEST(OperatorBase, all) {
paddle::framework::OpDesc op_desc;
op_desc.set_type("test_operator");
*op_desc.mutable_inputs()->Add() = "IN1";
*op_desc.mutable_outputs()->Add() = "OUT1";
auto* ipt = op_desc.mutable_inputs()->Add();
*ipt->mutable_var_names()->Add() = "IN1";
ipt->set_op_proto_name("input");
auto* output = op_desc.mutable_outputs()->Add();
*output->mutable_var_names()->Add() = "OUT1";
output->set_op_proto_name("output");
auto attr = op_desc.mutable_attrs()->Add();
attr->set_name("scale");
attr->set_type(paddle::framework::AttrType::FLOAT);
@@ -113,24 +118,6 @@ class CPUKernelTest : public OpKernel {
}
};
// multiple inputs test
class OperatorMultiInputsTest : public OperatorBase {
public:
void Init() override { x = 1; }
void InferShape(const Scope& scope) const override {}
void Run(const Scope& scope,
const platform::DeviceContext& dev_ctx) const override {
ASSERT_EQ(scope.FindVar(inputs_[0]), nullptr);
ASSERT_EQ(x, 1);
ASSERT_NE(scope.FindVar(outputs_[0]), nullptr);
ASSERT_EQ(Input("x"), "IN1");
ASSERT_EQ(Input("y"), "OUT1");
}
public:
float x = 0;
};
class OpKernelTestMultiInputsProtoAndCheckerMaker
: public OpProtoAndCheckerMaker {
public:
@@ -196,8 +183,14 @@ REGISTER_OP_CPU_KERNEL(op_with_kernel,
TEST(OpKernel, all) {
paddle::framework::OpDesc op_desc;
op_desc.set_type("op_with_kernel");
*op_desc.mutable_inputs()->Add() = "IN1";
*op_desc.mutable_outputs()->Add() = "OUT1";
auto* ipt = op_desc.mutable_inputs()->Add();
*ipt->mutable_var_names()->Add() = "IN1";
ipt->set_op_proto_name("input");
auto* output = op_desc.mutable_outputs()->Add();
*output->mutable_var_names()->Add() = "OUT1";
output->set_op_proto_name("output");
auto attr = op_desc.mutable_attrs()->Add();
attr->set_name("scale");
attr->set_type(paddle::framework::AttrType::FLOAT);
@@ -223,12 +216,19 @@ TEST(OpKernel, multi_inputs) {
OpDesc op_desc;
op_desc.set_type("op_multi_inputs_with_kernel");
*op_desc.mutable_inputs()->Add() = "x0";
*op_desc.mutable_inputs()->Add() = "x1";
*op_desc.mutable_inputs()->Add() = "x2";
*op_desc.mutable_inputs()->Add() = "k0";
*op_desc.mutable_outputs()->Add() = "y0";
*op_desc.mutable_outputs()->Add() = "y1";
auto x = op_desc.mutable_inputs()->Add();
x->set_op_proto_name("xs");
*x->mutable_var_names()->Add() = "x0";
*x->mutable_var_names()->Add() = "x1";
*x->mutable_var_names()->Add() = "x2";
auto k = op_desc.mutable_inputs()->Add();
k->set_op_proto_name("k");
*k->mutable_var_names()->Add() = "k0";
auto y = op_desc.mutable_outputs()->Add();
y->set_op_proto_name("ys");
*y->mutable_var_names()->Add() = "y0";
*y->mutable_var_names()->Add() = "y1";
auto attr = op_desc.mutable_attrs()->Add();
attr->set_name("scale");
attr->set_type(paddle::framework::AttrType::FLOAT);

@@ -53,9 +53,10 @@ void ExposeOperator(ClassType &m) {
return op.type_;
})
.def("outputs",
[](const typename ClassType::type &op) -> std::vector<std::string> {
return op.outputs_;
})
[](const typename ClassType::type &op)
-> std::unordered_map<std::string, std::vector<std::string>> {
return op.outputs_;
})
.def("__str__", &ClassType::type::DebugString);
}

@@ -22,19 +22,19 @@ class FullyConnectedOp : public NetOp {
void Init() override {
AddOp(OpRegistry::CreateOp("mul",
{
Input("X"), Input("W"),
{"X", {Input("X")}}, {"Y", {Input("W")}},
},
{Output("before_act")}, {}));
{{"Out", {Output("before_act")}}}, {}));
auto b = Input("b");
if (b != framework::kEmptyVarName) {
AddOp(OpRegistry::CreateOp("rowwise_add",
{Output("before_act"), Input("b")},
{Output("before_act")}, {}));
AddOp(OpRegistry::CreateOp(
"rowwise_add", {{"X", {Output("before_act")}}, {"b", {Input("b")}}},
{{"Out", {Output("before_act")}}}, {}));
}
auto activation = GetAttr<std::string>("activation");
AddOp(OpRegistry::CreateOp(activation, {Output("before_act")},
{Output("Y")}, {}));
AddOp(OpRegistry::CreateOp(activation, {{"X", {Output("before_act")}}},
{{"Out", {Output("Out")}}}, {}));
CompleteAddOp(false);
}
};
@@ -47,7 +47,7 @@ class FullyConnectedOpMaker : public OpProtoAndCheckerMaker {
AddInput("W", "the weight of fc operator");
AddInput("b", "the bias of fc operator");
AddOutput("Y", "the output of fc operator");
AddOutput("Out", "the output of fc operator");
AddOutput("before_act", "the before activation output of fc operator")
.SetTemporary();
AddAttr<std::string>("activation", "The activation key for fc layer")

@@ -47,23 +47,24 @@ TEST(OpKernel, all) {
ASSERT_NE(net, nullptr);
auto op1 = std::make_shared<TestOp>();
op1->inputs_ = {"x", "w1", "b1"};
op1->outputs_ = {"y"};
op1->inputs_ = {{"X", {"x"}}, {"W", {"w1"}}, {"b", {"b1"}}};
op1->outputs_ = {{"Out", {"y"}}};
net->AddOp(op1);
auto op2 = std::make_shared<TestOp>();
op2->inputs_ = {"y", "w2", "b2"};
op2->outputs_ = {"z"};
op2->inputs_ = {{"X", {"y"}}, {"W", {"w2"}}, {"b", {"b2"}}};
op2->outputs_ = {{"Out", {"z"}}};
net->AddOp(op2);
net->CompleteAddOp();
AssertSameVectorWithoutOrder({"x", "w1", "b1", "w2", "b2"}, net->inputs_);
AssertSameVectorWithoutOrder({"y", "z"}, net->outputs_);
AssertSameVectorWithoutOrder({"x", "w1", "b1", "w2", "b2"},
net->inputs_.at("__all__"));
AssertSameVectorWithoutOrder({"y", "z"}, net->outputs_.at("__all__"));
auto tmp_idx_iter = net->attrs_.find("temporary_index");
ASSERT_NE(net->attrs_.end(), tmp_idx_iter);
auto& tmp_idx = boost::get<std::vector<int>>(tmp_idx_iter->second);
ASSERT_EQ(1UL, tmp_idx.size());
ASSERT_EQ("y", net->outputs_[tmp_idx[0]]);
ASSERT_EQ("y", net->outputs_.at("__all__")[tmp_idx[0]]);
Scope scope;
platform::CPUDeviceContext dev_ctx;
@@ -78,8 +79,8 @@ TEST(OpKernel, all) {
TEST(NetOp, insert_op) {
NetOp net;
auto op1 = std::make_shared<EmptyOp>();
op1->inputs_ = {"x", "w1", "b1"};
op1->outputs_ = {"y"};
op1->inputs_ = {{"X", {"x"}}, {"W", {"w1"}}, {"b", {"b1"}}};
op1->outputs_ = {{"Out", {"y"}}};
net.AddOp(op1);
net.InsertOp(0, op1);
ASSERT_EQ(2UL, net.ops_.size());
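A standalone sketch (not from this commit) of the aggregated view the net tests read back: after CompleteAddOp the net publishes its combined variable names under the reserved "__all__" key, and a variable produced inside the net (here "y") appears only in the outputs, not in the net-level inputs.

// Standalone sketch of the data shape checked by the test above.
#include <cassert>
#include <string>
#include <unordered_map>
#include <vector>

using VarNameMap = std::unordered_map<std::string, std::vector<std::string>>;

int main() {
  VarNameMap net_inputs = {{"__all__", {"x", "w1", "b1", "w2", "b2"}}};
  VarNameMap net_outputs = {{"__all__", {"y", "z"}}};
  assert(net_inputs.at("__all__").size() == 5);   // "y" is internal, not listed
  assert(net_outputs.at("__all__").size() == 2);
  return 0;
}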

File diff suppressed because it is too large