Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into cmake_speed
commit
884ce5d5a2
@ -0,0 +1,110 @@
|
||||
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#include "paddle/operators/beam_search_decode_op.h"
|
||||
|
||||
namespace paddle {
|
||||
namespace operators {
|
||||
|
||||
// Decodes a finished beam search: consumes the per-step candidate ids and
// scores (one LoDTensor per step in each LoDTensorArray) and packs them
// into two flat LoDTensors holding the result sentences and their scores.
class BeamSearchDecodeOp : public framework::OperatorBase {
 public:
  BeamSearchDecodeOp(const std::string& type,
                     const framework::VariableNameMap& inputs,
                     const framework::VariableNameMap& outputs,
                     const framework::AttributeMap& attrs)
      : OperatorBase(type, inputs, outputs, attrs) {}

  void Run(const framework::Scope& scope,
           const platform::DeviceContext& dev_ctx) const override {
    framework::ExecutionContext ctx(*this, scope, dev_ctx);
    const LoDTensorArray* ids = ctx.Input<LoDTensorArray>("Ids");
    const LoDTensorArray* scores = ctx.Input<LoDTensorArray>("Scores");
    // One array entry per beam-search step.
    const size_t step_num = ids->size();
    PADDLE_ENFORCE_GT(step_num, 0UL,
                      "beam search steps should be larger than 0");
    // lod level 0 of the first step partitions candidates by source
    // sentence, so its size minus one is the number of sources.
    const size_t source_num = ids->at(0).lod().at(0).size() - 1;
    PADDLE_ENFORCE_GT(source_num, 0UL, "source num should be larger than 0");

    // Every step must carry a two-level LoD (source level + candidate level).
    for (size_t i = 0; i < step_num; ++i) {
      PADDLE_ENFORCE_EQ(ids->at(i).lod().size(), 2UL,
                        "Level of LodTensor should be 2");
    }

    // prepare output
    LoDTensor* sentenceIds = ctx.Output<LoDTensor>("SentenceIds");
    LoDTensor* sentenceScores = ctx.Output<LoDTensor>("SentenceScores");

    // The packing itself is implemented by BeamSearchDecoder (declared in
    // beam_search_decode_op.h).
    BeamSearchDecoder<float> beam_search_decoder;
    beam_search_decoder.PackAllSteps(*ids, *scores, sentenceIds,
                                     sentenceScores);
  }
};
|
||||
|
||||
// Declares inputs, outputs and documentation of beam_search_decode.
// Fix: the "Ids" input description was a copy-paste of the "Scores"
// description ("score of the candidate words..."); it now describes ids.
class BeamSearchDecodeOpProtoMaker : public framework::OpProtoAndCheckerMaker {
 public:
  BeamSearchDecodeOpProtoMaker(framework::OpProto* proto,
                               framework::OpAttrChecker* op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("Ids",
             "(LodTensorArray)"
             "id of the candidate words in each step");
    AddInput("Scores",
             "(LodTensorArray)"
             "score of the candidate words in each step");
    AddOutput("SentenceIds",
              "(LodTensor)"
              "All possible result sentences of word ids");
    AddOutput("SentenceScores",
              "(LodTensor)"
              "All possible result sentences of word scores");
    AddComment(R"DOC(
Pack the result of Beam search op into SentenceIds and SentenceScores.
)DOC");
  }
};
|
||||
|
||||
// Compile-time check: only verifies that all required inputs/outputs are
// declared. Concrete output shapes depend on the runtime beam content, so
// no shapes are set here.
class BeamSearchDecodeInferShape : public framework::InferShapeBase {
 public:
  void operator()(framework::InferShapeContext* context) const override {
    PADDLE_ENFORCE(context->HasInput("Ids"),
                   "BeamSearchDecodeOp must has input Ids");
    PADDLE_ENFORCE(context->HasInput("Scores"),
                   "BeamSearchDecodeOp must has input Scores");
    PADDLE_ENFORCE(context->HasOutput("SentenceIds"),
                   "BeamSearchDecodeOp must has output SentenceIds");
    PADDLE_ENFORCE(context->HasOutput("SentenceScores"),
                   "BeamSearchDecodeOp must has output SentenceScores");
  }
};
|
||||
|
||||
// Marks both packed outputs as plain LoDTensor variables.
class BeamSearchDecodeInferVarType : public framework::VarTypeInference {
 public:
  void operator()(const framework::OpDescBind& op_desc,
                  framework::BlockDescBind* block) const override {
    // Both outputs get the same treatment, so handle them in one loop.
    const char* output_names[] = {"SentenceIds", "SentenceScores"};
    for (auto* output_name : output_names) {
      for (auto& var_name : op_desc.Output(output_name)) {
        block->Var(var_name)->SetType(framework::VarDesc::LOD_TENSOR);
      }
    }
  }
};
|
||||
|
||||
} // namespace operators
|
||||
} // namespace paddle
|
||||
|
||||
// Register the operator together with its proto maker, shape checker and
// var-type deducer. EmptyGradOpMaker is registered, so no gradient op is
// generated for beam_search_decode.
REGISTER_OPERATOR(beam_search_decode, paddle::operators::BeamSearchDecodeOp,
                  paddle::operators::BeamSearchDecodeOpProtoMaker,
                  paddle::operators::BeamSearchDecodeInferShape,
                  paddle::operators::BeamSearchDecodeInferVarType,
                  paddle::framework::EmptyGradOpMaker);
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,221 @@
|
||||
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#include "paddle/operators/beam_search_decode_op.h"
|
||||
#include "gtest/gtest.h"
|
||||
|
||||
using CPUPlace = paddle::platform::CPUPlace;
|
||||
using LoD = paddle::framework::LoD;
|
||||
using LoDTensor = paddle::framework::LoDTensor;
|
||||
using LoDTensorArray = paddle::framework::LoDTensorArray;
|
||||
|
||||
template <typename T>
|
||||
using BeamNode = paddle::operators::BeamNode<T>;
|
||||
template <typename T>
|
||||
using BeamSearchDecoder = paddle::operators::BeamSearchDecoder<T>;
|
||||
template <typename T>
|
||||
using Sentence = paddle::operators::Sentence<T>;
|
||||
template <typename T>
|
||||
using BeamNodeVector = paddle::operators::BeamNodeVector<T>;
|
||||
template <typename T>
|
||||
using SentenceVector = paddle::operators::SentenceVector<T>;
|
||||
|
||||
namespace paddle {
|
||||
namespace test {
|
||||
|
||||
void GenerateExample(const std::vector<size_t>& level_0,
|
||||
const std::vector<size_t>& level_1,
|
||||
const std::vector<int>& data, LoDTensorArray* ids,
|
||||
LoDTensorArray* scores) {
|
||||
PADDLE_ENFORCE_EQ(level_0.back(), level_1.size() - 1,
|
||||
"source level is used to describe candidate set");
|
||||
PADDLE_ENFORCE_EQ(level_1.back(), data.size(),
|
||||
"the lowest level is used to describe data"
|
||||
", so it's last element should be data length");
|
||||
|
||||
CPUPlace place;
|
||||
|
||||
LoD lod;
|
||||
lod.push_back(level_0);
|
||||
lod.push_back(level_1);
|
||||
|
||||
// Ids
|
||||
LoDTensor tensor_id;
|
||||
tensor_id.set_lod(lod);
|
||||
tensor_id.Resize({static_cast<int64_t>(data.size())});
|
||||
// malloc memory
|
||||
int64_t* id_ptr = tensor_id.mutable_data<int64_t>(place);
|
||||
for (size_t i = 0; i < data.size(); ++i) {
|
||||
id_ptr[i] = static_cast<int64_t>(data.at(i));
|
||||
}
|
||||
|
||||
// Scores
|
||||
LoDTensor tensor_score;
|
||||
tensor_score.set_lod(lod);
|
||||
tensor_score.Resize({static_cast<int64_t>(data.size())});
|
||||
// malloc memory
|
||||
float* score_ptr = tensor_score.mutable_data<float>(place);
|
||||
for (size_t i = 0; i < data.size(); ++i) {
|
||||
score_ptr[i] = static_cast<float>(data.at(i));
|
||||
}
|
||||
|
||||
ids->push_back(tensor_id);
|
||||
scores->push_back(tensor_score);
|
||||
}
|
||||
|
||||
} // namespace test
|
||||
} // namespace paddle
|
||||
|
||||
// Builds a small beam tree with raw nodes linked via AppendTo and deletes
// the leaf nodes; the test passes if this runs cleanly (e.g. no crash or
// double free under a sanitizer build).
TEST(BeamSearchDecodeOp, DeleteBeamNode) {
  auto* root = new BeamNode<float>(0, 0);
  auto* left = new BeamNode<float>(1, 1);
  auto* right = new BeamNode<float>(2, 2);
  auto* leaf = new BeamNode<float>(3, 3);

  // root has two children; the left child has one child of its own.
  left->AppendTo(root);
  right->AppendTo(root);
  leaf->AppendTo(left);

  delete leaf;
  delete right;
}
|
||||
|
||||
// MakeSentence must walk the parent chain from the end node back to the
// root and emit ids/scores in root-to-end order.
TEST(BeamSearchDecodeOp, MakeSentence) {
  auto* root = new BeamNode<float>(0, 0);
  auto* middle = new BeamNode<float>(1, 1);
  auto* tail = new BeamNode<float>(2, 2);
  middle->AppendTo(root);
  tail->AppendTo(middle);

  BeamSearchDecoder<float> decoder;
  Sentence<float> sentence = decoder.MakeSentence(tail);
  delete tail;

  std::vector<int64_t> expected_ids = {0, 1, 2};
  ASSERT_EQ(sentence.word_ids, expected_ids);

  std::vector<float> expected_scores = {0, 1, 2};
  ASSERT_EQ(sentence.scores, expected_scores);
}
|
||||
|
||||
// Packing the very first step: with no existing prefixes, every candidate
// in the step becomes a fresh one-node beam for its source.
// NOTE(review): "FistStep" looks like a typo for "FirstStep"; the name is
// kept unchanged so the registered gtest name stays stable.
TEST(BeamSearchDecodeOp, PackTwoStepsFistStep) {
  CPUPlace place;

  LoDTensorArray ids;
  LoDTensorArray scores;

  // Two sources: the first owns candidates [0, 2), the second [2, 6).
  paddle::test::GenerateExample(
      std::vector<size_t>{0, 2, 6}, std::vector<size_t>{0, 1, 2, 3, 4, 5, 6},
      std::vector<int>{1, 2, 3, 4, 5, 6}, &ids, &scores);

  std::vector<BeamNodeVector<float>> beam_lists;
  std::vector<SentenceVector<float>> sentence_lists(2,
                                                    SentenceVector<float>());

  BeamSearchDecoder<float> decoder;
  beam_lists =
      decoder.PackTwoSteps(ids[0], scores[0], beam_lists, &sentence_lists);
  ASSERT_EQ(beam_lists.size(), 2UL);
  ASSERT_EQ(beam_lists[0].size(), 2UL);
  ASSERT_EQ(beam_lists[1].size(), 4UL);
}
|
||||
|
||||
// Packing a later step against existing prefixes. The step's LoD attaches
// candidates to prefixes; a prefix that receives no successor is finished
// into sentence_lists (source 0 has one empty segment here, hence the
// expected size of 1 below).
TEST(BeamSearchDecodeOp, PackTwoSteps) {
  CPUPlace place;

  // First source starts with three prefixes...
  BeamNodeVector<float> source0_prefixes;
  const int source0_seeds[] = {1, 0, 3};
  for (int seed : source0_seeds) {
    source0_prefixes.push_back(
        std::unique_ptr<BeamNode<float>>(new BeamNode<float>(seed, seed)));
  }

  // ...and the second source with two.
  BeamNodeVector<float> source1_prefixes;
  const int source1_seeds[] = {4, 5};
  for (int seed : source1_seeds) {
    source1_prefixes.push_back(
        std::unique_ptr<BeamNode<float>>(new BeamNode<float>(seed, seed)));
  }

  std::vector<BeamNodeVector<float>> beam_lists;
  std::vector<SentenceVector<float>> sentence_lists(2,
                                                    SentenceVector<float>());

  beam_lists.push_back(std::move(source0_prefixes));
  beam_lists.push_back(std::move(source1_prefixes));

  // Generate one further step of candidate data.
  LoDTensorArray ids;
  LoDTensorArray scores;

  paddle::test::GenerateExample(std::vector<size_t>{0, 3, 5},
                                std::vector<size_t>{0, 1, 1, 3, 4, 5},
                                std::vector<int>{0, 1, 2, 3, 4}, &ids,
                                &scores);

  BeamSearchDecoder<float> decoder;
  beam_lists =
      decoder.PackTwoSteps(ids[0], scores[0], beam_lists, &sentence_lists);

  ASSERT_EQ(sentence_lists[0].size(), 1UL);
  ASSERT_EQ(sentence_lists[1].size(), 0UL);
  ASSERT_EQ(beam_lists[0].size(), 3UL);
  ASSERT_EQ(beam_lists[1].size(), 2UL);
}
|
||||
|
||||
// End-to-end packing of a 3-step, 2-source beam: checks the resulting
// two-level LoD, the packed ids, and that the scores mirror the ids
// (GenerateExample writes the same values into both tensors).
TEST(BeamSearchDecodeOp, PackAllSteps) {
  CPUPlace place;

  // we will construct sample data with 3 steps and 2 source sentences
  LoDTensorArray ids;
  LoDTensorArray scores;

  paddle::test::GenerateExample(
      std::vector<size_t>{0, 3, 6}, std::vector<size_t>{0, 1, 2, 3, 4, 5, 6},
      std::vector<int>{1, 2, 3, 4, 5, 6}, &ids, &scores);
  paddle::test::GenerateExample(
      std::vector<size_t>{0, 3, 6}, std::vector<size_t>{0, 1, 1, 3, 5, 5, 6},
      std::vector<int>{0, 1, 2, 3, 4, 5}, &ids, &scores);
  paddle::test::GenerateExample(std::vector<size_t>{0, 3, 6},
                                std::vector<size_t>{0, 0, 1, 2, 3, 4, 5},
                                std::vector<int>{0, 1, 2, 3, 4}, &ids, &scores);

  ASSERT_EQ(ids.size(), 3UL);
  ASSERT_EQ(scores.size(), 3UL);

  BeamSearchDecoder<float> helper;

  LoDTensor id_tensor;
  LoDTensor score_tensor;
  helper.PackAllSteps(ids, scores, &id_tensor, &score_tensor);

  LoD lod = id_tensor.lod();
  // Level 0: sentence count per source; level 1: word offsets per sentence.
  std::vector<size_t> expect_source_lod = {0, 4, 8};
  EXPECT_EQ(lod[0], expect_source_lod);
  std::vector<size_t> expect_sentence_lod = {0, 1, 3, 6, 9, 10, 13, 16, 19};
  EXPECT_EQ(lod[1], expect_sentence_lod);
  // 2| 1, 0| 3, 1, 0| 3, 2, 1| 5| 4, 3, 2| 4, 4, 3| 6, 5, 4
  std::vector<int> expect_data = {2, 1, 0, 3, 1, 0, 3, 2, 1, 5,
                                  4, 3, 2, 4, 4, 3, 6, 5, 4};
  ASSERT_EQ(id_tensor.dims()[0], static_cast<int64_t>(expect_data.size()));
  for (size_t i = 0; i < expect_data.size(); ++i) {
    ASSERT_EQ(id_tensor.data<int64_t>()[i],
              static_cast<int64_t>(expect_data[i]));
  }
  // Scores were generated from the same ints, so they must equal the ids.
  for (int64_t i = 0; i < id_tensor.dims()[0]; ++i) {
    ASSERT_EQ(score_tensor.data<float>()[i],
              static_cast<float>(id_tensor.data<int64_t>()[i]));
  }
}
|
@ -0,0 +1,11 @@
|
||||
import unittest
|
||||
import paddle.v2.framework.layers as layers
|
||||
|
||||
|
||||
class TestDocString(unittest.TestCase):
    # Smoke test: the generated layer wrapper should carry a docstring.
    # It is only printed for manual inspection; no assertion is made.
    def test_layer_doc_string(self):
        print layers.dropout.__doc__
|
||||
|
||||
|
||||
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
@ -0,0 +1,110 @@
|
||||
import paddle.v2 as paddle
|
||||
import paddle.v2.framework.layers as layers
|
||||
import paddle.v2.framework.nets as nets
|
||||
import paddle.v2.framework.core as core
|
||||
import paddle.v2.framework.optimizer as optimizer
|
||||
|
||||
from paddle.v2.framework.framework import Program, g_main_program, g_startup_program
|
||||
from paddle.v2.framework.executor import Executor
|
||||
|
||||
import numpy as np
|
||||
|
||||
|
||||
def stacked_lstm_net(input_dim,
                     class_dim=2,
                     emb_dim=128,
                     hid_dim=512,
                     stacked_num=3):
    """Build a stacked LSTM text-classification network.

    Args:
        input_dim: vocabulary size for the word embedding.
        class_dim: number of target classes.
        emb_dim: width of the word embedding.
        hid_dim: hidden size of each fc/lstm layer.
        stacked_num: number of fc+lstm layers; must be odd so the last
            layer (i = stacked_num, is_reverse False) runs forward.

    Returns:
        (avg_cost, acc): mean cross-entropy cost and accuracy tensors.
    """
    assert stacked_num % 2 == 1
    data = layers.data(name="words", shape=[1], data_type="int64")
    label = layers.data(name="label", shape=[1], data_type="int64")

    emb = layers.embedding(input=data, size=[input_dim, emb_dim])
    # add bias attr

    # TODO(qijun) linear act
    fc1 = layers.fc(input=emb, size=hid_dim)
    lstm1, cell1 = layers.dynamic_lstm(input=fc1, size=hid_dim)

    inputs = [fc1, lstm1]

    # Stack further layers; direction alternates (even i runs reversed).
    for i in range(2, stacked_num + 1):
        fc = layers.fc(input=inputs, size=hid_dim)
        lstm, cell = layers.dynamic_lstm(
            input=fc, size=hid_dim, is_reverse=(i % 2) == 0)
        inputs = [fc, lstm]

    # Max-pool the last fc and lstm outputs over the sequence dimension.
    fc_last = layers.sequence_pool(input=inputs[0], pool_type='max')
    lstm_last = layers.sequence_pool(input=inputs[1], pool_type='max')

    prediction = layers.fc(input=[fc_last, lstm_last],
                           size=class_dim,
                           act='softmax')
    cost = layers.cross_entropy(input=prediction, label=label)
    avg_cost = layers.mean(x=cost)
    adam_optimizer = optimizer.AdamOptimizer(learning_rate=0.002)
    opts = adam_optimizer.minimize(avg_cost)
    acc = layers.accuracy(input=prediction, label=label)
    return avg_cost, acc
|
||||
|
||||
|
||||
def to_lodtensor(data, place):
    """Flatten a batch of variable-length int sequences into one LoDTensor.

    `data` is a list of sequences; the result holds all elements
    concatenated as an Nx1 int64 tensor with a single LoD level whose
    offsets mark each sequence's start/end.
    """
    # Running-total offsets: [0, len(s0), len(s0)+len(s1), ...]
    lod = [0]
    for seq in data:
        lod.append(lod[-1] + len(seq))
    flattened = np.concatenate(data, axis=0).astype("int64")
    flattened = flattened.reshape([len(flattened), 1])
    tensor = core.LoDTensor()
    tensor.set(flattened, place)
    tensor.set_lod([lod])
    return tensor
|
||||
|
||||
|
||||
def main():
    # Train the stacked-LSTM classifier on the IMDB dataset. Exits with
    # status 0 as soon as a batch reaches cost < 1.0 and acc > 0.7, or
    # with status 1 after PASS_NUM passes without hitting that target.
    BATCH_SIZE = 100
    PASS_NUM = 5

    word_dict = paddle.dataset.imdb.word_dict()
    print "load word dict successfully"
    dict_dim = len(word_dict)
    class_dim = 2

    cost, acc = stacked_lstm_net(input_dim=dict_dim, class_dim=class_dim)

    train_data = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.imdb.train(word_dict), buf_size=1000),
        batch_size=BATCH_SIZE)
    place = core.CPUPlace()
    exe = Executor(place)

    # Run parameter initialization once before training.
    exe.run(g_startup_program)

    for pass_id in xrange(PASS_NUM):
        for data in train_data():
            # Words go in as one LoDTensor per batch of sequences.
            tensor_words = to_lodtensor(map(lambda x: x[0], data), place)

            label = np.array(map(lambda x: x[1], data)).astype("int64")
            label = label.reshape([BATCH_SIZE, 1])

            tensor_label = core.LoDTensor()
            tensor_label.set(label, place)

            outs = exe.run(g_main_program,
                           feed={"words": tensor_words,
                                 "label": tensor_label},
                           fetch_list=[cost, acc])
            cost_val = np.array(outs[0])
            acc_val = np.array(outs[1])

            print("cost=" + str(cost_val) + " acc=" + str(acc_val))
            if cost_val < 1.0 and acc_val > 0.7:
                exit(0)
    exit(1)
|
||||
|
||||
|
||||
# Entry point when run as a script.
if __name__ == '__main__':
    main()
|
Loading…
Reference in new issue