revert-3824-remove_grad_op_type
commit
dee4c832cc
@ -0,0 +1,221 @@
|
||||
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#include "Layer.h"
|
||||
#include "paddle/math/Matrix.h"
|
||||
#include "paddle/math/Vector.h"
|
||||
#include "paddle/utils/Logging.h"
|
||||
#include "paddle/utils/Stat.h"
|
||||
|
||||
namespace paddle {
|
||||
|
||||
// Slices (sub)sequences of the first input according to start and/or end
// indices given by the second (and optional third) input, producing a new
// sequence-typed output that selects the corresponding rows of the input.
class SequenceSliceLayer : public Layer {
public:
  explicit SequenceSliceLayer(const LayerConfig& config) : Layer(config) {}

  bool init(const LayerMap& layerMap,
            const ParameterMap& parameterMap) override;

  void forward(PassType passType) override;
  void backward(const UpdateCallback& callback = nullptr) override;

private:
  /*
   * TODO(caoying)
   * In PaddlePaddle, currently all matrices are real number types,
   * but the second and the (optional) third input which are some
   * selected indices of the given sequence to trim the sequence, are actually
   * filled with int types so that storing int type information in real number
   * matrices is very dangerous, since real numbers will be converted to int
   * types. If a user fills this matrix himself, invalid data may occur.
   */

  // CPU-resident start/end index matrices. On CPU they alias the input
  // values directly; on GPU they hold host-side copies (see
  // copySliceIdsToCpu). Either may be null when that index input is absent.
  MatrixPtr startIdsOnCpu_;
  MatrixPtr endIdsOnCpu_;

  // Row indices of the input selected into the output, in batch order.
  std::vector<int> selectedRows_;
  // Device vector built from selectedRows_ for selectRows/addToRows.
  IVectorPtr rowIndice_;
  // Per-sequence start offsets of the first input, reorganized so that
  // inputSeqInfoVec_[i] holds the (sub)sequence boundaries of sequence i.
  std::vector<std::vector<int>> inputSeqInfoVec_;
  // Sequence/subsequence start positions for the output argument.
  std::vector<int> outSubSeqStartPos_;
  std::vector<int> outSeqStartPos_;

  // Validate input count and index-matrix shapes.
  void checkInputs();
  // Copy GPU-resident slice indices into startIdsOnCpu_/endIdsOnCpu_.
  void copySliceIdsToCpu();
  // Compute selectedRows_, rowIndice_ and the output sequence information.
  void calSelectedRows(const MatrixPtr starts, const MatrixPtr ends);
};
|
||||
|
||||
// Register this layer under the config type name "seq_slice".
REGISTER_LAYER(seq_slice, SequenceSliceLayer);
|
||||
|
||||
bool SequenceSliceLayer::init(const LayerMap& layerMap,
|
||||
const ParameterMap& parameterMap) {
|
||||
/* Initialize the basic parent class */
|
||||
Layer::init(layerMap, parameterMap);
|
||||
CHECK_GE(inputLayers_.size(), 2U);
|
||||
CHECK_LE(inputLayers_.size(), 3U);
|
||||
|
||||
setNeedSequenceInfo(false);
|
||||
return true;
|
||||
}
|
||||
|
||||
void SequenceSliceLayer::checkInputs() {
|
||||
const Argument& inputSeq = getInput(0);
|
||||
CHECK(inputSeq.hasSeq()) << "The first input of sequence slice layer "
|
||||
<< "must be a sequence.";
|
||||
const MatrixPtr indices1 = getInputValue(1);
|
||||
CHECK_EQ(static_cast<size_t>(indices1->getHeight()),
|
||||
inputSeq.hasSubseq() ? inputSeq.getNumSubSequences()
|
||||
: inputSeq.getNumSequences())
|
||||
<< "Height of the second input should be equal to number of sequence "
|
||||
<< "in the first input.";
|
||||
if (inputLayers_.size() == 3) {
|
||||
const MatrixPtr indices2 = getInputValue(2);
|
||||
CHECK_EQ(indices2->getHeight(), indices1->getHeight())
|
||||
<< "start indices and end indices should have the same height.";
|
||||
CHECK_EQ(indices2->getWidth(), indices1->getWidth())
|
||||
<< "start indices and end indices should have the same Width.";
|
||||
}
|
||||
}
|
||||
|
||||
void SequenceSliceLayer::copySliceIdsToCpu() {
|
||||
const MatrixPtr indices1 = getInputValue(1);
|
||||
if (inputLayers_.size() == 2U) {
|
||||
if (config_.select_first()) {
|
||||
Matrix::resizeOrCreate(startIdsOnCpu_,
|
||||
indices1->getHeight(),
|
||||
indices1->getWidth(),
|
||||
false /* trans */,
|
||||
false /* useGpu */);
|
||||
startIdsOnCpu_->copyFrom(*indices1);
|
||||
endIdsOnCpu_ = nullptr;
|
||||
} else {
|
||||
Matrix::resizeOrCreate(endIdsOnCpu_,
|
||||
indices1->getHeight(),
|
||||
indices1->getWidth(),
|
||||
false /* trans */,
|
||||
false /* useGpu */);
|
||||
endIdsOnCpu_->copyFrom(*indices1);
|
||||
startIdsOnCpu_ = nullptr;
|
||||
}
|
||||
} else if (inputLayers_.size() == 3U) {
|
||||
Matrix::resizeOrCreate(startIdsOnCpu_,
|
||||
indices1->getHeight(),
|
||||
indices1->getWidth(),
|
||||
false /* trans */,
|
||||
false /* useGpu */);
|
||||
startIdsOnCpu_->copyFrom(*indices1);
|
||||
|
||||
const MatrixPtr indices2 = getInputValue(2);
|
||||
Matrix::resizeOrCreate(endIdsOnCpu_,
|
||||
indices2->getHeight(),
|
||||
indices2->getWidth(),
|
||||
false /* trans */,
|
||||
false /* useGpu */);
|
||||
endIdsOnCpu_->copyFrom(*indices2);
|
||||
}
|
||||
}
|
||||
|
||||
void SequenceSliceLayer::calSelectedRows(const MatrixPtr starts,
|
||||
const MatrixPtr ends) {
|
||||
CHECK(starts || ends) << "At least one of the start or end indices "
|
||||
<< "should be given.";
|
||||
|
||||
outSeqStartPos_.resize(1, 0);
|
||||
outSubSeqStartPos_.resize(1, 0);
|
||||
selectedRows_.clear();
|
||||
|
||||
size_t beamSize = starts ? starts->getWidth() : ends->getWidth();
|
||||
size_t rowIdx = 0;
|
||||
for (size_t i = 0; i < inputSeqInfoVec_.size(); ++i) {
|
||||
for (size_t j = 0; j < inputSeqInfoVec_[i].size() - 1; ++j) {
|
||||
for (size_t k = 0; k < beamSize; ++k) {
|
||||
if (starts && starts->getElement(rowIdx, k) == -1.) break;
|
||||
if (ends && ends->getElement(rowIdx, k) == -1.) break;
|
||||
|
||||
int begPos = inputSeqInfoVec_[i][j];
|
||||
if (starts) begPos += starts->getElement(rowIdx, k);
|
||||
|
||||
int endPos = inputSeqInfoVec_[i][j + 1] - 1;
|
||||
if (ends) endPos = inputSeqInfoVec_[i][j] + ends->getElement(rowIdx, k);
|
||||
|
||||
int seqLen = endPos - begPos + 1;
|
||||
CHECK_GT(seqLen, 0U);
|
||||
for (int m = begPos; m <= endPos; ++m) selectedRows_.push_back(m);
|
||||
inputSeqInfoVec_.size() > 1
|
||||
? outSubSeqStartPos_.push_back(outSubSeqStartPos_.back() + seqLen)
|
||||
: outSeqStartPos_.push_back(outSeqStartPos_.back() + seqLen);
|
||||
}
|
||||
rowIdx++;
|
||||
}
|
||||
if (inputSeqInfoVec_.size() > 1)
|
||||
outSeqStartPos_.push_back(outSubSeqStartPos_.back());
|
||||
}
|
||||
|
||||
if (useGpu_) {
|
||||
rowIndice_ = IVector::create(selectedRows_.size(), useGpu_);
|
||||
rowIndice_->copyFrom(selectedRows_.data(), selectedRows_.size());
|
||||
} else {
|
||||
rowIndice_ =
|
||||
IVector::create(selectedRows_.data(), selectedRows_.size(), useGpu_);
|
||||
}
|
||||
|
||||
// create the sequence information for the output.
|
||||
ICpuGpuVector::resizeOrCreate(
|
||||
output_.sequenceStartPositions, outSeqStartPos_.size(), false);
|
||||
output_.sequenceStartPositions->copyFrom(
|
||||
outSeqStartPos_.data(), outSeqStartPos_.size(), false);
|
||||
|
||||
if (inputSeqInfoVec_.size() > 1) {
|
||||
ICpuGpuVector::resizeOrCreate(
|
||||
output_.subSequenceStartPositions, outSubSeqStartPos_.size(), false);
|
||||
output_.subSequenceStartPositions->copyFrom(
|
||||
outSubSeqStartPos_.data(), outSubSeqStartPos_.size(), false);
|
||||
}
|
||||
}
|
||||
|
||||
void SequenceSliceLayer::forward(PassType passType) {
  Layer::forward(passType);
  checkInputs();

  // Reorganize the input sequence start positions into per-sequence vectors.
  const Argument& inputSeq = getInput(0);
  inputSeqInfoVec_.clear();
  Argument::reorganizeSeqInfo(inputSeq.sequenceStartPositions,
                              inputSeq.subSequenceStartPositions,
                              inputSeqInfoVec_);

  if (!useGpu_) {
    // On CPU the index matrices can be used in place.
    if (inputLayers_.size() == 2U) {
      startIdsOnCpu_ = config_.select_first() ? getInputValue(1) : nullptr;
      endIdsOnCpu_ = config_.select_first() ? nullptr : getInputValue(1);
    } else if (inputLayers_.size() == 3U) {
      startIdsOnCpu_ = getInputValue(1);
      endIdsOnCpu_ = getInputValue(2);
    }
  } else {
    // On GPU the indices must first be copied to host memory.
    copySliceIdsToCpu();
  }

  // calculate the selected row indices in a batch,
  // and build the output sequence information.
  // (The members are already MatrixPtr, null when absent; the original
  // `x ? x : nullptr` ternaries were redundant.)
  calSelectedRows(startIdsOnCpu_, endIdsOnCpu_);

  resetOutput(selectedRows_.size(), getSize());

  getOutputValue()->selectRows(*getInputValue(0), *rowIndice_);
}
|
||||
|
||||
void SequenceSliceLayer::backward(const UpdateCallback& callback) {
|
||||
getOutputGrad()->addToRows(*getInputGrad(0), *rowIndice_);
|
||||
}
|
||||
|
||||
} // namespace paddle
|
@ -0,0 +1,223 @@
|
||||
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#include <gtest/gtest.h>

#include <random>

#include "ModelConfig.pb.h"
#include "paddle/gserver/layers/DataLayer.h"
#include "paddle/trainer/Trainer.h"

#include "LayerGradUtil.h"
#include "paddle/testing/TestUtil.h"
|
||||
|
||||
using namespace paddle; // NOLINT
|
||||
using namespace std; // NOLINT
|
||||
|
||||
DECLARE_int32(gpu_id);
|
||||
DECLARE_bool(thread_local_rand_use_global_seed);
|
||||
|
||||
const int MAX_SEQ_NUM = 17;
|
||||
const int MAX_SEQ_LEN = 23;
|
||||
const int MAX_BEAM_SIZE = 13;
|
||||
|
||||
// Sample n distinct values from {0, 1, ..., range - 1}, returned sorted.
vector<real> randSampling(real range, int n) {
  CHECK_GE(range, n);
  // Make the real -> size_t conversion explicit; the original
  // `vector<real> num(range)` relied on an implicit narrowing conversion.
  const size_t total = static_cast<size_t>(range);
  vector<real> num(total);
  iota(begin(num), end(num), 0.);
  if (total == static_cast<size_t>(n)) return num;

  // std::random_shuffle was deprecated in C++14 and removed in C++17; use
  // std::shuffle seeded from rand() so srand() still controls determinism.
  shuffle(begin(num), end(num), std::mt19937(static_cast<unsigned>(rand())));
  num.resize(n);
  sort(begin(num), end(num));
  return num;
}
|
||||
|
||||
// Build random nested sequence boundaries: each of seqNum sequences contains
// a random number of subsequences of random lengths.
void genSeqInfo(vector<int>& seqStartPos, vector<int>& subSeqStartPos) {
  seqStartPos.resize(1, 0);
  subSeqStartPos.resize(1, 0);

  srand((size_t)(time(NULL)));
  const int seqNum = 1 + (rand() % MAX_SEQ_NUM);
  for (int seq = 0; seq < seqNum; ++seq) {
    const int subSeqNum = 1 + (rand() % MAX_SEQ_NUM);
    for (int sub = 0; sub < subSeqNum; ++sub) {
      const int len = 1 + (rand() % MAX_SEQ_LEN);
      subSeqStartPos.push_back(subSeqStartPos.back() + len);
    }
    seqStartPos.push_back(subSeqStartPos.back());
  }
}
|
||||
|
||||
/*
|
||||
generate start indices according to sequence start positions.
|
||||
*/
|
||||
/*
  generate start indices according to sequence start positions.
  For every sequence draw up to beamSize distinct start offsets; unused
  slots keep the padding value -1.
*/
void genStarts(vector<int>& seqStartPos,
               vector<vector<real>>& starts,
               size_t beamSize) {
  starts.clear();
  starts.resize(seqStartPos.size() - 1, vector<real>(beamSize, -1.));

  for (size_t seqId = 0; seqId + 1 < seqStartPos.size(); ++seqId) {
    const int seqLen = seqStartPos[seqId + 1] - seqStartPos[seqId];
    const int sampleNum = min(seqLen, static_cast<int>(beamSize));
    vector<real> sampled = randSampling(seqLen, sampleNum);
    copy(begin(sampled), end(sampled), begin(starts[seqId]));
  }
}
|
||||
|
||||
/*
|
||||
generate end indices according to sequence start positions and start indices.
|
||||
*/
|
||||
/*
  generate end indices according to sequence start positions and start
  indices: each valid start gets a random end in [start, seqLen - 1];
  -1 padding in starts is propagated (ends stays -1).
*/
void genEnds(vector<int>& seqStartPos,
             vector<vector<real>>& starts,
             vector<vector<real>>& ends,
             size_t beamSize) {
  CHECK_EQ(seqStartPos.size() - 1, starts.size());
  ends.clear();
  ends.resize(seqStartPos.size() - 1, vector<real>(beamSize, -1.));

  for (size_t i = 0; i < starts.size(); ++i) {
    // seqLen does not depend on j; hoist the loop-invariant computation
    // out of the inner loop (the original recomputed it every iteration).
    int seqLen = seqStartPos[i + 1] - seqStartPos[i];
    for (size_t j = 0; j < starts[i].size(); ++j) {
      // Stop at padding before validating: checking -1 against seqLen - 1
      // was vacuous anyway.
      if (starts[i][j] == -1.) break;
      CHECK_GE(seqLen - 1, starts[i][j]);
      if (starts[i][j] == (seqLen - 1)) {
        ends[i][j] = starts[i][j];
      } else {
        ends[i][j] = starts[i][j] + randSampling(seqLen - starts[i][j], 1)[0];
      }
    }
  }
}
|
||||
|
||||
// Generate a full random test case: sequence boundaries plus start/end
// indices, taken at the (sub)sequence level selected by hasSubseq.
void genTestData(vector<int>& seqStartPos,
                 vector<int>& subSeqStartPos,
                 vector<vector<real>>& starts,
                 vector<vector<real>>& ends,
                 bool hasSubseq) {
  const size_t beamSize = 1 + (rand() % MAX_BEAM_SIZE);
  genSeqInfo(seqStartPos, subSeqStartPos);

  vector<int>& levelStartPos = hasSubseq ? subSeqStartPos : seqStartPos;
  genStarts(levelStartPos, starts, beamSize);
  genEnds(levelStartPos, starts, ends, beamSize);
}
|
||||
|
||||
// Append every element of the nested vectors to outVec, in order.
// NOTE: elements are moved out of inVec, leaving moved-from values behind.
template <typename T>
void flatten2dVector(vector<vector<T>>& inVec, vector<T>& outVec) {
  size_t totalSize{0};
  for (auto const& row : inVec) totalSize += row.size();
  outVec.reserve(totalSize);

  for (auto& row : inVec) {
    for (auto& item : row) outVec.push_back(std::move(item));
  }
}
|
||||
|
||||
// Build a seq_slice layer test configuration from the given (sub)sequence
// boundaries and start/end slice indices, then run the gradient check.
// An empty `starts` or `ends` means that index input is omitted from the
// config, exercising the two-input form of the layer.
void testSeqSliceLayer(bool hasSubseq,
                       bool useGpu,
                       vector<int>& seqStartPos,
                       vector<int>& subSeqStartPos,
                       vector<vector<real>>& starts,
                       vector<vector<real>>& ends) {
  // layer size is not crucial for this layer,
  // so here use a small layer size in the unittest.
  const size_t layerSize{4};
  TestConfig config;
  config.layerConfig.set_type("seq_slice");
  config.layerConfig.set_size(layerSize);

  // add the first input
  MatrixPtr seqInputPtr =
      Matrix::create(hasSubseq ? subSeqStartPos.back() : seqStartPos.back(),
                     layerSize,
                     false,
                     false);
  seqInputPtr->randomizeUniform();

  if (hasSubseq) {
    config.inputDefs.push_back({INPUT_SELF_DEFINE_DATA,
                                "seq_input",
                                seqInputPtr,
                                seqStartPos,
                                subSeqStartPos});
  } else {
    config.inputDefs.push_back(
        {INPUT_SELF_DEFINE_DATA, "seq_input", seqInputPtr, seqStartPos});
  }
  config.layerConfig.add_inputs();

  // add start indices; flatten the 2-D indices into a row-major matrix.
  if (starts.size()) {
    vector<real> startsToVec;
    flatten2dVector(starts, startsToVec);

    MatrixPtr startMatrixPtr =
        Matrix::create(starts.size(), starts[0].size(), false, false);
    startMatrixPtr->copyFrom(startsToVec.data(), startsToVec.size());

    config.inputDefs.push_back(
        {INPUT_SELF_DEFINE_DATA, "starts", startMatrixPtr});
    config.layerConfig.add_inputs();
    config.layerConfig.set_select_first(true);
  }

  // add end indices; when both inputs exist this overrides select_first.
  if (ends.size()) {
    vector<real> endsToVec;
    flatten2dVector(ends, endsToVec);

    MatrixPtr endMatrixPtr =
        Matrix::create(ends.size(), ends[0].size(), false, false);
    endMatrixPtr->copyFrom(endsToVec.data(), endsToVec.size());

    config.inputDefs.push_back({INPUT_SELF_DEFINE_DATA, "ends", endMatrixPtr});
    config.layerConfig.add_inputs();
    config.layerConfig.set_select_first(false);
  }

  testLayerGrad(config, "seq_slice", /*batchSize*/ 100, false, useGpu, false);
}
|
||||
|
||||
TEST(Layer, SeqSliceLayer) {
  vector<int> seqStartPos;
  vector<int> subSeqStartPos;
  vector<vector<real>> starts;
  vector<vector<real>> ends;

  // Always test on CPU; add GPU when this build supports it.
  std::vector<bool> mode = {false};
#ifndef PADDLE_ONLY_CPU
  mode.push_back(true);
#endif
  genSeqInfo(seqStartPos, subSeqStartPos);
  for (bool hasSubseq : {true, false}) {
    LOG(INFO) << "hasSubSeq : " << hasSubseq;
    genTestData(seqStartPos, subSeqStartPos, starts, ends, hasSubseq);
    for (bool useGpu : mode) {
      // Exercise all three configurations: ends only, starts only, both.
      vector<vector<real>> none;
      testSeqSliceLayer(
          hasSubseq, useGpu, seqStartPos, subSeqStartPos, none, ends);
      testSeqSliceLayer(
          hasSubseq, useGpu, seqStartPos, subSeqStartPos, starts, none);
      testSeqSliceLayer(
          hasSubseq, useGpu, seqStartPos, subSeqStartPos, starts, ends);
    }
  }
}
|
||||
|
||||
int main(int argc, char** argv) {
  // Initialize paddle and the GPU runtime before gtest; seed rand() with a
  // fixed value so test failures are reproducible.
  initMain(argc, argv);
  hl_start();
  hl_init(FLAGS_gpu_id);
  FLAGS_thread_local_rand_use_global_seed = true;
  srand(1);
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
|
@ -0,0 +1,20 @@
|
||||
# Build the paddle_pybind shared library only when the Python bindings are
# enabled; it depends on every operator exposed to Python through pybind.
if(WITH_PYTHON)
  cc_library(paddle_pybind SHARED
    SRCS pybind.cc
    DEPS pybind python backward
         sgd_op
         gather_op
         add_op
         mul_op
         rowwise_add_op
         sigmoid_op
         softmax_op
         mean_op
         cross_entropy_op
         recurrent_op
         uniform_random_op
         gaussian_random_op
         fill_zeros_like_op
         scale_op
         minus_op)
endif(WITH_PYTHON)
|
@ -0,0 +1,79 @@
|
||||
type: "nn"
|
||||
layers {
|
||||
name: "word"
|
||||
type: "data"
|
||||
size: 128
|
||||
active_type: ""
|
||||
}
|
||||
layers {
|
||||
name: "starts"
|
||||
type: "data"
|
||||
size: 5
|
||||
active_type: ""
|
||||
}
|
||||
layers {
|
||||
name: "ends"
|
||||
type: "data"
|
||||
size: 5
|
||||
active_type: ""
|
||||
}
|
||||
layers {
|
||||
name: "__seq_slice_layer_0__"
|
||||
type: "seq_slice"
|
||||
size: 128
|
||||
active_type: ""
|
||||
inputs {
|
||||
input_layer_name: "word"
|
||||
}
|
||||
inputs {
|
||||
input_layer_name: "starts"
|
||||
}
|
||||
inputs {
|
||||
input_layer_name: "ends"
|
||||
}
|
||||
}
|
||||
layers {
|
||||
name: "__seq_slice_layer_1__"
|
||||
type: "seq_slice"
|
||||
size: 128
|
||||
active_type: ""
|
||||
inputs {
|
||||
input_layer_name: "word"
|
||||
}
|
||||
inputs {
|
||||
input_layer_name: "starts"
|
||||
}
|
||||
select_first: true
|
||||
}
|
||||
layers {
|
||||
name: "__seq_slice_layer_2__"
|
||||
type: "seq_slice"
|
||||
size: 128
|
||||
active_type: ""
|
||||
inputs {
|
||||
input_layer_name: "word"
|
||||
}
|
||||
inputs {
|
||||
input_layer_name: "ends"
|
||||
}
|
||||
select_first: false
|
||||
}
|
||||
input_layer_names: "word"
|
||||
output_layer_names: "__seq_slice_layer_0__"
|
||||
output_layer_names: "__seq_slice_layer_1__"
|
||||
output_layer_names: "__seq_slice_layer_2__"
|
||||
sub_models {
|
||||
name: "root"
|
||||
layer_names: "word"
|
||||
layer_names: "starts"
|
||||
layer_names: "ends"
|
||||
layer_names: "__seq_slice_layer_0__"
|
||||
layer_names: "__seq_slice_layer_1__"
|
||||
layer_names: "__seq_slice_layer_2__"
|
||||
input_layer_names: "word"
|
||||
output_layer_names: "__seq_slice_layer_0__"
|
||||
output_layer_names: "__seq_slice_layer_1__"
|
||||
output_layer_names: "__seq_slice_layer_2__"
|
||||
is_recurrent_layer_group: false
|
||||
}
|
||||
|
@ -0,0 +1,13 @@
|
||||
#!/usr/bin/env python
#coding=utf-8
from paddle.trainer_config_helpers import *

# Test network config: exercise seq_slice_layer in its three forms
# (both indices, starts only, ends only).
input_seq = data_layer("word", size=128)
starts = data_layer("starts", size=5)
ends = data_layer("ends", size=5)

# Slice using both start and end indices.
seq_slice1 = seq_slice_layer(input=input_seq, starts=starts, ends=ends)
# Slice from each start index to the end of its sequence.
seq_slice2 = seq_slice_layer(input=input_seq, starts=starts, ends=None)
# Slice from the beginning of each sequence up to each end index.
seq_slice3 = seq_slice_layer(input=input_seq, starts=None, ends=ends)

outputs(seq_slice1, seq_slice2, seq_slice3)
|
Loading…
Reference in new issue