Merge pull request #651 from Xreki/warpctc

Integrate warp-ctc as WarpCTCLayer, including unit test and layer interface.

commit 4823075f95
@@ -0,0 +1,3 @@
[submodule "warp-ctc"]
	path = warp-ctc
	url = https://github.com/baidu-research/warp-ctc.git
@@ -0,0 +1,93 @@
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifndef HL_WARPCTC_WRAP_H_
#define HL_WARPCTC_WRAP_H_

#include "hl_base.h"
#include "warp-ctc/include/ctc.h"

typedef ctcStatus_t hl_warpctc_status_t;
typedef ctcOptions hl_warpctc_options_t;

/**
 * @brief Initialize the warp-ctc options.
 *
 * @param[in]  blank    blank label used in the CTC loss function.
 * @param[in]  useGpu   whether to run on the GPU.
 * @param[out] options  handle storing CPU or GPU information.
 */
extern void hl_warpctc_init(const size_t blank,
                            bool useGpu,
                            hl_warpctc_options_t* options);

/**
 * @brief Compute the connectionist temporal classification (CTC) loss,
 *        and optionally the gradient with respect to the inputs.
 *
 *        If batchGrad == nullptr, only the CTC loss is computed;
 *        if batchGrad != nullptr, both the loss and the gradient are computed.
 *
 * @param[in]  batchInput       batch matrix of input probabilities, in
 *                              maxSequenceLength x numSequences x numClasses
 *                              (row-major) format.
 * @param[out] batchGrad        batch matrix of gradients.
 * @param[in]  cpuLabels        labels, always in CPU memory.
 * @param[in]  cpuLabelLengths  lengths of all labels, in CPU memory.
 * @param[in]  cpuInputLengths  lengths of all input sequences, in CPU memory.
 * @param[in]  numClasses       number of possible output symbols.
 * @param[in]  numSequences     number of sequences.
 * @param[out] cpuCosts         cost of each sequence, in CPU memory.
 * @param[out] workspace        workspace for temporary results.
 * @param[in]  options          handle storing CPU or GPU information.
 */
extern void hl_warpctc_compute_loss(const real* batchInput,
                                    real* batchGrad,
                                    const int* cpuLabels,
                                    const int* cpuLabelLengths,
                                    const int* cpuInputLengths,
                                    const size_t numClasses,
                                    const size_t numSequences,
                                    real* cpuCosts,
                                    void* workspace,
                                    hl_warpctc_options_t* options);

/**
 * @brief Compute the required workspace size.
 *        warp-ctc itself performs no memory allocation.
 *
 * @param[in]  cpuLabelLengths  lengths of all labels, in CPU memory.
 * @param[in]  cpuInputLengths  lengths of all input sequences, in CPU memory.
 * @param[in]  numClasses       number of possible output symbols.
 * @param[in]  numSequences     number of sequences.
 * @param[in]  options          handle storing CPU or GPU information.
 * @param[out] bytes            pointer to a scalar where the memory
 *                              requirement in bytes will be placed.
 */
extern void hl_warpctc_get_workspace_size(const int* cpuLabelLengths,
                                          const int* cpuInputLengths,
                                          const size_t numClasses,
                                          const size_t numSequences,
                                          hl_warpctc_options_t* options,
                                          size_t* bytes);

#endif  // HL_WARPCTC_WRAP_H_
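The three entry points above are meant to be called in a fixed order: initialize the options, query the workspace size, allocate the workspace, then compute the loss; WarpCTCLayer::forward further down in this PR follows exactly that sequence. Below is a minimal CPU-only sketch of the call order. It is not part of the diff: computeCtcCosts is a hypothetical helper, it assumes real is float (no PADDLE_TYPE_DOUBLE), and it assumes batchInput is already laid out as maxSequenceLength x numSequences x numClasses.

#include <vector>
#include "hl_warpctc_wrap.h"

void computeCtcCosts(const real* batchInput,
                     real* batchGrad,
                     const std::vector<int>& cpuLabels,
                     const std::vector<int>& cpuLabelLengths,
                     const std::vector<int>& cpuInputLengths,
                     size_t numClasses,
                     size_t numSequences,
                     std::vector<real>* cpuCosts) {
  // 1. Fill in the options (blank label 0, CPU with one thread).
  hl_warpctc_options_t options;
  hl_warpctc_init(/* blank */ 0, /* useGpu */ false, &options);

  // 2. Ask warp-ctc how much scratch memory it needs; it allocates nothing itself.
  size_t workspaceBytes = 0;
  hl_warpctc_get_workspace_size(cpuLabelLengths.data(),
                                cpuInputLengths.data(),
                                numClasses,
                                numSequences,
                                &options,
                                &workspaceBytes);

  // 3. Provide the workspace and compute per-sequence costs (and gradients,
  //    since batchGrad is non-null).
  std::vector<char> workspace(workspaceBytes);
  cpuCosts->resize(numSequences);
  hl_warpctc_compute_loss(batchInput,
                          batchGrad,
                          cpuLabels.data(),
                          cpuLabelLengths.data(),
                          cpuInputLengths.data(),
                          numClasses,
                          numSequences,
                          cpuCosts->data(),
                          workspace.data(),
                          &options);
}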
@@ -0,0 +1,157 @@
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <mutex>
#include "hl_warpctc_wrap.h"
#include "hl_dso_loader.h"
#include "paddle/utils/Logging.h"

namespace dynload {

std::once_flag warpctc_dso_flag;
void* warpctc_dso_handle = nullptr;

/**
 * The following macro definition generates a struct (one per wrapped
 * function) that dynamically loads the warp-ctc routine via operator
 * overloading. When PADDLE_USE_DSO is not defined, you need to add
 * libwarp-ctc.so to the linked libraries of paddle or to LD_PRELOAD.
 */
#ifdef PADDLE_USE_DSO
#define DYNAMIC_LOAD_WARPCTC_WRAP(__name)                              \
  struct DynLoad__##__name {                                           \
    template <typename... Args>                                        \
    auto operator()(Args... args) -> decltype(__name(args...)) {       \
      using warpctcFunc = decltype(__name(args...)) (*)(Args...);      \
      std::call_once(                                                  \
          warpctc_dso_flag, GetWarpCTCDsoHandle, &warpctc_dso_handle); \
      void* p_##__name = dlsym(warpctc_dso_handle, #__name);           \
      return reinterpret_cast<warpctcFunc>(p_##__name)(args...);       \
    }                                                                  \
  } __name;  // struct DynLoad__##__name
#else
#define DYNAMIC_LOAD_WARPCTC_WRAP(__name)                        \
  struct DynLoad__##__name {                                     \
    template <typename... Args>                                  \
    auto operator()(Args... args) -> decltype(__name(args...)) { \
      return __name(args...);                                    \
    }                                                            \
  } __name;  // struct DynLoad__##__name
#endif

// include all needed warp-ctc functions
DYNAMIC_LOAD_WARPCTC_WRAP(get_warpctc_version)
DYNAMIC_LOAD_WARPCTC_WRAP(ctcGetStatusString)
DYNAMIC_LOAD_WARPCTC_WRAP(compute_ctc_loss)
DYNAMIC_LOAD_WARPCTC_WRAP(get_workspace_size)

#undef DYNAMIC_LOAD_WARPCTC_WRAP

} /* namespace dynload */

#define WARPCTC_GET_VERSION dynload::get_warpctc_version
#define WARPCTC_GET_STATUS_STRING dynload::ctcGetStatusString

#ifndef PADDLE_TYPE_DOUBLE
#define WARPCTC_COMPUTE_LOSS dynload::compute_ctc_loss
#define WARPCTC_GET_WORKSPACE_SIZE dynload::get_workspace_size
#else
#define WARPCTC_LOG_FATAL                                \
  LOG(FATAL) << "warp-ctc [version " << g_warpctcVersion \
             << "] Error: does not support double precision."
#define WARPCTC_COMPUTE_LOSS(...) WARPCTC_LOG_FATAL(__VA_ARGS__)
#define WARPCTC_GET_WORKSPACE_SIZE(...) WARPCTC_LOG_FATAL(__VA_ARGS__)
#endif

/**
 * Check a warp-ctc status code with glog; the check also supports the
 * << operator for appending more detailed error information.
 */
static int g_warpctcVersion = -1;
#define CHECK_WARPCTC(warpctcStat)                \
  CHECK_EQ(CTC_STATUS_SUCCESS, warpctcStat)       \
      << "warp-ctc [version " << g_warpctcVersion \
      << "] Error: " << WARPCTC_GET_STATUS_STRING(warpctcStat) << " "

void hl_warpctc_init(const size_t blank,
                     bool useGpu,
                     hl_warpctc_options_t* options) {
  CHECK_NOTNULL(options);

  g_warpctcVersion = WARPCTC_GET_VERSION();

  if (useGpu) {
#ifdef __NVCC__
    options->loc = CTC_GPU;
    options->stream = STREAM_DEFAULT;
#else
    LOG(FATAL) << "[warpctc init] GPU is not enabled.";
#endif
  } else {
    options->loc = CTC_CPU;
    options->num_threads = 1;
  }

  options->blank_label = blank;
}

void hl_warpctc_compute_loss(const real* batchInput,
                             real* batchGrad,
                             const int* cpuLabels,
                             const int* cpuLabelLengths,
                             const int* cpuInputLengths,
                             const size_t numClasses,
                             const size_t numSequences,
                             real* cpuCosts,
                             void* workspace,
                             hl_warpctc_options_t* options) {
  CHECK_NOTNULL(batchInput);
  CHECK_NOTNULL(cpuLabels);
  CHECK_NOTNULL(cpuLabelLengths);
  CHECK_NOTNULL(cpuInputLengths);
  CHECK_NOTNULL(cpuCosts);
  CHECK_NOTNULL(workspace);
  CHECK_NOTNULL(options);

  CHECK_WARPCTC(WARPCTC_COMPUTE_LOSS(batchInput,
                                     batchGrad,
                                     cpuLabels,
                                     cpuLabelLengths,
                                     cpuInputLengths,
                                     numClasses,
                                     numSequences,
                                     cpuCosts,
                                     workspace,
                                     *options));
}

void hl_warpctc_get_workspace_size(const int* cpuLabelLengths,
                                   const int* cpuInputLengths,
                                   const size_t numClasses,
                                   const size_t numSequences,
                                   hl_warpctc_options_t* options,
                                   size_t* bytes) {
  CHECK_NOTNULL(cpuLabelLengths);
  CHECK_NOTNULL(cpuInputLengths);
  CHECK_NOTNULL(options);
  CHECK_NOTNULL(bytes);

  CHECK_WARPCTC(WARPCTC_GET_WORKSPACE_SIZE(cpuLabelLengths,
                                           cpuInputLengths,
                                           numClasses,
                                           numSequences,
                                           *options,
                                           bytes));
}
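When PADDLE_USE_DSO is defined, each DYNAMIC_LOAD_WARPCTC_WRAP(name) invocation above declares a small functor that lazily resolves libwarp-ctc.so the first time the wrapped symbol is called, and caches the handle for every later call. Hand-expanded for get_warpctc_version, the generated code is roughly the sketch below; this is an illustration of the expansion within that file, not the literal preprocessor output.

// Rough equivalent of DYNAMIC_LOAD_WARPCTC_WRAP(get_warpctc_version)
// in the PADDLE_USE_DSO branch (sketch only).
struct DynLoad__get_warpctc_version {
  template <typename... Args>
  auto operator()(Args... args) -> decltype(get_warpctc_version(args...)) {
    using warpctcFunc = decltype(get_warpctc_version(args...)) (*)(Args...);
    // Resolve libwarp-ctc.so exactly once; the handle is cached for reuse.
    std::call_once(
        warpctc_dso_flag, GetWarpCTCDsoHandle, &warpctc_dso_handle);
    void* p_get_warpctc_version =
        dlsym(warpctc_dso_handle, "get_warpctc_version");
    return reinterpret_cast<warpctcFunc>(p_get_warpctc_version)(args...);
  }
} get_warpctc_version;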
@@ -0,0 +1,223 @@
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "WarpCTCLayer.h"

namespace paddle {

REGISTER_LAYER(warp_ctc, WarpCTCLayer);

bool WarpCTCLayer::init(const LayerMap& layerMap,
                        const ParameterMap& parameterMap) {
  /* Initialize the basic parent class */
  Layer::init(layerMap, parameterMap);

  CHECK_EQ(inputLayers_.size(), 2UL);

  /* The inputLayers_[0] must be sequence output without softmax */
  numClasses_ = config_.size();
  CHECK_GE(numClasses_, 2UL);
  CHECK_EQ(numClasses_, inputLayers_[0]->getSize());

  blank_ = config_.blank();
  CHECK_GE(blank_, 0UL);
  CHECK_LT(blank_, numClasses_);

  normByTimes_ = config_.norm_by_times();

  // We don't need sequenceStartPositions because each sample of output_ is
  // the cost of one whole sequence.
  setNeedSequenceInfo(false);

  return true;
}

void WarpCTCLayer::forward(PassType passType) {
  Layer::forward(passType);

  const Argument& output = getInput(0);
  const Argument& labels = getInput(1);

  CHECK(output.sequenceStartPositions);
  CHECK(labels.sequenceStartPositions);
  CHECK(labels.ids);

  size_t numSequences = labels.sequenceStartPositions->getSize() - 1;
  CHECK_EQ(numSequences, output.sequenceStartPositions->getSize() - 1);

  resizeOutput(numSequences, 1);

  const int* cpuLabelStartPositions =
      labels.sequenceStartPositions->getData(false);
  const int* cpuOutputStartPositions =
      output.sequenceStartPositions->getData(false);

  std::vector<int> cpuLabelLengths(numSequences);
  std::vector<int> cpuOutputLengths(numSequences);
  for (size_t i = 0; i < numSequences; i++) {
    cpuLabelLengths[i] =
        cpuLabelStartPositions[i + 1] - cpuLabelStartPositions[i];
    cpuOutputLengths[i] =
        cpuOutputStartPositions[i + 1] - cpuOutputStartPositions[i];
  }

  /* Get the maximum sequence length */
  maxSequenceLength_ = 0;
  maxSequenceLength_ = *std::max_element(
      cpuOutputLengths.data(), cpuOutputLengths.data() + numSequences);

  Matrix::resizeOrCreate(batchValue_,
                         /* height */ numSequences * maxSequenceLength_,
                         /* width */ numClasses_,
                         /* trans */ false,
                         /* useGpu */ useGpu_);
  Matrix::resizeOrCreate(batchGrad_,
                         /* height */ numSequences * maxSequenceLength_,
                         /* width */ numClasses_,
                         /* trans */ false,
                         /* useGpu */ useGpu_);
  batchGrad_->zeroMem();

  seq2batchPadding(output.value, batchValue_, output.sequenceStartPositions);

  /* labels always in CPU memory */
  IVector::resizeOrCreate(cpuLabels_,
                          /* size */ (labels.ids)->getSize(),
                          /* useGpu */ false);
  cpuLabels_->copyFrom(*(labels.ids));

  /* costs always in CPU memory */
  Matrix::resizeOrCreate(cpuCosts_,
                         /* height */ numSequences,
                         /* width */ 1,
                         /* trans */ false,
                         /* useGpu */ false);

  /* Init warp-ctc options */
  hl_warpctc_options_t options;
  hl_warpctc_init(blank_, useGpu_, &options);

  /* Get the needed workspace size */
  size_t workspaceBytes = 0;
  hl_warpctc_get_workspace_size(cpuLabelLengths.data(),
                                cpuOutputLengths.data(),
                                numClasses_,
                                numSequences,
                                &options,
                                &workspaceBytes);
  CHECK_GT(workspaceBytes, 0UL);

  size_t workspaceLength = workspaceBytes / sizeof(real) + 1;
  Vector::resizeOrCreate(workspace_,
                         /* size */ workspaceLength,
                         /* useGpu */ useGpu_);

  hl_warpctc_compute_loss(batchValue_->getData(),
                          batchGrad_->getData(),
                          cpuLabels_->getData(),
                          cpuLabelLengths.data(),
                          cpuOutputLengths.data(),
                          numClasses_,
                          numSequences,
                          cpuCosts_->getData(),
                          workspace_->getData(),
                          &options);

  /* Copy the costs */
  output_.value->copyFrom(*cpuCosts_);
}

void WarpCTCLayer::backward(const UpdateCallback& callback) {
  (void)callback;

  const Argument& output = getInput(0);
  CHECK(batchGrad_);

  batch2seqPadding(
      output.grad, batchGrad_, output.sequenceStartPositions, normByTimes_);
}

void WarpCTCLayer::seq2batchPadding(const MatrixPtr& seqValue,
                                    MatrixPtr& batchValue,
                                    const ICpuGpuVectorPtr& seqStartPositions) {
  size_t numSequences = seqStartPositions->getSize() - 1;
  const int* seqStartPositionsData = seqStartPositions->getData(useGpu_);

  real* seqData = seqValue->getData();
  real* batchData = batchValue->getData();
  if (useGpu_) {
    hl_sequence2batch_copy_padding(batchData,
                                   seqData,
                                   seqStartPositionsData,
                                   numClasses_,
                                   maxSequenceLength_,
                                   numSequences,
                                   false,
                                   true);
  } else {
    for (size_t i = 0; i < maxSequenceLength_; i++) {
      for (size_t j = 0; j < numSequences; j++) {
        size_t sequenceStart = seqStartPositionsData[j];
        size_t sequenceLength =
            seqStartPositionsData[j + 1] - seqStartPositionsData[j];
        if (i < sequenceLength) {
          memcpy(batchData + (i * numSequences + j) * numClasses_,
                 seqData + (sequenceStart + i) * numClasses_,
                 numClasses_ * sizeof(real));
        } else {
          memset(batchData + (i * numSequences + j) * numClasses_,
                 0,
                 numClasses_ * sizeof(real));
        }
      }
    }
  }
}

void WarpCTCLayer::batch2seqPadding(const MatrixPtr& seqValue,
                                    MatrixPtr& batchValue,
                                    const ICpuGpuVectorPtr& seqStartPositions,
                                    bool normByTimes) {
  size_t numSequences = seqStartPositions->getSize() - 1;
  const int* seqStartPositionsData = seqStartPositions->getData(useGpu_);

  real* seqData = seqValue->getData();
  real* batchData = batchValue->getData();
  if (useGpu_) {
    hl_sequence2batch_copy_padding(batchData,
                                   seqData,
                                   seqStartPositionsData,
                                   numClasses_,
                                   maxSequenceLength_,
                                   numSequences,
                                   normByTimes,
                                   false);
  } else {
    for (size_t i = 0; i < numSequences; i++) {
      int sequenceStart = seqStartPositionsData[i];
      int sequenceLength =
          seqStartPositionsData[i + 1] - seqStartPositionsData[i];
      real scale = normByTimes ? (1.0f / (real)sequenceLength) : 1.0f;
      for (int j = 0; j < sequenceLength; j++) {
        for (size_t k = 0; k < numClasses_; k++) {
          seqData[(sequenceStart + j) * numClasses_ + k] =
              batchData[(j * numSequences + i) * numClasses_ + k] * scale;
        }
      }
    }
  }
}

}  // namespace paddle
@@ -0,0 +1,65 @@
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "Layer.h"

namespace paddle {

/**
 * @brief A layer integrating the open-source warp-ctc library
 *        <https://github.com/baidu-research/warp-ctc> to compute the
 *        connectionist temporal classification (CTC) cost.
 *
 *        The config file api is warp_ctc_layer.
 */
class WarpCTCLayer : public Layer {
public:
  explicit WarpCTCLayer(const LayerConfig& config) : Layer(config) {}
  ~WarpCTCLayer() {}

  virtual bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);
  virtual void forward(PassType passType);
  virtual void backward(const UpdateCallback& callback);

protected:
  /**
   * Copy between the sequence-major matrix and the padded, time-major
   * batch matrix, e.g. for four sequences of lengths 4, 2, 3 and 1:
   *   sequence: (s0, s0, s0, s0; s1, s1; s2, s2, s2; s3)
   *   batch:    (s0, s1, s2, s3; s0, s1, s2, 0; s0, 0, s2, 0; s0, 0, 0, 0)
   */
  void seq2batchPadding(const MatrixPtr& seqValue,
                        MatrixPtr& batchValue,
                        const ICpuGpuVectorPtr& seqStartPositions);
  void batch2seqPadding(const MatrixPtr& seqValue,
                        MatrixPtr& batchValue,
                        const ICpuGpuVectorPtr& seqStartPositions,
                        bool normByTimes);

protected:
  size_t numClasses_;
  size_t blank_;
  size_t maxSequenceLength_;
  bool normByTimes_;

  MatrixPtr batchValue_;
  MatrixPtr batchGrad_;
  VectorPtr workspace_;

  IVectorPtr cpuLabels_;
  MatrixPtr cpuCosts_;
};

}  // namespace paddle
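The padding comment in WarpCTCLayer.h maps directly onto the CPU branch of seq2batchPadding in the layer implementation above: values are reordered from sequence-major storage into a time-major batch, and sequences shorter than maxSequenceLength_ are zero-padded, which is the layout warp-ctc expects. Below is a small standalone sketch of that reordering; it is a hypothetical illustration rather than code from this PR, and it hard-codes float in place of Paddle's real type.

#include <cstring>
#include <vector>

// Reorder sequence-major values into a time-major, zero-padded batch.
// seqStart has numSequences + 1 entries (prefix offsets, in time steps).
std::vector<float> toPaddedBatch(const std::vector<float>& seqData,
                                 const std::vector<int>& seqStart,
                                 int numClasses,
                                 int maxSequenceLength) {
  int numSequences = static_cast<int>(seqStart.size()) - 1;
  std::vector<float> batch(
      static_cast<size_t>(maxSequenceLength) * numSequences * numClasses,
      0.0f);
  for (int i = 0; i < maxSequenceLength; ++i) {  // time step
    for (int j = 0; j < numSequences; ++j) {     // sequence index
      int length = seqStart[j + 1] - seqStart[j];
      if (i < length) {
        std::memcpy(&batch[(i * numSequences + j) * numClasses],
                    &seqData[(seqStart[j] + i) * numClasses],
                    numClasses * sizeof(float));
      }  // otherwise keep the zero padding
    }
  }
  return batch;
}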
File diff suppressed because it is too large.
@@ -0,0 +1,20 @@
#!/bin/bash
set -e
WORK_DIR=$PWD
PROJ_ROOT=$(git rev-parse --show-cdup)
SUBMODULES=$(grep path ${PROJ_ROOT}.gitmodules | sed 's/^.*path = //')

for module in $SUBMODULES
do
  case $module in
    "warp-ctc")
      if [ -d ${PROJ_ROOT}warp-ctc/build ]; then
        rm -rf ${PROJ_ROOT}warp-ctc/build
      fi
      mkdir ${PROJ_ROOT}warp-ctc/build
      cd ${PROJ_ROOT}warp-ctc/build
      cmake ..; make
      ;;
  esac
done
cd $WORK_DIR
Some files were not shown because too many files have changed in this diff.