diff --git a/.gitignore b/.gitignore
index 7480bd53a4..020d3f0c30 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,4 +28,3 @@ cmake_install.cmake
 paddle/.timestamp
 python/paddlepaddle.egg-info/
 paddle/pybind/pybind.h
-python/paddle/v2/framework/tests/tmp/*
diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake
index 05d83ad58e..324e29f931 100644
--- a/cmake/external/openblas.cmake
+++ b/cmake/external/openblas.cmake
@@ -98,7 +98,7 @@ IF(NOT ${CBLAS_FOUND})
   ENDIF()
   INSTALL(CODE "execute_process(
     COMMAND ${CMAKE_COMMAND} -E copy_directory ${CBLAS_INSTALL_DIR}/lib
-            destination ${CMAKE_INSTALL_PREFIX}/${TMP_INSTALL_DIR}
+            ${CMAKE_INSTALL_PREFIX}/${TMP_INSTALL_DIR}
     )"
   )
   INSTALL(CODE "MESSAGE(STATUS \"Installing: \"
diff --git a/doc/design/evaluator.md b/doc/design/evaluator.md
new file mode 100644
index 0000000000..a62d75ffef
--- /dev/null
+++ b/doc/design/evaluator.md
@@ -0,0 +1,58 @@
+## Evaluator Design
+
+### The Problem
+
+During training or serving, we provide evaluation functions to measure the model performance, e.g., accuracy and precision. In the operator-based framework design, the data go through the network pipeline batch by batch. As a result, inside an operator we can only calculate the metrics of one minibatch. We need to provide a mechanism that calculates the metrics over every N passes/batches the user wants.
+
+### Evaluator Design
+Currently, every operation is expressed in the graph. We divide the evaluation process into three steps.
+
+1. Initialize the metric state and add it into the block.
+
+2. Calculate the statistics of the metric state in every mini-batch. A single operator is only responsible for calculating the necessary statistics of one mini-batch. For example, the accuracy operator only processes one minibatch of data each time it runs.
+
+3. Merge the mini-batch statistics to form the evaluation result for multiple mini-batches. When it comes to distributed training or multi-GPU training, aggregate the values from different devices.
+
+### Implementation
+This design is shown in the Python API.
+Each metric operator needs to calculate its metric statistics and return batch-aware states; the Python side is responsible for accumulating the states over each pass.
+
+
+```python
+class Evaluator(object):
+    """
+    Evaluator Base class.
+    """
+    def __init__(self, name, **kwargs):
+        """
+        Different evaluators may have different metric states. E.g., Accuracy needs two variables, the total and correct sample counts. Auc needs four variables: `true_positives`,
+        `true_negatives`, `false_positives` and `false_negatives`. So every evaluator should create its needed variables and append them to main_program.
+
+        The initialization of Evaluator is responsible for
+        creating the metric states and appending them to the main_program.
+        """
+        pass
+
+    def _update_ops(self, input, label, **kwargs):
+        """
+        Add the mini-batch metric calculation operators to the main_program.
+        Add increment operators to accumulate the metric states.
+        """
+
+
+    def reset(self, executor, reset_program=None):
+        """
+        Reset the metric states at the beginning of each pass or after a user-specified number of batches.
+        Execute the reset_program to reset the states.
+        """
+
+
+    def eval(self, executor, eval_program=None):
+        """
+        Merge the mini-batch statistics to form the evaluation result for multiple mini-batches.
+        Execute the eval_program and return the result.
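+
+        A hypothetical usage sketch (the `Accuracy` subclass and the
+        surrounding training loop are illustrative assumptions, not part
+        of this design doc):
+
+            accuracy = Accuracy(input=out, label=label)
+            for pass_id in range(num_passes):
+                accuracy.reset(executor)
+                for data in train_reader():
+                    executor.run(main_program)
+                pass_accuracy = accuracy.eval(executor)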
+ """ + return eval_result +``` diff --git a/paddle/capi/Matrix.cpp b/paddle/capi/Matrix.cpp index 53a36f8f20..d5b55e1c95 100644 --- a/paddle/capi/Matrix.cpp +++ b/paddle/capi/Matrix.cpp @@ -121,6 +121,7 @@ paddle_error paddle_matrix_get_shape(paddle_matrix mat, paddle_matrix paddle_matrix_create_sparse( uint64_t height, uint64_t width, uint64_t nnz, bool isBinary, bool useGpu) { +#ifndef PADDLE_MOBILE_INFERENCE auto ptr = new paddle::capi::CMatrix(); ptr->mat = paddle::Matrix::createSparseMatrix( height, @@ -131,6 +132,9 @@ paddle_matrix paddle_matrix_create_sparse( false, useGpu); return ptr; +#else + return nullptr; +#endif } paddle_error paddle_matrix_sparse_copy_from(paddle_matrix mat, @@ -140,6 +144,7 @@ paddle_error paddle_matrix_sparse_copy_from(paddle_matrix mat, uint64_t colSize, float* valueArray, uint64_t valueSize) { +#ifndef PADDLE_MOBILE_INFERENCE if (mat == nullptr) return kPD_NULLPTR; auto ptr = cast(mat); if (rowArray == nullptr || colArray == nullptr || @@ -160,4 +165,7 @@ paddle_error paddle_matrix_sparse_copy_from(paddle_matrix mat, } else { return kPD_NOT_SUPPORTED; } +#else + return kPD_NOT_SUPPORTED; +#endif } diff --git a/paddle/capi/matrix.h b/paddle/capi/matrix.h index bb5223f8a2..01b8bad2ee 100644 --- a/paddle/capi/matrix.h +++ b/paddle/capi/matrix.h @@ -48,6 +48,7 @@ PD_API paddle_matrix paddle_matrix_create(uint64_t height, * @param isBinary is binary (either 1 or 0 in matrix) or not. * @param useGpu is using GPU or not. * @return paddle_matrix. + * @note Mobile inference does not support this interface. */ PD_API paddle_matrix paddle_matrix_create_sparse( uint64_t height, uint64_t width, uint64_t nnz, bool isBinary, bool useGpu); @@ -129,6 +130,7 @@ PD_API paddle_error paddle_matrix_get_shape(paddle_matrix mat, * NULL if the matrix is binary. * @param [in] valueSize length of value array. Zero if the matrix is binary. * @return paddle_error + * @note Mobile inference does not support this interface. */ PD_API paddle_error paddle_matrix_sparse_copy_from(paddle_matrix mat, int* rowArray, diff --git a/paddle/cuda/CMakeLists.txt b/paddle/cuda/CMakeLists.txt index 0865b02c4f..efd1b7a73e 100755 --- a/paddle/cuda/CMakeLists.txt +++ b/paddle/cuda/CMakeLists.txt @@ -27,7 +27,9 @@ if(WITH_GPU) set_source_files_properties(${CUDA_CXX_SOURCES} PROPERTIES COMPILE_FLAGS "-D__NVCC__") else() + if (NOT MOBILE_INFERENCE) set(CUDA_CXX_SOURCES src/hl_warpctc_wrap.cc) + endif() endif() set(CUDA_CU_SOURCES diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 1afc524208..c08e844847 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -38,9 +38,9 @@ py_proto_compile(framework_py_proto SRCS framework.proto) add_custom_target(framework_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py) add_dependencies(framework_py_proto framework_py_proto_init) add_custom_command(TARGET framework_py_proto POST_BUILD - COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_SOURCE_DIR}/python/paddle/v2/framework/proto - COMMAND cp *.py ${PADDLE_SOURCE_DIR}/python/paddle/v2/framework/proto/ - COMMENT "Copy generated python proto into directory paddle/v2/framework/proto." + COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/proto + COMMAND cp *.py ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/proto/ + COMMENT "Copy generated python proto into directory paddle/v2/fluid/proto." 
                    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
 
 cc_library(backward SRCS backward.cc DEPS net_op)
diff --git a/paddle/gserver/CMakeLists.txt b/paddle/gserver/CMakeLists.txt
index 5f39167afc..91d732641a 100644
--- a/paddle/gserver/CMakeLists.txt
+++ b/paddle/gserver/CMakeLists.txt
@@ -85,9 +85,49 @@ if(MOBILE_INFERENCE)
         gradientmachines/GradientMachineMode.cpp
         gradientmachines/MultiGradientMachine.cpp)
 
-    # Remove useless layers
+    # Remove layers that are only used in training
     list(REMOVE_ITEM GSERVER_SOURCES
-        layers/RecurrentLayerGroup.cpp)
+        layers/RecurrentLayerGroup.cpp
+        layers/CostLayer.cpp
+        layers/MultiBoxLossLayer.cpp
+        layers/WarpCTCLayer.cpp
+        layers/CTCLayer.cpp
+        layers/LinearChainCTC.cpp
+        layers/PrintLayer.cpp)
+    list(REMOVE_ITEM GSERVER_SOURCES
+        layers/OuterProdLayer.cpp
+        layers/SumToOneNormLayer.cpp
+        layers/ConvShiftLayer.cpp
+        layers/InterpolationLayer.cpp
+        layers/AgentLayer.cpp
+        layers/DotMulOperator.cpp
+        layers/GruStepLayer.cpp
+        layers/LstmStepLayer.cpp
+        layers/ConvexCombinationLayer.cpp
+        layers/Conv3DLayer.cpp
+        layers/DeConv3DLayer.cpp
+        layers/CropLayer.cpp
+        layers/CrossEntropyOverBeam.cpp
+        layers/DataNormLayer.cpp
+        layers/FeatureMapExpandLayer.cpp
+        layers/HierarchicalSigmoidLayer.cpp
+        layers/MultinomialSampler.cpp
+        layers/NCELayer.cpp
+        layers/KmaxSeqScoreLayer.cpp
+        layers/MDLstmLayer.cpp
+        layers/MultiplexLayer.cpp
+        layers/PadLayer.cpp
+        layers/Pool3DLayer.cpp
+        layers/ResizeLayer.cpp
+        layers/RotateLayer.cpp
+        layers/RowConvLayer.cpp
+        layers/RowL2NormLayer.cpp
+        layers/SamplingIdLayer.cpp
+        layers/ScaleShiftLayer.cpp
+        layers/SelectiveFullyConnectedLayer.cpp
+        layers/SpatialPyramidPoolLayer.cpp
+        layers/BilinearInterpLayer.cpp
+        layers/ClipLayer.cpp)
 endif()
 
 if(WITH_GPU)
diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.cpp b/paddle/gserver/gradientmachines/NeuralNetwork.cpp
index dbadc352a4..be112b4123 100644
--- a/paddle/gserver/gradientmachines/NeuralNetwork.cpp
+++ b/paddle/gserver/gradientmachines/NeuralNetwork.cpp
@@ -16,7 +16,6 @@ limitations under the License. */
 
 #include "NeuralNetwork.h"
 #include "hl_gpu.h"
-#include "paddle/gserver/layers/AgentLayer.h"
 #include "paddle/utils/CustomStackTrace.h"
 #include "paddle/utils/Logging.h"
 #include "paddle/utils/Stat.h"
@@ -28,6 +27,7 @@ limitations under the License. */
 #ifndef PADDLE_MOBILE_INFERENCE
 #include "MultiNetwork.h"
 #include "RecurrentGradientMachine.h"
+#include "paddle/gserver/layers/AgentLayer.h"
 #endif
 
 namespace paddle {
@@ -192,9 +192,11 @@ void NeuralNetwork::init(const ModelConfig& config,
 void NeuralNetwork::connect(LayerPtr agentLayer,
                             LayerPtr realLayer,
                             int height) {
+#ifndef PADDLE_MOBILE_INFERENCE
  AgentLayer* agent = dynamic_cast<AgentLayer*>(agentLayer.get());
   CHECK_NOTNULL(agent);
   agent->setRealLayer(realLayer, height);
+#endif
 }
 
 void NeuralNetwork::connect(std::string agentLayerName,
diff --git a/paddle/gserver/layers/Layer.cpp b/paddle/gserver/layers/Layer.cpp
index 01f2aae6cf..b55b86221c 100644
--- a/paddle/gserver/layers/Layer.cpp
+++ b/paddle/gserver/layers/Layer.cpp
@@ -98,6 +98,7 @@ ClassRegistrar<Layer, LayerConfig> Layer::registrar_;
 
 LayerPtr Layer::create(const LayerConfig& config) {
   std::string type = config.type();
+#ifndef PADDLE_MOBILE_INFERENCE
   // NOTE: As following types have illegal character '-',
   // they can not use REGISTER_LAYER to registrar.
  // Besides, to fit with old training models,
@@ -106,7 +107,6 @@ LayerPtr Layer::create(const LayerConfig& config) {
     return LayerPtr(new MultiClassCrossEntropy(config));
   else if (type == "rank-cost")
     return LayerPtr(new RankingCost(config));
-#ifndef PADDLE_MOBILE_INFERENCE
   else if (type == "auc-validation")
     return LayerPtr(new AucValidation(config));
   else if (type == "pnpair-validation")
diff --git a/paddle/gserver/layers/ROIPoolLayer.cpp b/paddle/gserver/layers/ROIPoolLayer.cpp
index 35d4b12d3d..02402894d3 100644
--- a/paddle/gserver/layers/ROIPoolLayer.cpp
+++ b/paddle/gserver/layers/ROIPoolLayer.cpp
@@ -100,8 +100,9 @@ void ROIPoolLayer::forward(PassType passType) {
     size_t roiEndH = round(bottomROIs[4] * spatialScale_);
     CHECK_GE(roiBatchIdx, 0UL);
     CHECK_LT(roiBatchIdx, batchSize);
-    size_t roiHeight = std::max(roiEndH - roiStartH + 1, 1UL);
-    size_t roiWidth = std::max(roiEndW - roiStartW + 1, 1UL);
+    size_t roiHeight =
+        std::max(roiEndH - roiStartH + 1, static_cast<size_t>(1));
+    size_t roiWidth = std::max(roiEndW - roiStartW + 1, static_cast<size_t>(1));
     real binSizeH =
         static_cast<real>(roiHeight) / static_cast<real>(pooledHeight_);
     real binSizeW =
@@ -114,10 +115,14 @@ void ROIPoolLayer::forward(PassType passType) {
         size_t wstart = static_cast<size_t>(std::floor(pw * binSizeW));
         size_t hend = static_cast<size_t>(std::ceil((ph + 1) * binSizeH));
         size_t wend = static_cast<size_t>(std::ceil((pw + 1) * binSizeW));
-        hstart = std::min(std::max(hstart + roiStartH, 0UL), height_);
-        wstart = std::min(std::max(wstart + roiStartW, 0UL), width_);
-        hend = std::min(std::max(hend + roiStartH, 0UL), height_);
-        wend = std::min(std::max(wend + roiStartW, 0UL), width_);
+        hstart = std::min(
+            std::max(hstart + roiStartH, static_cast<size_t>(0)), height_);
+        wstart = std::min(
+            std::max(wstart + roiStartW, static_cast<size_t>(0)), width_);
+        hend = std::min(std::max(hend + roiStartH, static_cast<size_t>(0)),
+                        height_);
+        wend = std::min(std::max(wend + roiStartW, static_cast<size_t>(0)),
+                        width_);
 
         bool isEmpty = (hend <= hstart) || (wend <= wstart);
         size_t poolIndex = ph * pooledWidth_ + pw;
diff --git a/paddle/gserver/tests/CMakeLists.txt b/paddle/gserver/tests/CMakeLists.txt
index e24d423b9e..4bea348f63 100644
--- a/paddle/gserver/tests/CMakeLists.txt
+++ b/paddle/gserver/tests/CMakeLists.txt
@@ -1,9 +1,12 @@
 # gserver package unittests
 
 add_simple_unittest(test_LinearChainCRF)
-add_simple_unittest(test_MultinomialSampler)
 add_simple_unittest(test_RecurrentLayer)
 
+if(NOT MOBILE_INFERENCE)
+  add_simple_unittest(test_MultinomialSampler)
+endif()
+
 function(gserver_test TARGET)
   add_unittest_without_exec(${TARGET}
       ${TARGET}.cpp
@@ -49,7 +52,7 @@ if(WITH_PYTHON)
 endif()
 
 ############### test_WarpCTCLayer #######################
-if(NOT WITH_DOUBLE)
+if(NOT WITH_DOUBLE AND NOT MOBILE_INFERENCE)
   add_unittest_without_exec(test_WarpCTCLayer
       test_WarpCTCLayer.cpp)
diff --git a/paddle/math/BaseMatrix.cu b/paddle/math/BaseMatrix.cu
index 53dd538360..e3eff59dc5 100644
--- a/paddle/math/BaseMatrix.cu
+++ b/paddle/math/BaseMatrix.cu
@@ -1902,5 +1902,52 @@ void BaseMatrixT<T>::sumOfProducts(BaseMatrixT& b,
 }
 
 template class BaseMatrixT<real>;
+
+#ifndef PADDLE_MOBILE_INFERENCE
+
 template class BaseMatrixT<int>;
+
+#else
+
+template <>
+void BaseMatrixT<int>::zero() {
+  applyUnary(unary::Zero<int>());
+}
+
+template <>
+void BaseMatrixT<int>::assign(int p) {
+  applyUnary(unary::Assign<int>(p));
+}
+
+template <>
+void BaseMatrixT<int>::isEqualTo(BaseMatrixT& b, int value) {
+  applyBinary(binary::IsEqual<int>(value), b);
+}
+
+template <>
+void BaseMatrixT<int>::neg() {
+  applyUnary(unary::Neg<int>());
+}
+
+template <>
+void BaseMatrixT<int>::abs2() {
+  applyUnary(unary::Abs<int>());
+}
+
+template <>
+void BaseMatrixT<int>::add(int p) {
+  applyUnary(unary::Add<int>(p));
+}
+
+template <>
+void BaseMatrixT<int>::add(int p1, int p2) {
+  applyUnary(unary::Add2<int>(p1, p2));
+}
+
+template <>
+void BaseMatrixT<int>::applyL1(int learningRate, int decayRate) {
+  applyUnary(unary::ApplyL1<int>(learningRate * decayRate));
+}
+
+#endif
 }  // namespace paddle
diff --git a/paddle/math/CMakeLists.txt b/paddle/math/CMakeLists.txt
index 68b5296228..86bb270a43 100644
--- a/paddle/math/CMakeLists.txt
+++ b/paddle/math/CMakeLists.txt
@@ -25,6 +25,19 @@ else()
     message(STATUS "Compile with MKLDNNMatrix")
 endif()
 
+if(MOBILE_INFERENCE)
+  list(REMOVE_ITEM MATH_SOURCES
+      ${CMAKE_CURRENT_SOURCE_DIR}/SIMDFunctions.cpp)
+  # Remove sparse
+  list(REMOVE_ITEM MATH_HEADERS
+      ${CMAKE_CURRENT_SOURCE_DIR}/CpuSparseMatrix.h
+      ${CMAKE_CURRENT_SOURCE_DIR}/SparseMatrix.h
+      ${CMAKE_CURRENT_SOURCE_DIR}/SparseRowMatrix.h)
+  list(REMOVE_ITEM MATH_SOURCES
+      ${CMAKE_CURRENT_SOURCE_DIR}/CpuSparseMatrix.cpp
+      ${CMAKE_CURRENT_SOURCE_DIR}/SparseMatrix.cpp
+      ${CMAKE_CURRENT_SOURCE_DIR}/SparseRowMatrix.cpp)
+endif()
 set(MATH_SOURCES
     "${PADDLE_SOURCE_DIR}/paddle/math/BaseMatrix.cu"
     "${PADDLE_SOURCE_DIR}/paddle/math/TrainingAlgorithmOp.cu"
diff --git a/paddle/math/CpuSparseMatrix.h b/paddle/math/CpuSparseMatrix.h
index 36d57bbb65..aad1348353 100644
--- a/paddle/math/CpuSparseMatrix.h
+++ b/paddle/math/CpuSparseMatrix.h
@@ -13,6 +13,9 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #pragma once
+
+#ifndef PADDLE_MOBILE_INFERENCE
+
 #include
 #include "Matrix.h"
 
@@ -309,3 +312,57 @@ private:
   using Matrix::subMatrix;
 };
 }  // namespace paddle
+
+#else
+
+#include "Matrix.h"
+
+namespace paddle {
+
+class CpuSparseMatrix : public Matrix {
+public:
+  CpuSparseMatrix(size_t height,
+                  size_t width,
+                  size_t nnz, /* used to allocate space */
+                  SparseValueType valueType = FLOAT_VALUE,
+                  SparseFormat format = SPARSE_CSR,
+                  bool trans = false)
+      : Matrix(NULL, height, width, trans, false) {}
+
+  CpuSparseMatrix(real* data,
+                  int* rows,
+                  int* cols,
+                  size_t height,
+                  size_t width,
+                  size_t nnz,
+                  SparseValueType valueType,
+                  SparseFormat format,
+                  bool trans)
+      : Matrix(NULL, height, width, trans, false) {}
+
+  real* getValue() const { return nullptr; }
+  size_t getColStartIdx(size_t i) const { return 0; }
+  size_t getRowStartIdx(size_t i) const { return 0; }
+  size_t getColNum(size_t i) const { return 0; }
+  int* getRowCols(size_t i) const { return nullptr; }
+
+  CpuSparseMatrixPtr getTmpSparseMatrix(size_t height, size_t width) {
+    return nullptr;
+  }
+
+  void resize(size_t newHeight,
+              size_t newWidth,
+              size_t newNnz, /* used to allocate space */
+              SparseValueType valueType,
+              SparseFormat format) {}
+  void resize(size_t newHeight, size_t newWidth) {}
+  MatrixPtr getTranspose() { return nullptr; }
+  void setRow(size_t row,
+              size_t colNum,
+              const unsigned int* cols,
+              const real* values) {}
+};
+
+}  // namespace paddle
+
+#endif
diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp
index 41ee508967..88e9180690 100644
--- a/paddle/math/Matrix.cpp
+++ b/paddle/math/Matrix.cpp
@@ -451,6 +451,7 @@ void GpuMatrix::addSharedBias(Matrix& b, real scale) {
 }
 
 void GpuMatrix::collectBias(Matrix& a, real scale) {
+#ifdef PADDLE_WITH_CUDA
   CHECK_EQ(getHeight(), (size_t)1);
   CHECK_EQ(width_, a.getWidth());
  GpuSparseMatrix* sMatPtr = dynamic_cast<GpuSparseMatrix*>(&a);
@@ -461,6 +462,7 @@ void GpuMatrix::collectBias(Matrix& a, real scale) {
     hl_sparse_matrix_s A_d = sMatPtr->sMatrix_.get();
hl_sparse_matrix_column_sum(data, A_d, sMatPtr->getHeight(), width_, scale); } +#endif } void GpuMatrix::collectSharedBias(Matrix& a, real scale) { @@ -552,6 +554,7 @@ void GpuMatrix::mul(const GpuSparseMatrix& a, const GpuMatrix& b, real scaleAB, real scaleT) { +#ifdef PADDLE_WITH_CUDA CHECK(isContiguous()); CHECK(b.isContiguous()); CHECK(b.useGpu_ == true) << "Matrix type are not equal"; @@ -578,12 +581,14 @@ void GpuMatrix::mul(const GpuSparseMatrix& a, b.height_, scaleAB, scaleT); +#endif } void GpuMatrix::mul(const GpuMatrix& a, const GpuSparseMatrix& b, real scaleAB, real scaleT) { +#ifdef PADDLE_WITH_CUDA CHECK(isContiguous()); CHECK(a.isContiguous()); CHECK(a.useGpu_ == true) << "Matrix type are not equal"; @@ -622,6 +627,7 @@ void GpuMatrix::mul(const GpuMatrix& a, scaleAB, scaleT); } +#endif } /* this = a*b */ @@ -1557,6 +1563,7 @@ void GpuMatrix::bilinearBackward(const Matrix& out, } void GpuMatrix::multiBinaryLabelCrossEntropy(Matrix& output, Matrix& label) { +#ifdef PADDLE_WITH_CUDA GpuMatrix* outputPtr = dynamic_cast(&output); auto labelPtr = dynamic_cast(&label); @@ -1572,9 +1579,11 @@ void GpuMatrix::multiBinaryLabelCrossEntropy(Matrix& output, Matrix& label) { hl_sparse_matrix_s mat_d = labelPtr->sMatrix_.get(); hl_matrix_multi_binary_cross_entropy( output_d, entropy_d, mat_d, height_, outputPtr->width_); +#endif } void GpuMatrix::multiBinaryLabelCrossEntropyBp(Matrix& output, Matrix& label) { +#ifdef PADDLE_WITH_CUDA GpuMatrix* outputPtr = dynamic_cast(&output); auto labelPtr = dynamic_cast(&label); @@ -1590,6 +1599,7 @@ void GpuMatrix::multiBinaryLabelCrossEntropyBp(Matrix& output, Matrix& label) { hl_sparse_matrix_s mat_d = labelPtr->sMatrix_.get(); hl_matrix_multi_binary_cross_entropy_bp( output_d, grad_d, mat_d, height_, width_); +#endif } void GpuMatrix::vol2Col(real* dataSrc, @@ -3255,6 +3265,7 @@ template void CpuMatrix::mul(CpuSparseMatrix* a, real scaleAB, real scaleT); +#ifndef PADDLE_MOBILE_INFERENCE void SharedCpuMatrix::mul(CpuSparseMatrix* a, CpuMatrix* b, real scaleAB, @@ -3383,6 +3394,7 @@ void SharedCpuMatrix::initBlock(int blockNum) { } } +#endif /* Add a (column) vector b to matrix a, column by column */ void CpuMatrix::addColumnVector(const Matrix& b) { BaseMatrix::addColVector(const_cast(b)); diff --git a/paddle/math/Matrix.h b/paddle/math/Matrix.h index d252d64225..e273f11236 100644 --- a/paddle/math/Matrix.h +++ b/paddle/math/Matrix.h @@ -2070,6 +2070,7 @@ public: class SharedCpuMatrix : public CpuMatrix { public: +#ifndef PADDLE_MOBILE_INFERENCE /* blockNum is number of partitions of the matrix */ SharedCpuMatrix(int blockNum, size_t height, size_t width, bool trans = false) : CpuMatrix(height, width, trans) { @@ -2115,6 +2116,7 @@ private: ThreadLocal localBuf_; ThreadLocal> localBufRows_; ThreadLocal> blockSeq_; +#endif }; typedef struct { unsigned int col; } sparse_non_value_t; diff --git a/paddle/math/SparseMatrix.h b/paddle/math/SparseMatrix.h index 16300db081..e0a3c6d228 100644 --- a/paddle/math/SparseMatrix.h +++ b/paddle/math/SparseMatrix.h @@ -13,6 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #pragma once + +#ifndef PADDLE_MOBILE_INFERENCE + #include #include "CpuSparseMatrix.h" #include "Matrix.h" @@ -237,3 +240,47 @@ private: }; } // namespace paddle + +#else + +#include "CpuSparseMatrix.h" + +namespace paddle { + +class GpuSparseMatrix : public Matrix { +public: + GpuSparseMatrix(size_t height, + size_t width, + size_t nnz, /* used to allocate space */ + SparseValueType valueType = FLOAT_VALUE, + SparseFormat format_ = SPARSE_CSR, + bool trans = false) + : Matrix(NULL, height, width, trans, false) {} + + GpuSparseMatrix(real* value, + int* rows, + int* cols, + size_t height, + size_t width, + size_t nnz, + SparseValueType valueType, + SparseFormat format, + bool trans) + : Matrix(NULL, height, width, trans, true) {} + + void resize(size_t newHeight, + size_t newWidth, + size_t newNnz, /* used to allocate space */ + SparseValueType valueType, + SparseFormat format) {} + void resize(size_t newHeight, size_t newWidth) {} + MatrixPtr getTranspose() { return nullptr; } + void setRow(size_t row, + size_t colNum, + const unsigned int* cols, + const real* values) {} +}; + +} // namespace paddle + +#endif diff --git a/paddle/math/SparseRowMatrix.h b/paddle/math/SparseRowMatrix.h index 8704eb038d..ca7a6806da 100644 --- a/paddle/math/SparseRowMatrix.h +++ b/paddle/math/SparseRowMatrix.h @@ -14,6 +14,8 @@ limitations under the License. */ #pragma once +#ifndef PADDLE_MOBILE_INFERENCE + #include #include #include @@ -313,3 +315,27 @@ private: }; } // namespace paddle + +#else +namespace paddle { + +class SparseRowCpuMatrix : public CpuMatrix { +public: + void reserveStore() {} + void clearIndices() {} +}; + +class SparsePrefetchRowCpuMatrix : public SparseRowCpuMatrix { +public: + void setupIndices() {} + void addRows(MatrixPtr input) {} + void addRows(IVectorPtr ids) {} +}; + +class SparseAutoGrowRowCpuMatrix : public SparseRowCpuMatrix {}; +class CacheRowCpuMatrix : public SparseAutoGrowRowCpuMatrix {}; +class SparseRowIdsCpuMatrix : public CpuMatrix {}; + +} // namespace paddle + +#endif diff --git a/paddle/math/tests/CMakeLists.txt b/paddle/math/tests/CMakeLists.txt index ceb96b2e25..d8b7f9e3fc 100644 --- a/paddle/math/tests/CMakeLists.txt +++ b/paddle/math/tests/CMakeLists.txt @@ -3,8 +3,10 @@ add_simple_unittest(test_ExecViaCpu) add_simple_unittest(test_SIMDFunctions) add_simple_unittest(test_TrainingAlgorithm) -add_simple_unittest(test_SparseMatrix) add_simple_unittest(test_RowBuffer) +if(NOT MOBILE_INFERENCE) + add_simple_unittest(test_SparseMatrix) +endif() # TODO(yuyang18): Refactor TestUtil.cpp. Remove this cross module reference. 
add_unittest(test_matrixCompare diff --git a/paddle/operators/accuracy_op.cc b/paddle/operators/accuracy_op.cc index 03c2fa945d..2785a8c6fb 100644 --- a/paddle/operators/accuracy_op.cc +++ b/paddle/operators/accuracy_op.cc @@ -30,6 +30,10 @@ class AccuracyOp : public framework::OperatorWithKernel { "Input (Label) of accuracy op should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Accuracy"), "Output (Accuracy) of AccuracyOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Correct"), + "Output (Correct) of AccuracyOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Total"), + "Output (Total) of AccuracyOp should not be null."); auto inference_dim = ctx->GetInputDim("Out"); auto label_dim = ctx->GetInputDim("Label"); @@ -43,6 +47,8 @@ class AccuracyOp : public framework::OperatorWithKernel { " the same as label."); ctx->SetOutputDim("Accuracy", {1}); + ctx->SetOutputDim("Correct", {1}); + ctx->SetOutputDim("Total", {1}); ctx->ShareLoD("Out", /*->*/ "Accuracy"); } @@ -66,6 +72,8 @@ class AccuracyOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("Label", "Label of the training data"); // TODO(typhoonzero): AddInput("Weight", ... AddOutput("Accuracy", "The accuracy of current batch"); + AddOutput("Correct", "The correct samples count of current batch"); + AddOutput("Total", "The samples count of current batch"); AddComment(R"DOC( Accuracy Operator. diff --git a/paddle/operators/accuracy_op.cu b/paddle/operators/accuracy_op.cu index 1776f33105..b575c682f0 100644 --- a/paddle/operators/accuracy_op.cu +++ b/paddle/operators/accuracy_op.cu @@ -24,7 +24,8 @@ using platform::PADDLE_CUDA_NUM_THREADS; template __global__ void AccuracyCudaKernel(const int N, const int D, const int64_t* Xdata, - const int64_t* labeldata, float* accuracy) { + const int64_t* labeldata, int* correct_data, + float* accuracy) { int count = 0; __shared__ int total[BlockSize]; @@ -43,6 +44,7 @@ __global__ void AccuracyCudaKernel(const int N, const int D, // reduce the count with init value 0, and output accuracy. int result = thrust::reduce(thrust::device, total, total + BlockSize, 0); if (threadIdx.x == 0) { + *correct_data = result; *accuracy = static_cast(result) / static_cast(N); } } @@ -56,31 +58,48 @@ class AccuracyOpCUDAKernel : public framework::OpKernel { auto* inference = ctx.Input("Out"); auto* indices = ctx.Input("Indices"); auto* label = ctx.Input("Label"); + auto* accuracy = ctx.Output("Accuracy"); + auto* correct = ctx.Output("Correct"); + auto* total = ctx.Output("Total"); // FIXME(typhoonzero): only support indices currently // if add support for output values, how to detect the data type? 
const int64_t* indices_data = indices->data(); const int64_t* label_data = label->data(); + + int* correct_data = correct->mutable_data(ctx.GetPlace()); + int* total_data = total->mutable_data(ctx.GetPlace()); float* accuracy_data = accuracy->mutable_data(ctx.GetPlace()); - size_t num_samples = inference->dims()[0]; + int num_samples = static_cast(inference->dims()[0]); size_t infer_width = inference->dims()[1]; PADDLE_ENFORCE(cudaMemset(accuracy_data, 0, sizeof(float))); + // cudaMemset((void**)&correct_data, 0, sizeof(float)); if (num_samples == 0) { return; } + cudaMemcpy(total_data, &num_samples, sizeof(int), cudaMemcpyHostToDevice); AccuracyCudaKernel<<< 1, PADDLE_CUDA_NUM_THREADS, 0, ctx.cuda_device_context().stream()>>>( - num_samples, infer_width, indices_data, label_data, accuracy_data); + num_samples, infer_width, indices_data, label_data, correct_data, + accuracy_data); + + int d_num_samples, d_num_correct; + float d_accuracy; + cudaMemcpy(&d_num_correct, correct_data, sizeof(int), + cudaMemcpyDeviceToHost); + cudaMemcpy(&d_num_samples, total_data, sizeof(int), cudaMemcpyDeviceToHost); + cudaMemcpy(&d_accuracy, accuracy_data, sizeof(float), + cudaMemcpyDeviceToHost); } }; } // namespace operators } // namespace paddle -// FIXME(typhoonzero): types of T is for infernece data. -// label data is always int +// FIXME(typhoonzero): types of T is for inference data. +// label data is always int64 REGISTER_OP_GPU_KERNEL(accuracy, paddle::operators::AccuracyOpCUDAKernel, paddle::operators::AccuracyOpCUDAKernel); diff --git a/paddle/operators/accuracy_op.h b/paddle/operators/accuracy_op.h index 28dbc77f64..d060e6eddd 100644 --- a/paddle/operators/accuracy_op.h +++ b/paddle/operators/accuracy_op.h @@ -29,7 +29,11 @@ class AccuracyKernel : public framework::OpKernel { auto* indices = ctx.Input("Indices"); auto* label = ctx.Input("Label"); auto* accuracy = ctx.Output("Accuracy"); + auto* correct = ctx.Output("Correct"); + auto* total = ctx.Output("Total"); + int* correct_data = correct->mutable_data(ctx.GetPlace()); + int* total_data = total->mutable_data(ctx.GetPlace()); float* accuracy_data = accuracy->mutable_data(ctx.GetPlace()); const int64_t* indices_data = indices->data(); @@ -55,7 +59,8 @@ class AccuracyKernel : public framework::OpKernel { } } - // FIXME(typhoonzero): we don't accumulate the accuracy for now. 
+  *correct_data = num_correct;
+  *total_data = num_samples;
   *accuracy_data =
       static_cast<float>(num_correct) / static_cast<float>(num_samples);
 }
diff --git a/paddle/operators/compare_op.cc b/paddle/operators/compare_op.cc
index 716b5ee92d..bf7e883681 100644
--- a/paddle/operators/compare_op.cc
+++ b/paddle/operators/compare_op.cc
@@ -94,5 +94,13 @@ class CompareOp : public framework::OperatorWithKernel {
 
 REGISTER_LOGICAL_OP(less_than, "Out = X < Y");
 REGISTER_LOGICAL_KERNEL(less_than, CPU, paddle::operators::LessThanFunctor);
+REGISTER_LOGICAL_OP(less_equal, "Out = X <= Y");
+REGISTER_LOGICAL_KERNEL(less_equal, CPU, paddle::operators::LessEqualFunctor);
+REGISTER_LOGICAL_OP(greater_than, "Out = X > Y");
+REGISTER_LOGICAL_KERNEL(greater_than, CPU,
+                        paddle::operators::GreaterThanFunctor);
+REGISTER_LOGICAL_OP(greater_equal, "Out = X >= Y");
+REGISTER_LOGICAL_KERNEL(greater_equal, CPU,
+                        paddle::operators::GreaterEqualFunctor);
 REGISTER_LOGICAL_OP(equal, "Out = X == Y");
 REGISTER_LOGICAL_KERNEL(equal, CPU, paddle::operators::EqualFunctor);
diff --git a/paddle/operators/compare_op.cu b/paddle/operators/compare_op.cu
index 42a5bb2f45..6ac8c124b9 100644
--- a/paddle/operators/compare_op.cu
+++ b/paddle/operators/compare_op.cu
@@ -15,4 +15,9 @@
 #include "paddle/operators/compare_op.h"
 
 REGISTER_LOGICAL_KERNEL(less_than, GPU, paddle::operators::LessThanFunctor);
+REGISTER_LOGICAL_KERNEL(less_equal, GPU, paddle::operators::LessEqualFunctor);
+REGISTER_LOGICAL_KERNEL(greater_than, GPU,
+                        paddle::operators::GreaterThanFunctor);
+REGISTER_LOGICAL_KERNEL(greater_equal, GPU,
+                        paddle::operators::GreaterEqualFunctor);
 REGISTER_LOGICAL_KERNEL(equal, GPU, paddle::operators::EqualFunctor);
diff --git a/paddle/operators/compare_op.h b/paddle/operators/compare_op.h
index 04e04e347b..afdf3ab3e0 100644
--- a/paddle/operators/compare_op.h
+++ b/paddle/operators/compare_op.h
@@ -27,6 +27,24 @@ struct LessThanFunctor {
   HOSTDEVICE bool operator()(const T& a, const T& b) const { return a < b; }
 };
 
+template <typename T>
+struct LessEqualFunctor {
+  using ELEM_TYPE = T;
+  HOSTDEVICE bool operator()(const T& a, const T& b) const { return a <= b; }
+};
+
+template <typename T>
+struct GreaterThanFunctor {
+  using ELEM_TYPE = T;
+  HOSTDEVICE bool operator()(const T& a, const T& b) const { return a > b; }
+};
+
+template <typename T>
+struct GreaterEqualFunctor {
+  using ELEM_TYPE = T;
+  HOSTDEVICE bool operator()(const T& a, const T& b) const { return a >= b; }
+};
+
 template <typename T>
 struct EqualFunctor {
   using ELEM_TYPE = T;
diff --git a/paddle/operators/elementwise_add_op.cc b/paddle/operators/elementwise_add_op.cc
index ebe1de90c7..432b9ba6f7 100644
--- a/paddle/operators/elementwise_add_op.cc
+++ b/paddle/operators/elementwise_add_op.cc
@@ -34,7 +34,13 @@ REGISTER_OP(elementwise_add, ops::ElementwiseOp, ops::ElementwiseAddOpMaker,
             elementwise_add_grad, ops::ElementwiseOpGrad);
 REGISTER_OP_CPU_KERNEL(
     elementwise_add,
-    ops::ElementwiseAddKernel<paddle::platform::CPUPlace, float>);
+    ops::ElementwiseAddKernel<paddle::platform::CPUPlace, float>,
+    ops::ElementwiseAddKernel<paddle::platform::CPUPlace, double>,
+    ops::ElementwiseAddKernel<paddle::platform::CPUPlace, int>,
+    ops::ElementwiseAddKernel<paddle::platform::CPUPlace, int64_t>);
 REGISTER_OP_CPU_KERNEL(
     elementwise_add_grad,
-    ops::ElementwiseAddGradKernel<paddle::platform::CPUPlace, float>);
+    ops::ElementwiseAddGradKernel<paddle::platform::CPUPlace, float>,
+    ops::ElementwiseAddGradKernel<paddle::platform::CPUPlace, double>,
+    ops::ElementwiseAddGradKernel<paddle::platform::CPUPlace, int>,
+    ops::ElementwiseAddGradKernel<paddle::platform::CPUPlace, int64_t>);
diff --git a/paddle/operators/elementwise_div_op.cc b/paddle/operators/elementwise_div_op.cc
index de75816a24..7a325199bd 100644
--- a/paddle/operators/elementwise_div_op.cc
+++ b/paddle/operators/elementwise_div_op.cc
@@ -35,7 +35,13 @@ REGISTER_OP(elementwise_div, ops::ElementwiseOp,
             ops::ElementwiseDivOpMaker, elementwise_div_grad,
             ops::ElementwiseOpGrad);
 REGISTER_OP_CPU_KERNEL(
     elementwise_div,
-    ops::ElementwiseDivKernel<paddle::platform::CPUPlace, float>);
+    ops::ElementwiseDivKernel<paddle::platform::CPUPlace, float>,
+    ops::ElementwiseDivKernel<paddle::platform::CPUPlace, double>,
+    ops::ElementwiseDivKernel<paddle::platform::CPUPlace, int>,
+    ops::ElementwiseDivKernel<paddle::platform::CPUPlace, int64_t>);
 REGISTER_OP_CPU_KERNEL(
     elementwise_div_grad,
-    ops::ElementwiseDivGradKernel<paddle::platform::CPUPlace, float>);
+    ops::ElementwiseDivGradKernel<paddle::platform::CPUPlace, float>,
+    ops::ElementwiseDivGradKernel<paddle::platform::CPUPlace, double>,
+    ops::ElementwiseDivGradKernel<paddle::platform::CPUPlace, int>,
+    ops::ElementwiseDivGradKernel<paddle::platform::CPUPlace, int64_t>);
diff --git a/paddle/operators/elementwise_mul_op.cc b/paddle/operators/elementwise_mul_op.cc
index ffa10486f1..8851267a52 100644
--- a/paddle/operators/elementwise_mul_op.cc
+++ b/paddle/operators/elementwise_mul_op.cc
@@ -37,8 +37,12 @@ REGISTER_OP(elementwise_mul, ops::ElementwiseOp, ops::ElementwiseMulOpMaker,
 REGISTER_OP_CPU_KERNEL(
     elementwise_mul,
     ops::ElementwiseMulKernel<paddle::platform::CPUPlace, float>,
-    ops::ElementwiseMulKernel<paddle::platform::CPUPlace, double>);
+    ops::ElementwiseMulKernel<paddle::platform::CPUPlace, double>,
+    ops::ElementwiseMulKernel<paddle::platform::CPUPlace, int>,
+    ops::ElementwiseMulKernel<paddle::platform::CPUPlace, int64_t>);
 REGISTER_OP_CPU_KERNEL(
     elementwise_mul_grad,
     ops::ElementwiseMulGradKernel<paddle::platform::CPUPlace, float>,
-    ops::ElementwiseMulGradKernel<paddle::platform::CPUPlace, double>);
+    ops::ElementwiseMulGradKernel<paddle::platform::CPUPlace, double>,
+    ops::ElementwiseMulGradKernel<paddle::platform::CPUPlace, int>,
+    ops::ElementwiseMulGradKernel<paddle::platform::CPUPlace, int64_t>);
diff --git a/paddle/operators/elementwise_sub_op.cc b/paddle/operators/elementwise_sub_op.cc
index 39702dad0e..95d7979e39 100644
--- a/paddle/operators/elementwise_sub_op.cc
+++ b/paddle/operators/elementwise_sub_op.cc
@@ -34,7 +34,13 @@ REGISTER_OP(elementwise_sub, ops::ElementwiseOp, ops::ElementwiseSubOpMaker,
             elementwise_sub_grad, ops::ElementwiseOpGrad);
 REGISTER_OP_CPU_KERNEL(
     elementwise_sub,
-    ops::ElementwiseSubKernel<paddle::platform::CPUPlace, float>);
+    ops::ElementwiseSubKernel<paddle::platform::CPUPlace, float>,
+    ops::ElementwiseSubKernel<paddle::platform::CPUPlace, double>,
+    ops::ElementwiseSubKernel<paddle::platform::CPUPlace, int>,
+    ops::ElementwiseSubKernel<paddle::platform::CPUPlace, int64_t>);
 REGISTER_OP_CPU_KERNEL(
     elementwise_sub_grad,
-    ops::ElementwiseSubGradKernel<paddle::platform::CPUPlace, float>);
+    ops::ElementwiseSubGradKernel<paddle::platform::CPUPlace, float>,
+    ops::ElementwiseSubGradKernel<paddle::platform::CPUPlace, double>,
+    ops::ElementwiseSubGradKernel<paddle::platform::CPUPlace, int>,
+    ops::ElementwiseSubGradKernel<paddle::platform::CPUPlace, int64_t>);
diff --git a/paddle/operators/reduce_op.h b/paddle/operators/reduce_op.h
index 45043c440b..dd6547542d 100644
--- a/paddle/operators/reduce_op.h
+++ b/paddle/operators/reduce_op.h
@@ -14,6 +14,7 @@
 
 #pragma once
 
+#include "glog/logging.h"
 #include "paddle/framework/eigen.h"
 #include "paddle/framework/op_registry.h"
 
@@ -26,6 +27,10 @@ template <typename T, size_t D, int MajorType = Eigen::RowMajor,
 using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
 
+template <typename T, int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenScalar = framework::EigenScalar<T, MajorType, IndexType>;
+
 struct SumFunctor {
   template <typename Place, typename X, typename Y, typename Dim>
   void operator()(const Place& place, X& x, Y& y, const Dim& dim) {
@@ -133,10 +138,17 @@ class ReduceKernel : public framework::OpKernel<T> {
       dims_vector.erase(dims_vector.begin() + dim);
       dims = framework::make_ddim(dims_vector);
     }
-    auto out = EigenTensor < T, D == 1 ? 1 : (D - 1) > ::From(*output, dims);
+
     auto& place = context.GetEigenDevice<Place>();
     Functor functor;
-    functor(place, x, out, reduce_dim);
+
+    if (D == 1) {
+      auto out = EigenScalar<T>::From(*output);
+      functor(place, x, out, reduce_dim);
+    } else {
+      auto out = EigenTensor<T, (D - 1)>::From(*output, dims);
+      functor(place, x, out, reduce_dim);
+    }
   }
 };
 
@@ -186,13 +198,13 @@ class ReduceGradKernel : public framework::OpKernel<T> {
     auto x_reduce = EigenTensor<T, D>::From(*input1, dims);
     auto x_reduce_grad = EigenTensor<T, D>::From(*input2, dims);
 
-    Eigen::array<int, D> braodcast_dim;
-    for (size_t i = 0; i < D; ++i) braodcast_dim[i] = 1;
-    braodcast_dim[dim] = input0->dims()[dim];
+    Eigen::array<int, D> broadcast_dim;
+    for (size_t i = 0; i < D; ++i) broadcast_dim[i] = 1;
+    broadcast_dim[dim] = input0->dims()[dim];
 
     auto& place = context.GetEigenDevice<Place>();
     Functor functor;
-    functor(place, x, x_reduce, x_grad, x_reduce_grad, braodcast_dim,
-            braodcast_dim[dim]);
+    functor(place, x, x_reduce, x_grad, x_reduce_grad, broadcast_dim,
+            broadcast_dim[dim]);
   }
 };
diff --git a/paddle/parameter/Parameter.cpp b/paddle/parameter/Parameter.cpp
index f031109501..3b0f09cea6 100644
--- a/paddle/parameter/Parameter.cpp
+++ b/paddle/parameter/Parameter.cpp
@@ -200,7 +200,10 @@ void Parameter::setMat(ParameterType pType, int matType) {
                                             false,
                                             useGpu_);
     }
-  } else if (matType == MAT_NORMAL_SHARED) {
+  }
+#ifndef PADDLE_MOBILE_INFERENCE
+  // NOLINTNEXTLINE
+  else if (matType == MAT_NORMAL_SHARED) {
     CHECK_EQ(height * width, bufs_[pType]->getSize());
     size_t blockNum = 0;
     CHECK(isGradShared(&blockNum));
@@ -259,7 +262,10 @@ void Parameter::setMat(ParameterType pType, int matType) {
   } else if (matType == MAT_SPARSE_ROW_AUTO_GROW) {
     CHECK(isGradSparseUpdate());
    mats_[pType] = std::make_shared<SparseAutoGrowRowCpuMatrix>(height, width);
-  } else {
+  }
+#endif
+  // NOLINTNEXTLINE
+  else {
     LOG(FATAL) << "Unsupported mat type" << matType;
   }
 }
diff --git a/paddle/scripts/deb/postinst b/paddle/scripts/deb/postinst
deleted file mode 100644
index 91620b1ee7..0000000000
--- a/paddle/scripts/deb/postinst
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-set -e
-echo "Post install paddle debian package."
-echo "Install some python package used for paddle. You can run "
-echo "   pip install /usr/opt/paddle/share/wheels/*.whl to install them."
-find /usr/ -name '*paddle*.whl' | xargs pip install
diff --git a/paddle/scripts/docker/README.md b/paddle/scripts/docker/README.md
index 76bc30e59b..b5fd68839d 100644
--- a/paddle/scripts/docker/README.md
+++ b/paddle/scripts/docker/README.md
@@ -2,178 +2,198 @@
 
 ## Goals
 
-We want the building procedure generates Docker images so that we can run PaddlePaddle applications on Kubernetes clusters.
+We want to make the building procedures:
 
-We want to build .deb packages so that enterprise users can run PaddlePaddle applications without Docker.
+1. Static, so that builds can be reproduced easily.
+1. Generate python `whl` packages that can be widely used across many distributions.
+1. Build different binaries per release to satisfy different environments:
+    - Binaries for different CUDA and CUDNN versions, like CUDA 7.5, 8.0, 9.0
+    - Binaries containing only capi
+    - Binaries for python with or without wide unicode support.
+1. Build docker images with PaddlePaddle pre-installed, so that we can run
+PaddlePaddle applications directly in docker or on Kubernetes clusters.
 
-We want to minimize the size of generated Docker images and .deb packages so to reduce the download time.
+To achieve this, we created a repo: https://github.com/PaddlePaddle/buildtools
+which provides several docker images that satisfy the `manylinux1`
+requirements. We can then build PaddlePaddle inside these images to generate
+the corresponding `whl` binaries.
 
-We want to encapsulate building tools and dependencies in a *development* Docker image so to ease the tools installation for developers.
+## Run The Build
 
-Developers use various editors (emacs, vim, Eclipse, Jupyter Notebook), so the development Docker image contains only building tools, not editing tools, and developers are supposed to git clone source code into their development computers and map the code into the development container.
+### Build Environments
 
-We want the procedure and tools also work with testing, continuous integration, and releasing.
+The pre-built build environment images are:
 
+| Image | Tag |
+| ----- | --- |
+| paddlepaddle/paddle_manylinux_devel | cuda7.5_cudnn5 |
+| paddlepaddle/paddle_manylinux_devel | cuda8.0_cudnn5 |
+| paddlepaddle/paddle_manylinux_devel | cuda7.5_cudnn7 |
+| paddlepaddle/paddle_manylinux_devel | cuda9.0_cudnn7 |
 
-## Docker Images
-
-So we need two Docker images for each version of PaddlePaddle:
-
-1. `paddle:<version>-dev`
-
-   This a development image contains only the development tools and standardizes the building procedure. Users include:
+### Start Build
 
-   - developers -- no longer need to install development tools on the host, and can build their current work on the host (development computer).
-   - release engineers -- use this to build the official release from certain branch/tag on Github.com.
-   - document writers / Website developers -- Our documents are in the source repo in the form of .md/.rst files and comments in source code. We need tools to extract the information, typeset, and generate Web pages.
+Choose one docker image that suits your environment and run the following
+command to start a build:
 
-   Of course, developers can install building tools on their development computers. But different versions of PaddlePaddle might require different set or version of building tools. Also, it makes collaborative debugging easier if all developers use a unified development environment.
-
-   The development image should include the following tools:
-
-   - gcc/clang
-   - nvcc
-   - Python
-   - sphinx
-   - woboq
-   - sshd
+```bash
+git clone https://github.com/PaddlePaddle/Paddle.git
+cd Paddle
+docker run --rm -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_AVX=ON" -e "WITH_TESTING=OFF" -e "RUN_TEST=OFF" -e "PYTHON_ABI=cp27-cp27mu" paddlepaddle/paddle_manylinux_devel /paddle/paddle/scripts/docker/build.sh
+```
 
-   Many developers work on a remote computer with GPU; they could ssh into the computer and `docker exec` into the development container. However, running `sshd` in the container allows developers to ssh into the container directly.
+After the build finishes, you can find the output `whl` packages under
+`build/python/dist`.
 
-1. `paddle:<version>`
+This command mounts the source directory on the host into `/paddle` in the container, then runs the build script `/paddle/paddle/scripts/docker/build.sh`
+in the container. When it writes to `/paddle/build` in the container, it actually writes to `$PWD/build` on the host.
 
-   This is the production image, generated using the development image.
This image might have multiple variants:
+### Build Options
 
-   - GPU/AVX   `paddle:<version>-gpu`
-   - GPU/no-AVX  `paddle:<version>-gpu-noavx`
-   - no-GPU/AVX  `paddle:<version>`
-   - no-GPU/no-AVX  `paddle:<version>-noavx`
+Users can specify the following Docker build arguments with either "ON" or "OFF" value:
 
-   We allow users to choose between GPU and no-GPU because the GPU version image is much larger than then the no-GPU version.
+| Option | Default | Description |
+| ------ | -------- | ----------- |
+| `WITH_GPU` | OFF | Generates NVIDIA CUDA GPU code and relies on CUDA libraries. |
+| `WITH_AVX` | OFF | Set to "ON" to enable AVX support. |
+| `WITH_TESTING` | ON | Build unit test binaries. |
+| `WITH_MKLDNN` | ON | Build with [IntelĀ® MKL DNN](https://github.com/01org/mkl-dnn) support. |
+| `WITH_MKLML` | ON | Build with [IntelĀ® MKL](https://software.intel.com/en-us/mkl) support. |
+| `WITH_GOLANG` | ON | Build the fault-tolerant parameter server written in go. |
+| `WITH_SWIG_PY` | ON | Build with SWIG python API support. |
+| `WITH_C_API` | OFF | Build capi libraries for inference. |
+| `WITH_PYTHON` | ON | Build with python support. Turn this off if the build is only for capi. |
+| `WITH_STYLE_CHECK` | ON | Check the code style when building. |
+| `PYTHON_ABI` | "" | Build for a specific python ABI; can be cp27-cp27m or cp27-cp27mu. |
+| `RUN_TEST` | OFF | Run unit tests immediately after the build. |
+| `WITH_DOC` | OFF | Build documentation after building the binaries. |
+| `WOBOQ` | OFF | Generate the WOBOQ code viewer under `build/woboq_out`. |
 
-   We allow users the choice between AVX and no-AVX, because some cloud providers don't provide AVX-enabled VMs.
+## Docker Images
 
-## Development Environment
+You can get the latest PaddlePaddle docker images by
+`docker pull paddlepaddle/paddle:<version>` or build one by yourself.
 
-Here we describe how to use above two images. We start from considering our daily development environment.
+### Official Docker Releases
 
-Developers work on a computer, which is usually a laptop or desktop:
+Official docker images are available
+[here](https://hub.docker.com/r/paddlepaddle/paddle/tags/);
+you can choose either the latest image or an image with a release tag like `0.10.0`.
+Currently available tags are:
 
-
+| Tag | Description |
+| ------ | --------------------- |
+| latest | latest CPU only image |
+| latest-gpu | latest binary with GPU support |
+| 0.10.0 | release 0.10.0 CPU only binary image |
+| 0.10.0-gpu | release 0.10.0 with GPU support |
 
-or, they might rely on a more sophisticated box (like with GPUs):
 
-
+### Build Your Own Image
 
-A principle here is that source code lies on the development computer (host) so that editors like Eclipse can parse the source code to support auto-completion.
+Building PaddlePaddle docker images is quite simple since PaddlePaddle can
+be installed by just running `pip install`. A sample `Dockerfile` is:
 
+```dockerfile
+FROM nvidia/cuda:7.5-cudnn5-runtime-centos6
+RUN yum install -y centos-release-SCL
+RUN yum install -y python27
+# This whl package is generated by previous build steps.
+ADD python/dist/paddlepaddle-0.10.0-cp27-cp27mu-linux_x86_64.whl /
+RUN pip install /paddlepaddle-0.10.0-cp27-cp27mu-linux_x86_64.whl && rm -f /*.whl
+```
+Then build the image by running `docker build -t [REPO]/paddle:[TAG] .` under
+the directory containing your own `Dockerfile`.
 
-## Usages
 
+- NOTE: you can choose a different base image for your environment; all available versions can be found [here](https://hub.docker.com/r/nvidia/cuda/).
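+
+As a quick sanity check of an image built this way (the `[REPO]/paddle:[TAG]`
+name below is just the example tag from the `docker build` command above),
+verify that the pip-installed package imports cleanly:
+
+```bash
+docker run --rm [REPO]/paddle:[TAG] python -c "import paddle"
+```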
-### Build the Development Docker Image
+### Use Docker Images
 
-The following commands check out the source code to the host and build the development image `paddle:dev`:
+Suppose that you have written an application program `train.py` using
+PaddlePaddle; we can test and run it using docker:
 
 ```bash
-git clone https://github.com/PaddlePaddle/Paddle paddle
-cd paddle
-docker build -t paddle:dev .
+docker run --rm -it -v $PWD:/work paddlepaddle/paddle /work/train.py
 ```
 
-The `docker build` command assumes that `Dockerfile` is in the root source tree. Note that in this design, this `Dockerfile` is this only one in our repo.
-
-Users can specify a Ubuntu mirror server for faster downloading:
-
-```bash
-docker build -t paddle:dev --build-arg UBUNTU_MIRROR=mirror://mirrors.ubuntu.com/mirrors.txt .
-```
+But this works only if all dependencies of `train.py` are in the production image. If this is not the case, we need to build a new Docker image based on the production image and install the missing dependencies in it.
 
-### Build PaddlePaddle from Source Code
+### Run PaddlePaddle Book In Docker
 
-Given the development image `paddle:dev`, the following command builds PaddlePaddle from the source tree on the development computer (host):
+Our [book repo](https://github.com/paddlepaddle/book) also provides a docker
+image that starts a Jupyter Notebook inside docker so that you can run this book
+using docker:
 
 ```bash
-docker run --rm -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_AVX=ON" -e "WITH_TESTING=OFF" -e "RUN_TEST=OFF" paddle:dev
+docker run -d -p 8888:8888 paddlepaddle/book
 ```
 
-This command mounts the source directory on the host into `/paddle` in the container, so the default entry point of `paddle:dev`, `build.sh`, could build the source code with possible local changes. When it writes to `/paddle/build` in the container, it writes to `$PWD/build` on the host indeed.
-
-`build.sh` builds the following:
-
-- PaddlePaddle binaries,
-- `$PWD/build/paddle-<version>.deb` for production installation, and
-- `$PWD/build/Dockerfile`, which builds the production Docker image.
+Please refer to https://github.com/paddlepaddle/book if you want to build this
+docker image by yourself.
 
-Users can specify the following Docker build arguments with either "ON" or "OFF" value:
-- `WITH_GPU`: ***Required***. Generates NVIDIA CUDA GPU code and relies on CUDA libraries.
-- `WITH_AVX`: ***Required***. Set to "OFF" prevents from generating AVX instructions. If you don't know what is AVX, you might want to set "ON".
-- `WITH_TEST`: ***Optional, default OFF***. Build unit tests binaries. Once you've built the unit tests, you can run these test manually by the following command:
-  ```bash
-  docker run --rm -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_AVX=ON" paddle:dev sh -c "cd /paddle/build; make coverall"
-  ```
-- `RUN_TEST`: ***Optional, default OFF***. Run unit tests after building. You can't run unit tests without building it.
+### Run Distributed Applications
 
-### Build the Production Docker Image
+In our [API design doc](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/api.md#distributed-training), we proposed an API that starts a distributed training job on a cluster. This API needs to build a PaddlePaddle application into a Docker image as above and call kubectl to run it on the cluster. This API might need to generate a Dockerfile like the one above and call `docker build`.
-The following command builds the production image:
+Of course, we can manually build an application image and launch the job using the kubectl tool:
 
 ```bash
-docker build -t paddle -f build/Dockerfile ./build
+docker build -f some/Dockerfile -t myapp .
+docker tag myapp me/myapp
+docker push
+kubectl ...
 ```
 
-This production image is minimal -- it includes binary `paddle`, the shared library `libpaddle.so`, and Python runtime.
+## Docker Images for Developers
 
-### Run PaddlePaddle Applications
+We have a special docker image for developers:
+`paddlepaddle/paddle:<version>-dev`. This image is also generated from
+https://github.com/PaddlePaddle/buildtools
 
-Again the development happens on the host. Suppose that we have a simple application program in `a.py`, we can test and run it using the production image:
+This development image contains only the
+development tools and standardizes the building procedure. Users include:
 
-```bash
-docker run --rm -it -v $PWD:/work paddle /work/a.py
-```
+- developers -- no longer need to install development tools on the host, and can build their current work on the host (development computer).
+- release engineers -- use this to build the official release from certain branch/tag on Github.com.
+- document writers / Website developers -- Our documents are in the source repo in the form of .md/.rst files and comments in source code. We need tools to extract the information, typeset, and generate Web pages.
 
-But this works only if all dependencies of `a.py` are in the production image. If this is not the case, we need to build a new Docker image from the production image and with more dependencies installs.
+Of course, developers can install building tools on their development computers. But different versions of PaddlePaddle might require different sets or versions of build tools. Also, it makes collaborative debugging easier if all developers use a unified development environment.
 
-### Build and Run PaddlePaddle Applications
+The development image contains the following tools:
 
-We need a Dockerfile in https://github.com/paddlepaddle/book that builds Docker image `paddlepaddle/book:<version>`, basing on the PaddlePaddle production image:
+ - gcc/clang
+ - nvcc
+ - Python
+ - sphinx
+ - woboq
+ - sshd
 
-```
-FROM paddlepaddle/paddle:<version>
-RUN pip install -U matplotlib jupyter ...
-COPY . /book
-EXPOSE 8080
-CMD ["jupyter"]
-```
 
+Many developers work on a remote computer with GPU; they could ssh into the computer and `docker exec` into the development container. However, running `sshd` in the container allows developers to ssh into the container directly.
 
-The book image is an example of PaddlePaddle application image. We can build it
-
-```bash
-git clone https://github.com/paddlepaddle/book
-cd book
-docker build -t book .
-```
 
-### Build and Run Distributed Applications
+### Development Workflow
 
+Here we describe the development workflow. We start by considering our daily development environment.
 
-In our [API design doc](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/api.md#distributed-training), we proposed an API that starts a distributed training job on a cluster. This API need to build a PaddlePaddle application into a Docker image as above and calls kubectl to run it on the cluster. This API might need to generate a Dockerfile look like above and call `docker build`.
+Developers work on a computer, which is usually a laptop or desktop:
 
-Of course, we can manually build an application image and launch the job using the kubectl tool:
+
 
-```bash
-docker build -f some/Dockerfile -t myapp .
-docker tag myapp me/myapp
-docker push
-kubectl ...
-```
+or, they might rely on a more sophisticated box (like with GPUs):
+
+
 
+A principle here is that source code lies on the development computer (host) so that editors like Eclipse can parse the source code to support auto-completion.
 
 ### Reading source code with woboq codebrowser
+
 For developers who are interested in the C++ source code, please use -e "WOBOQ=ON" to enable the building of C++ source code into HTML pages using [Woboq codebrowser](https://github.com/woboq/woboq_codebrowser).
 
 The following command builds PaddlePaddle, generates HTML pages from C++ source code, and writes HTML pages into `$HOME/woboq_out` on the host:
 
 ```bash
-docker run -v $PWD:/paddle -v $HOME/woboq_out:/woboq_out -e "WITH_GPU=OFF" -e "WITH_AVX=ON" -e "WITH_TEST=ON" -e "WOBOQ=ON" paddle:dev
+docker run -v $PWD:/paddle -v $HOME/woboq_out:/woboq_out -e "WITH_GPU=OFF" -e "WITH_AVX=ON" -e "WITH_TEST=ON" -e "WOBOQ=ON" paddlepaddle/paddle:latest-dev
 ```
 
 You can open the generated HTML files in your Web browser. Or, if you want to run a Nginx container to serve them for a wider audience, you can run:
diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh
index 256500c56a..e9c89eee1a 100644
--- a/paddle/scripts/docker/build.sh
+++ b/paddle/scripts/docker/build.sh
@@ -1,23 +1,6 @@
 #!/bin/bash
 
-set -xe
-
-
 function cmake_gen() {
-    # Set BASE_IMAGE according to env variables
-    if [[ ${WITH_GPU} == "ON" ]]; then
-        BASE_IMAGE="nvidia/cuda:8.0-cudnn5-runtime-ubuntu16.04"
-    else
-        BASE_IMAGE="ubuntu:16.04"
-    fi
-
-    DOCKERFILE_GPU_ENV=""
-    DOCKERFILE_CUDNN_DSO=""
-    if [[ ${WITH_GPU:-OFF} == 'ON' ]]; then
-        DOCKERFILE_GPU_ENV="ENV LD_LIBRARY_PATH /usr/lib/x86_64-linux-gnu:${LD_LIBRARY_PATH}"
-        DOCKERFILE_CUDNN_DSO="RUN ln -s /usr/lib/x86_64-linux-gnu/libcudnn.so.5 /usr/lib/x86_64-linux-gnu/libcudnn.so"
-    fi
-
     mkdir -p /paddle/build
     cd /paddle/build
 
@@ -26,10 +9,29 @@ function cmake_gen() {
     # delete previous built whl packages
     rm -rf /paddle/paddle/dist 2>/dev/null || true
 
+    # Support building for all python versions, currently
+    # including cp27-cp27m and cp27-cp27mu.
+ PYTHON_FLAGS="" + if [ "$1" != "" ]; then + echo "using python abi: $1" + if [ "$1" == "cp27-cp27m" ]; then + export LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs2/lib:${LD_LIBRARY_PATH#/opt/_internal/cpython-2.7.11-ucs4/lib:} + PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/python/cp27-cp27m/bin/python + -DPYTHON_INCLUDE_DIR:PATH=/opt/python/cp27-cp27m/include/python2.7 + -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-2.7.11-ucs2/lib/libpython2.7.so" + elif [ "$1" == "cp27-cp27mu" ]; then + export LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs4/lib:${LD_LIBRARY_PATH#/opt/_internal/cpython-2.7.11-ucs2/lib:} + PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/python/cp27-cp27mu/bin/python + -DPYTHON_INCLUDE_DIR:PATH=/opt/python/cp27-cp27mu/include/python2.7 + -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-2.7.11-ucs4/lib/libpython2.7.so" + fi + fi + cat < ids(height); std::vector indices(height + 1); indices[0] = 0; @@ -84,6 +85,8 @@ MatrixPtr makeRandomSparseMatrix(size_t height, } return mat; } +#endif + return nullptr; } void generateSequenceStartPositions(size_t batchSize, diff --git a/paddle/trainer/MergeModel.cpp b/paddle/trainer/MergeModel.cpp index f3cfd9f97f..56c38015fb 100644 --- a/paddle/trainer/MergeModel.cpp +++ b/paddle/trainer/MergeModel.cpp @@ -27,6 +27,9 @@ using namespace paddle; // NOLINT using namespace std; // NOLINT int main(int argc, char** argv) { + initMain(argc, argv); + initPython(argc, argv); + if (FLAGS_model_dir.empty() || FLAGS_config_file.empty() || FLAGS_model_file.empty()) { LOG(INFO) << "Usage: ./paddle_merge_model --model_dir=pass-00000 " @@ -34,9 +37,6 @@ int main(int argc, char** argv) { return 0; } - initMain(argc, argv); - initPython(argc, argv); - string confFile = FLAGS_config_file; #ifndef PADDLE_WITH_CUDA FLAGS_use_gpu = false; diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index a4e25c73bc..1df9a6083e 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -888,7 +888,7 @@ def mixed_layer(size=0, :type size: int :param input: The input of this layer. It is an optional parameter. If set, then this function will just return layer's name. - :param act: Activation Type. LinearActivation is the default. + :param act: Activation Type. LinearActivation is the default activation. :type act: BaseActivation :param bias_attr: The bias attribute. If the parameter is set to False or an object whose type is not ParameterAttribute, no bias is defined. If the @@ -1030,7 +1030,7 @@ def fc_layer(input, :type input: LayerOutput | list | tuple :param size: The layer dimension. :type size: int - :param act: Activation Type. TanhActivation is the default. + :param act: Activation Type. TanhActivation is the default activation. :type act: BaseActivation :param param_attr: The Parameter Attribute|list. :type param_attr: ParameterAttribute @@ -1527,7 +1527,7 @@ def lstmemory(input, :type input: LayerOutput :param reverse: is sequence process reversed or not. :type reverse: bool - :param act: Activation type. TanhActivation is the default. :math:`h_t` + :param act: Activation type. TanhActivation is the default activation. :type act: BaseActivation :param gate_act: gate activation type, SigmoidActivation by default. :type gate_act: BaseActivation @@ -1920,7 +1920,7 @@ def repeat_layer(input, False for treating input as column vector and repeating in the row direction. :type as_row_vector: bool - :param act: Activation type. 
IdentityActivation is the default. + :param act: Activation type. IdentityActivation is the default activation. :type act: BaseActivation :type name: basestring :param layer_attr: extra layer attributes. @@ -1974,7 +1974,7 @@ def seq_reshape_layer(input, :type reshape_size: int :param name: The name of this layer. It is optional. :type name: basestring - :param act: Activation type. IdentityActivation is the default. + :param act: Activation type. IdentityActivation is the default activation. :type act: BaseActivation :param layer_attr: extra layer attributes. :type layer_attr: ExtraLayerAttribute. @@ -2487,7 +2487,7 @@ def img_conv_layer(input, shape will be (filter_size, filter_size_y). :type filter_size_y: int | None :param num_filters: Each filter group's number of filter - :param act: Activation type. ReluActivation is the default. + :param act: Activation type. ReluActivation is the default activation. :type act: BaseActivation :param groups: Group size of filters. :type groups: int @@ -3255,7 +3255,7 @@ def addto_layer(input, act=None, name=None, bias_attr=None, layer_attr=None): :param input: Input layers. It could be a LayerOutput or list/tuple of LayerOutput. :type input: LayerOutput | list | tuple - :param act: Activation Type. LinearActivation is the default. + :param act: Activation Type. LinearActivation is the default activation. :type act: BaseActivation :param bias_attr: The bias attribute. If the parameter is set to False or an object whose type is not ParameterAttribute, no bias is defined. If the @@ -3313,7 +3313,7 @@ def concat_layer(input, act=None, name=None, layer_attr=None, bias_attr=None): :type name: basestring :param input: input layers or projections :type input: list | tuple | collections.Sequence - :param act: Activation type. IdentityActivation is the default. + :param act: Activation type. IdentityActivation is the default activation. :type act: BaseActivation :param layer_attr: Extra Layer Attribute. :type layer_attr: ExtraLayerAttribute @@ -3408,7 +3408,7 @@ def seq_concat_layer(a, b, act=None, name=None, layer_attr=None, :type a: LayerOutput :param b: input sequence layer :type b: LayerOutput - :param act: Activation type. IdentityActivation is the default. + :param act: Activation type. IdentityActivation is the default activation. :type act: BaseActivation :param layer_attr: Extra Layer Attribute. :type layer_attr: ExtraLayerAttribute @@ -3574,30 +3574,32 @@ def lstm_step_layer(input, ... - This layer has two outputs. Default output is :math:`h_t`. The other - output is :math:`o_t`, whose name is 'state' and can use + This layer has two outputs. The default output is :math:`h_t`. The other + output is :math:`o_t`, whose name is 'state' and users can use :code:`get_output_layer` to extract this output. :param name: The name of this layer. It is optional. :type name: basestring - :param size: Layer's size. NOTE: lstm layer's size, should be equal to - :code:`input.size/4`, and should be equal to - :code:`state.size`. + :param size: The dimension of this layer's output, which must be + equal to the dimension of the state. :type size: int - :param input: input layer. :math:`Wx_t + Wh_{t-1}` + :param input: The input of this layer. :type input: LayerOutput - :param state: State Layer. :math:`c_{t-1}` + :param state: The state of the LSTM unit. :type state: LayerOutput - :param act: Activation type. TanhActivation is the default. + :param act: Activation type. TanhActivation is the default activation. 
:type act: BaseActivation - :param gate_act: Gate Activation Type. SigmoidActivation is the default. + :param gate_act: Activation type of the gate. SigmoidActivation is the + default activation. :type gate_act: BaseActivation - :param state_act: State Activation Type. TanhActivation is the default. + :param state_act: Activation type of the state. TanhActivation is the + default activation. :type state_act: BaseActivation - :param bias_attr: The parameter attribute for bias. If this parameter is - set to True or None, the bias is initialized to zero. - :type bias_attr: ParameterAttribute | None | True - :param layer_attr: layer's extra attribute. + :param bias_attr: The bias attribute. If the parameter is set to False or an object + whose type is not ParameterAttribute, no bias is defined. If the + parameter is set to True, the bias is initialized to zero. + :type bias_attr: ParameterAttribute | None | bool | Any + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for details. :type layer_attr: ExtraLayerAttribute :return: LayerOutput object. :rtype: LayerOutput @@ -3642,23 +3644,31 @@ def gru_step_layer(input, layer_attr=None): """ - :param input: + :param input: The input of this layer, whose dimension must be divisible by 3. :type input: LayerOutput - :param output_mem: - :param size: - :param act: + :param output_mem: A memory which memorizes the output of this layer at the previous + time step. + :type output_mem: LayerOutput + :param size: The dimension of this layer's output. If it is not set or set to None, + it will be set to one-third of the dimension of the input automatically. + :type size: int + :param act: Activation type of this layer's output. TanhActivation + is the default activation. :type act: BaseActivation :param name: The name of this layer. It is optional. - :param gate_act: Activation type of this layer's two gates. Default is Sigmoid. + :type name: basestring + :param gate_act: Activation type of this layer's two gates. SigmoidActivation is + the default activation. :type gate_act: BaseActivation :param bias_attr: The parameter attribute for bias. If this parameter is set to False or an object whose type is not ParameterAttribute, no bias is defined. If this parameter is set to True, the bias is initialized to zero. :type bias_attr: ParameterAttribute | None | bool | Any - :param param_attr: the parameter_attribute for transforming the output_mem - from previous step. - :param layer_attr: + :param param_attr: The parameter attribute. See ParameterAttribute for details. + :type param_attr: ParameterAttribute + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for details. + :type layer_attr: ExtraLayerAttribute :return: LayerOutput object. :rtype: LayerOutput """ @@ -3703,25 +3713,34 @@ def gru_step_naive_layer(input, param_attr=None, layer_attr=None): """ - GRU Step Layer, but using MixedLayer to generate. It support ERROR_CLIPPING + GRU Step Layer, which is realized using the PaddlePaddle API. It supports ERROR_CLIPPING and DROPOUT. - :param input: - :param output_mem: - :param size: + :param input: The input of this layer, whose dimensionality must be divisible by 3. + :param output_mem: A memory which memorizes the output of this layer at the previous + time step. + :type output_mem: LayerOutput + :param size: The dimension of this layer's output. If it is not set or set to None, + it will be set to one-third of the dimension of the input automatically. + :type size: int :param name: The name of this layer. It is optional.
- :param act: + :type name: basestring + :param act: Activation type of this layer's output. TanhActivation + is the default activation. :type act: BaseActivation - :param gate_act: Activation type of this layer's two gates. Default is Sigmoid. + :param gate_act: Activation type of this layer's two gates. SigmoidActivation + is the default activation. :type gate_act: BaseActivation :param bias_attr: The parameter attribute for bias. If this parameter is set to False or an object whose type is not ParameterAttribute, no bias is defined. If this parameter is set to True, the bias is initialized to zero. :type bias_attr: ParameterAttribute | None | bool | Any - :param param_attr: - :param layer_attr: - :return: + :param param_attr: The parameter attribute. See ParameterAttribute for details. + :type param_attr: ParameterAttribute + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for details. + :type layer_attr: ExtraLayerAttribute + :return: LayerOutput object. :rtype: LayerOutput """ if input.size % 3 != 0: @@ -3783,12 +3802,13 @@ def get_output_layer(input, arg_name, name=None, layer_attr=None): :param name: The name of this layer. It is optional. :type name: basestring - :param input: get output layer's input. And this layer should contains + :param input: The input layer, which should contain multiple outputs. :type input: LayerOutput - :param arg_name: Output name from input. + :param arg_name: The name of the output to be extracted from the input layer. :type arg_name: basestring - :param layer_attr: Layer's extra attribute. + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. :return: LayerOutput object. :rtype: LayerOutput """ @@ -3845,18 +3865,20 @@ def recurrent_layer(input, :param input: The input of this layer. :type input: LayerOutput - :param act: Activation type. TanhActivation is the default. + :param act: Activation type. TanhActivation is the default activation. :type act: BaseActivation :param bias_attr: The parameter attribute for bias. If this parameter is set to False or an object whose type is not ParameterAttribute, no bias is defined. If the parameter is set to True, the bias is initialized to zero. :type bias_attr: ParameterAttribute | None | bool | Any - :param param_attr: parameter attribute. + :param param_attr: The parameter attribute. See ParameterAttribute for + details. :type param_attr: ParameterAttribute :param name: The name of this layer. It is optional. :type name: basestring - :param layer_attr: Layer Attribute. + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. :type layer_attr: ExtraLayerAttribute :return: LayerOutput object. :rtype: LayerOutput @@ -3881,7 +3903,7 @@ def recurrent_layer(input, class StaticInput(object): """ StaticInput is only used in recurrent_group which defines a read-only memory - that can be a sequence or non-sequence. + and can be a sequence or non-sequence. :param size: DEPRECATED :param is_seq: DEPRECATED """ @@ -3914,8 +3936,8 @@ def recurrent_group(step, input, reverse=False, name=None, targetInlink=None): Recurrent layer group is an extremely flexible recurrent unit in PaddlePaddle. As long as the user defines the calculation done within a time step, PaddlePaddle will iterate such a recurrent calculation over - sequence input. This is extremely usefull for attention based model, or - Neural Turning Machine like models. + sequence input. This is useful for attention-based models, or Neural + Turing Machine-like models.
The basic usage (time steps) is: @@ -3937,18 +3959,17 @@ def recurrent_group(step, input, reverse=False, name=None, targetInlink=None): demo/seqToseq/seqToseq_net.py - sequence steps: paddle/gserver/tests/sequence_nest_layer_group.conf - :param step: recurrent one time step function.The input of this function is - input of the group. The return of this function will be - recurrent group's return value. + :param step: A step function which takes the input of recurrent_group as its own + input and returns values as recurrent_group's output every time step. - The recurrent group scatter a sequence into time steps. And - for each time step, will invoke step function, and return - a time step result. Then gather each time step of output into + The recurrent group scatters a sequence into time steps. And + for each time step, it will invoke the step function and return + a time step result. Then it gathers the outputs of each time step into layer group's output. :type step: callable - :param name: recurrent_group's name. + :param name: The recurrent_group's name. It is optional. :type name: basestring :param input: Input links array. @@ -3956,11 +3977,11 @@ def recurrent_group(step, input, reverse=False, name=None, targetInlink=None): LayerOutput will be scattered into time steps. SubsequenceInput will be scattered into sequence steps. StaticInput will be imported to each time step, and doesn't change - through time. It's a mechanism to access layer outside step function. + over time. It's a mechanism to access a layer outside the step function. :type input: LayerOutput | StaticInput | SubsequenceInput | list | tuple - :param reverse: If reverse is set true, the recurrent unit will process the + :param reverse: If reverse is set to True, the recurrent unit will process the input sequence in a reverse order. :type reverse: bool @@ -4095,7 +4116,8 @@ def maxid_layer(input, name=None, layer_attr=None): :type input: LayerOutput :param name: The name of this layer. It is optional. :type name: basestring - :param layer_attr: extra layer attributes. + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. :type layer_attr: ExtraLayerAttribute. :return: LayerOutput object. :rtype: LayerOutput @@ -4128,11 +4150,12 @@ def out_prod_layer(input1, input2, name=None, layer_attr=None): :param name: The name of this layer. It is optional. :type name: basestring - :param input1: The first input layer name. + :param input1: The first input layer. :type input: LayerOutput - :param input2: The second input layer name. + :param input2: The second input layer. :type input2: LayerOutput - :param layer_attr: extra layer attributes. + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. :type layer_attr: ExtraLayerAttribute. :return: LayerOutput object. :rtype: LayerOutput @@ -4171,9 +4194,10 @@ def eos_layer(input, eos_id, name=None, layer_attr=None): :type name: basestring :param input: The input of this layer. :type input: LayerOutput - :param eos_id: end id of sequence + :param eos_id: End id of sequence :type eos_id: int - :param layer_attr: extra layer attributes. + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. :type layer_attr: ExtraLayerAttribute. :return: LayerOutput object. :rtype: LayerOutput @@ -4234,8 +4258,9 @@ def beam_search(step, - machine translation : demo/seqToseq/translation/gen.conf \ demo/seqToseq/seqToseq_net.py - :param name: Name of the recurrent unit that generates sequences.
- :type name: base string + :param name: The name of the recurrent unit that is responsible for + generating sequences. It is optional. + :type name: basestring :param step: A callable function that defines the calculation in a time step, and it is applied to sequences with arbitrary length by sharing a same set of weights. @@ -4360,16 +4385,18 @@ def square_error_cost(input, :param name: The name of this layer. It is optional. :type name: basestring - :param input: Network prediction. + :param input: The first input layer. :type input: LayerOutput - :param label: Data label. + :param label: The input label. :type label: LayerOutput - :param weight: The weight affects the cost, namely the scale of cost. - It is an optional argument. + :param weight: The weight layer defines a weight for each sample in the + mini-batch. It is optional. :type weight: LayerOutput - :param coeff: The coefficient affects the gradient in the backward. + :param coeff: The weight of the gradient in the back propagation. + 1.0 is the default value. :type coeff: float - :param layer_attr: layer's extra attribute. + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. :type layer_attr: ExtraLayerAttribute :return: LayerOutput object. :rtype: LayerOutput @@ -4402,17 +4429,20 @@ def classification_cost(input, :param name: The name of this layer. It is optional. :type name: basestring - :param input: input layer name. network output. + :param input: The first input layer. :type input: LayerOutput - :param label: label layer name. data_layer often. + :param label: The input label. :type label: LayerOutput - :param weight: The weight affects the cost, namely the scale of cost. - It is an optional argument. + :param weight: The weight layer defines a weight for each sample in the + mini-batch. It is optional. :type weight: LayerOutput - :param evaluator: Evaluator method. - :param layer_attr: layer's extra attribute. + :param evaluator: Evaluator method. classification_error_evaluator is the default. + :type evaluator: Evaluator method + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. :type layer_attr: ExtraLayerAttribute - :param coeff: The coefficient affects the gradient in the backward. + :param coeff: The weight of the gradient in the back propagation. + 1.0 is the default value. :type coeff: float :return: LayerOutput object. :rtype: LayerOutput @@ -4465,7 +4495,7 @@ def conv_operator(img, Different from img_conv_layer, conv_op is an Operator, which can be used in mixed_layer. And conv_op takes two inputs to perform convolution. The first input is the image and the second is filter kernel. It only - support GPU mode. + supports GPU mode. The example usage is: @@ -4477,27 +4507,31 @@ num_filters=64, num_channels=64) - :param img: input image + :param img: The input image. :type img: LayerOutput - :param filter: input filter + :param filter: The input filter. :type filter: LayerOutput - :param filter_size: The x dimension of a filter kernel. + :param filter_size: The dimension of the filter kernel on the x axis. :type filter_size: int - :param filter_size_y: The y dimension of a filter kernel. Since - PaddlePaddle now supports rectangular filters, - the filter's shape can be (filter_size, filter_size_y). + :param filter_size_y: The dimension of the filter kernel on the y axis. + If the parameter is not set or set to None, it will + be set to 'filter_size' automatically.
:type filter_size_y: int - :param num_filters: channel of output data. + :param num_filters: The number of the output channels. :type num_filters: int - :param num_channels: channel of input data. + :param num_channels: The number of the input channels. If the parameter is not set + or set to None, it will be automatically set to the channel + number of the 'img'. :type num_channels: int - :param stride: The x dimension of the stride. + :param stride: The stride on the x axis. :type stride: int - :param stride_y: The y dimension of the stride. + :param stride_y: The stride on the y axis. If the parameter is not set or + set to None, it will be set to 'stride' automatically. :type stride_y: int - :param padding: The x dimension of padding. + :param padding: The padding size on the x axis. :type padding: int - :param padding_y: The y dimension of padding. + :param padding_y: The padding size on the y axis. If the parameter is not set + or set to None, it will be set to 'padding' automatically. :type padding_y: int :return: A ConvOperator Object. :rtype: ConvOperator @@ -4548,9 +4582,9 @@ def conv_projection(input, param_attr=None, trans=False): """ - Different from img_conv_layer and conv_op, conv_projection is an Projection, - which can be used in mixed_layer and conat_layer. It use cudnn to implement - conv and only support GPU mode. + Different from img_conv_layer and conv_op, conv_projection is a Projection, + which can be used in mixed_layer and concat_layer. It uses cudnn to implement + convolution and only supports GPU mode. The example usage is: @@ -4563,32 +4597,45 @@ :param input: The input of this layer. :type input: LayerOutput - :param filter_size: The x dimension of a filter kernel. - :type filter_size: int - :param filter_size_y: The y dimension of a filter kernel. Since - PaddlePaddle now supports rectangular filters, - the filter's shape can be (filter_size, filter_size_y). + :param filter_size: The dimensions of the filter kernel. If the parameter is + set to one integer, the two dimensions on the x and y axes + will be the same when filter_size_y is not set. If it is set + to a list, the first element indicates the dimension on + the x axis, and the second is used to specify the dimension + on the y axis when filter_size_y is not provided. + :type filter_size: int | tuple | list + :param filter_size_y: The dimension of the filter kernel on the y axis. If the parameter + is not set, it will be set automatically according to filter_size. :type filter_size_y: int - :param num_filters: channel of output data. + :param num_filters: The number of filters. :type num_filters: int - :param num_channels: channel of input data. + :param num_channels: The number of the input channels. :type num_channels: int - :param stride: The x dimension of the stride. - :type stride: int - :param stride_y: The y dimension of the stride. + :param stride: The strides. If the parameter is set to one integer, the strides + on the x and y axes will be the same when stride_y is not set. If it is + set to a list, the first element indicates the stride on the x axis, + and the second is used to specify the stride on the y axis when + stride_y is not provided. + :type stride: int | tuple | list + :param stride_y: The stride on the y axis. :type stride_y: int - :param padding: The x dimension of padding. - :type padding: int - :param padding_y: The y dimension of padding. + :param padding: The padding sizes.
If the parameter is set to one integer, the padding + sizes on the x and y axes will be the same when padding_y is not set. If it + is set to a list, the first element indicates the padding size on the + x axis, and the second is used to specify the padding size on the y axis + when padding_y is not provided. + :type padding: int | tuple | list + :param padding_y: The padding size on the y axis. :type padding_y: int :param groups: The group number. :type groups: int - :param param_attr: Convolution param attribute. None means default attribute + :param param_attr: The parameter attribute of the convolution. See ParameterAttribute for + details. :type param_attr: ParameterAttribute - :param trans: whether it is convTrans or conv + :param trans: Whether it is ConvTransProjection or ConvProjection. :type trans: bool - :return: A DotMulProjection Object. - :rtype: DotMulProjection + :return: A Projection Object. + :rtype: ConvTransProjection | ConvProjection """ if num_channels is None: assert input.num_filters is not None @@ -4653,13 +4700,13 @@ def pad_layer(input, layer_attr=None): """ This operation pads zeros to the input data according to pad_c,pad_h - and pad_w. pad_c, pad_h, pad_w specifies the which dimension and size - of padding. And the input data shape is NCHW. + and pad_w. pad_c, pad_h, pad_w specify the size in the corresponding + dimension. And the input data shape is NCHW. - For example, pad_c=[2,3] means padding 2 zeros before the - input data and 3 zeros after the input data in channel dimension. - pad_h means padding zeros in height dimension. pad_w means padding zeros - in width dimension. + For example, pad_c=[2,3] means padding 2 zeros before the input data + and 3 zeros after the input data in the channel dimension. pad_h means + padding zeros in the height dimension. pad_w means padding zeros in the + width dimension. For example, @@ -4696,13 +4743,14 @@ def pad_layer(input, :param input: The input of this layer. :type input: LayerOutput - :param pad_c: padding size in channel dimension. + :param pad_c: The padding size in the channel dimension. :type pad_c: list | None - :param pad_h: padding size in height dimension. + :param pad_h: The padding size in the height dimension. :type pad_h: list | None - :param pad_w: padding size in width dimension. + :param pad_w: The padding size in the width dimension. :type pad_w: list | None - :param layer_attr: Extra Layer Attribute. + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. :type layer_attr: ExtraLayerAttribute :param name: The name of this layer. It is optional. :type name: basestring @@ -4751,7 +4799,7 @@ def pad_layer(input, @layer_support() def conv_shift_layer(a, b, name=None, layer_attr=None): """ - This layer performs cyclic convolution for two input. For example: + This layer performs cyclic convolution on two inputs. For example: - a[in]: contains M elements. - b[in]: contains N elements (N should be odd). - c[out]: contains M elements. .. math:: c[i] = \sum_{j=-(N-1)/2}^{(N-1)/2}a_{i+j} * b_{j} - In this formular: + In this formula: - a's index is computed modulo M. When it is negative, then get item from the right side (which is the end of array) to the left. - b's index is computed modulo N. When it is negative, then get item from @@ -4774,11 +4822,12 @@ def conv_shift_layer(a, b, name=None, layer_attr=None): :param name: The name of this layer. It is optional. :type name: basestring - :param a: Input layer a.
+ :param a: The first input of this layer. :type a: LayerOutput - :param b: input layer b. + :param b: The second input of this layer. :type b: LayerOutput - :param layer_attr: layer's extra attribute. + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. :type layer_attr: ExtraLayerAttribute :return: LayerOutput object. :rtype: LayerOutput @@ -4809,8 +4858,8 @@ def tensor_layer(a, bias_attr=None, layer_attr=None): """ - This layer performs tensor operation for two input. - For example, each sample: + This layer performs a tensor operation on two inputs. + For example: .. math:: y_{i} = a * W_{i} * {b^\mathrm{T}}, i=0,1,...,K-1 @@ -4830,22 +4879,24 @@ :param name: The name of this layer. It is optional. :type name: basestring - :param a: Input layer a. + :param a: The first input of this layer. :type a: LayerOutput - :param b: input layer b. + :param b: The second input of this layer. :type b: LayerOutput - :param size: the layer dimension. - :type size: int. - :param act: Activation type. LinearActivation is the default. + :param size: The dimension of this layer. + :type size: int + :param act: Activation type. LinearActivation is the default activation. :type act: BaseActivation - :param param_attr: The Parameter Attribute. + :param param_attr: The parameter attribute. See ParameterAttribute for + details. :type param_attr: ParameterAttribute :param bias_attr: The parameter attribute for bias. If this parameter is set to False or an object whose type is not ParameterAttribute, no bias is defined. If this parameter is set to True, the bias is initialized to zero. :type bias_attr: ParameterAttribute | None | bool | Any - :param layer_attr: Extra Layer config. + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. :type layer_attr: ExtraLayerAttribute | None :return: LayerOutput object. :rtype: LayerOutput @@ -4881,7 +4932,7 @@ def selective_fc_layer(input, layer_attr=None): """ Selectived fully connected layer. Different from fc_layer, the output - of this layer maybe sparse. It requires an additional input to indicate + of this layer can be sparse. It requires an additional input to indicate several selected columns for output. If the selected columns is not specified, selective_fc_layer acts exactly like fc_layer. The simple usage is: @@ -4895,22 +4946,34 @@ :type name: basestring :param input: The input of this layer. :type input: LayerOutput | list | tuple - :param select: The select layer. The output of select layer should be a - sparse binary matrix, and treat as the mask of selective fc. - If is None, acts exactly like fc_layer. + :param select: The layer to select columns to output. It should be a sparse + binary matrix, and is treated as the mask of selective fc. If + it is not set or set to None, selective_fc_layer acts exactly + like fc_layer. :type select: LayerOutput - :param size: The layer dimension. + :param size: The dimension of this layer, which should be equal to that of + the layer 'select'. :type size: int - :param act: Activation type. TanhActivation is the default. + :param act: Activation type. TanhActivation is the default activation. :type act: BaseActivation - :param param_attr: The Parameter Attribute. + :param pass_generation: The flag which indicates whether it is during generation. + :type pass_generation: bool + :param has_selected_colums: The flag which indicates whether the parameter 'select' + has been set. True is the default.
+ :type has_selected_colums: bool + :param mul_ratio: A ratio that helps to judge how sparse the output is and determines + the computation method for speed consideration. + :type mul_ratio: float + :param param_attr: The parameter attribute. See ParameterAttribute for + details. :type param_attr: ParameterAttribute :param bias_attr: The parameter attribute for bias. If this parameter is set to False or an object whose type is not ParameterAttribute, no bias is defined. If this parameter is set to True, the bias is initialized to zero. :type bias_attr: ParameterAttribute | None | bool | Any - :param layer_attr: Extra Layer config. + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. :type layer_attr: ExtraLayerAttribute | None :return: LayerOutput object. :rtype: LayerOutput @@ -4961,7 +5024,7 @@ def selective_fc_layer(input, @layer_support() def sampling_id_layer(input, name=None, layer_attr=None): """ - A layer for sampling id from multinomial distribution from the input layer. + A layer for sampling an id from the multinomial distribution given by the input layer. Sampling one id for one sample. The simple usage is: @@ -4974,8 +5037,9 @@ def sampling_id_layer(input, name=None, layer_attr=None): :type input: LayerOutput :param name: The name of this layer. It is optional. :type name: basestring - :param layer_attr: Extra Layer config. - :type layer_attr: ExtraLayerAttribute | None + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. + :type layer_attr: ExtraLayerAttribute :return: LayerOutput object. :rtype: LayerOutput """ @@ -4996,8 +5060,7 @@ def slope_intercept_layer(input, intercept=0.0, layer_attr=None): """ - This layer for applying a slope and an intercept to the input - element-wise. There is no activation and weight. + This layer applies a slope and an intercept to the input. .. math:: y = slope * x + intercept @@ -5012,12 +5075,13 @@ :type input: LayerOutput :param name: The name of this layer. It is optional. :type name: basestring - :param slope: the scale factor. - :type slope: float. - :param intercept: the offset. - :type intercept: float. - :param layer_attr: Extra Layer config. - :type layer_attr: ExtraLayerAttribute | None + :param slope: The scale factor. + :type slope: float + :param intercept: The offset. + :type intercept: float + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. + :type layer_attr: ExtraLayerAttribute :return: LayerOutput object. :rtype: LayerOutput """ @@ -5072,12 +5136,13 @@ def linear_comb_layer(weights, vectors, size=None, name=None, layer_attr=None): :type weights: LayerOutput :param vectors: The vector layer. :type vectors: LayerOutput - :param size: the dimension of this layer. + :param size: The dimension of this layer. :type size: int :param name: The name of this layer. It is optional. :type name: basestring - :param layer_attr: Extra Layer config. - :type layer_attr: ExtraLayerAttribute | None + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. + :type layer_attr: ExtraLayerAttribute :return: LayerOutput object. :rtype: LayerOutput """ @@ -5124,11 +5189,11 @@ def block_expand_layer(input, outputW = 1 + (2 * padding_x + imgSizeW - block_x + stride_x - 1) / stride_x - The expand method is the same with ExpandConvLayer, but saved the transposed + The expanding method is the same as that of ExpandConvLayer, but it saves the transposed value.
After expanding, output.sequenceStartPositions will store timeline. - The number of time steps are outputH * outputW and the dimension of each + The number of time steps is outputH * outputW and the dimension of each time step is block_y * block_x * num_channels. This layer can be used after - convolution neural network, and before recurrent neural network. + a convolutional neural network, and before a recurrent neural network. The simple usage is: @@ -5143,8 +5208,10 @@ def block_expand_layer(input, :param input: The input of this layer. :type input: LayerOutput - :param num_channels: The channel number of input layer. - :type num_channels: int | None + :param num_channels: The number of input channels. If the parameter is not set or + set to None, its actual value will be automatically set to + the channel number of the input. + :type num_channels: int :param block_x: The width of sub block. :type block_x: int :param block_y: The width of sub block. @@ -5158,9 +5225,10 @@ def block_expand_layer(input, :param padding_y: The padding size in vertical direction. :type padding_y: int :param name: The name of this layer. It is optional. - :type name: None | basestring. + :type name: basestring. - :param layer_attr: Extra Layer config. + :type name: basestring + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. :type layer_attr: ExtraLayerAttribute :return: LayerOutput object. :rtype: LayerOutput """ @@ -5190,12 +5258,19 @@ def block_expand_layer(input, @layer_support() def maxout_layer(input, groups, num_channels=None, name=None, layer_attr=None): """ - A layer to do max out on conv layer output. - - Input: output of a conv layer. - - Output: feature map size same as input. Channel is (input channel) / groups. + A layer to do max out on convolutional layer output. + - Input: the output of a convolutional layer. + - Output: feature map size same as the input's, and its channel number is + (input channel) / groups. So groups should be larger than 1, and the num of channels should be able - to devided by groups. + to be divided by groups. + + Reference: + Maxout Networks + http://www.jmlr.org/proceedings/papers/v28/goodfellow13.pdf + Multi-digit Number Recognition from Street View Imagery using Deep Convolutional Neural Networks + https://arxiv.org/pdf/1312.6082v4.pdf .. math:: y_{si+j} = \max_k x_{gsi + sk + j} @@ -5205,12 +5280,6 @@ def maxout_layer(input, groups, num_channels=None, name=None, layer_attr=None): 0 \le j < s 0 \le k < groups - Please refer to Paper: - - Maxout Networks: http://www.jmlr.org/proceedings/papers/v28/goodfellow13.pdf - - Multi-digit Number Recognition from Street View \ - Imagery using Deep Convolutional Neural Networks: \ - https://arxiv.org/pdf/1312.6082v4.pdf - The simple usage is: .. code-block:: python @@ -5221,14 +5290,16 @@ def maxout_layer(input, groups, num_channels=None, name=None, layer_attr=None): :param input: The input of this layer. :type input: LayerOutput - :param num_channels: The channel number of input layer. If None will be set - automatically from previous output. - :type num_channels: int | None + :param num_channels: The number of input channels. If the parameter is not set or + set to None, its actual value will be automatically set to + the channel number of the input. + :type num_channels: int :param groups: The group number of input layer. :type groups: int :param name: The name of this layer. It is optional. - :type name: None | basestring.
- :param layer_attr: Extra Layer attribute. + :type name: basestring + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. :type layer_attr: ExtraLayerAttribute :return: LayerOutput object. :rtype: LayerOutput @@ -5260,20 +5331,20 @@ def ctc_layer(input, layer_attr=None): """ Connectionist Temporal Classification (CTC) is designed for temporal - classication task. That is, for sequence labeling problems where the + classification task, e.g., sequence labeling problems where the alignment between the inputs and the target labels is unknown. - More details can be found by referring to `Connectionist Temporal - Classification: Labelling Unsegmented Sequence Data with Recurrent - Neural Networks `_ + Reference: + Connectionist Temporal Classification: Labelling Unsegmented Sequence Data + with Recurrent Neural Networks + http://machinelearning.wustl.edu/mlpapers/paper_files/icml2006_GravesFGS06.pdf Note: - Considering the 'blank' label needed by CTC, you need to use - (num_classes + 1) as the input size. num_classes is the category number. - And the 'blank' is the last category index. So the size of 'input' layer, such as - fc_layer with softmax activation, should be num_classes + 1. The size of ctc_layer - should also be num_classes + 1. + Considering the 'blank' label needed by CTC, you need to use (num_classes + 1) + as the size of the input, where num_classes is the category number. + And the 'blank' is the last category index. So the size of 'input' layer (e.g. + fc_layer with softmax activation) should be (num_classes + 1). The size of + ctc_layer should also be (num_classes + 1). The example usage is: @@ -5286,16 +5357,17 @@ def ctc_layer(input, :param input: The input of this layer. :type input: LayerOutput - :param label: The data layer of label with variable length. + :param label: The input label. :type label: LayerOutput - :param size: category numbers + 1. + :param size: The dimension of this layer, which must be equal to (category number + 1). :type size: int :param name: The name of this layer. It is optional. - :type name: basestring | None - :param norm_by_times: Whether to normalization by times. False by default. + :type name: basestring + :param norm_by_times: Whether to do normalization by times. False is the default. :type norm_by_times: bool - :param layer_attr: Extra Layer config. - :type layer_attr: ExtraLayerAttribute | None + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. + :type layer_attr: ExtraLayerAttribute :return: LayerOutput object. :rtype: LayerOutput """ @@ -5336,20 +5408,19 @@ def warp_ctc_layer(input, building process, PaddlePaddle will clone the source codes, build and install it to :code:`third_party/install/warpctc` directory. - More details of CTC can be found by referring to `Connectionist Temporal - Classification: Labelling Unsegmented Sequence Data with Recurrent - Neural Networks `_. + Reference: + Connectionist Temporal Classification: Labelling Unsegmented Sequence Data + with Recurrent Neural Networks + http://machinelearning.wustl.edu/mlpapers/paper_files/icml2006_GravesFGS06.pdf Note: - - Let num_classes represent the category number. Considering the 'blank' - label needed by CTC, you need to use (num_classes + 1) as the input size. - Thus, the size of both warp_ctc layer and 'input' layer should be set to - num_classes + 1. + - Let num_classes represent the category number.
Considering the 'blank' + label needed by CTC, you need to use (num_classes + 1) as the size of + the warp_ctc layer. - You can set 'blank' to any value ranged in [0, num_classes], which - should be consistent as that used in your labels. + should be consistent with those used in your labels. - As a native 'softmax' activation is interated to the warp-ctc library, - 'linear' activation is expected instead in the 'input' layer. + 'linear' activation is expected to be used instead in the 'input' layer. The example usage is: @@ -5363,18 +5434,19 @@ def warp_ctc_layer(input, :param input: The input of this layer. :type input: LayerOutput - :param label: The data layer of label with variable length. + :param label: The input label. :type label: LayerOutput - :param size: category numbers + 1. + :param size: The dimension of this layer, which must be equal to (category number + 1). :type size: int :param name: The name of this layer. It is optional. - :type name: basestring | None - :param blank: the 'blank' label used in ctc + :type name: basestring + :param blank: The 'blank' label used in ctc. :type blank: int - :param norm_by_times: Whether to normalization by times. False by default. + :param norm_by_times: Whether to do normalization by times. False is the default. :type norm_by_times: bool - :param layer_attr: Extra Layer config. - :type layer_attr: ExtraLayerAttribute | None + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. + :type layer_attr: ExtraLayerAttribute :return: LayerOutput object. :rtype: LayerOutput """ @@ -5420,23 +5492,26 @@ def crf_layer(input, label=label, size=label_dim) - :param input: The first input layer is the feature. + :param input: The first input layer. :type input: LayerOutput - :param label: The second input layer is label. + :param label: The input label. :type label: LayerOutput :param size: The category number. :type size: int - :param weight: The third layer is "weight" of each sample, which is an - optional argument. + :param weight: The weight layer defines a weight for each sample in the + mini-batch. It is optional. :type weight: LayerOutput - :param param_attr: Parameter attribute. None means default attribute + :param param_attr: The parameter attribute. See ParameterAttribute for + details. :type param_attr: ParameterAttribute :param name: The name of this layer. It is optional. - :type name: None | basestring - :param coeff: The coefficient affects the gradient in the backward. + :type name: basestring + :param coeff: The weight of the gradient in the back propagation. + 1.0 is the default value. :type coeff: float - :param layer_attr: Extra Layer config. - :type layer_attr: ExtraLayerAttribute | None + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. + :type layer_attr: ExtraLayerAttribute :return: LayerOutput object. :rtype: LayerOutput """ @@ -5482,9 +5557,9 @@ def crf_decoding_layer(input, """ A layer for calculating the decoding sequence of sequential conditional random field model. The decoding sequence is stored in output.ids. - If a second input is provided, it is treated as the ground-truth label, and - this layer will also calculate error. output.value[i] is 1 for incorrect - decoding or 0 for correct decoding. + If the input 'label' is provided, it is treated as the ground-truth label, and + this layer will also calculate error. output.value[i] is 1 for an incorrect + decoding and 0 for a correct one.
The example usage is: @@ -5495,16 +5570,18 @@ def crf_decoding_layer(input, :param input: The first input layer. :type input: LayerOutput - :param size: size of this layer. + :param size: The dimension of this layer. :type size: int - :param label: None or ground-truth label. - :type label: LayerOutput or None - :param param_attr: Parameter attribute. None means default attribute + :param label: The input label. + :type label: LayerOutput | None + :param param_attr: The parameter attribute. See ParameterAttribute for + details. :type param_attr: ParameterAttribute :param name: The name of this layer. It is optional. - :type name: None | basestring - :param layer_attr: Extra Layer config. - :type layer_attr: ExtraLayerAttribute | None + :type name: basestring + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. + :type layer_attr: ExtraLayerAttribute :return: LayerOutput object. :rtype: LayerOutput """ @@ -5551,8 +5628,7 @@ def nce_layer(input, bias_attr=None, layer_attr=None): """ - Noise-contrastive estimation. This layer implements the method in the - following paper: + Noise-contrastive estimation. Reference: A fast and simple algorithm for training neural probabilistic language @@ -5568,25 +5644,27 @@ def nce_layer(input, :param name: The name of this layer. It is optional. :type name: basestring - :param input: The input layers. It should be a LayerOutput or a list/tuple - of LayerOutput. + :param input: The first input of this layer. :type input: LayerOutput | list | tuple | collections.Sequence - :param label: The ground truth. + :param label: The input label. :type label: LayerOutput :param weight: The weight layer defines a weight for each sample in the - mini-batch. The default value is None. + mini-batch. It is optional. :type weight: LayerOutput - :param num_classes: The class number. + :param num_classes: The number of classes. :type num_classes: int - :param param_attr: The parameter attributes. - :type param_attr: ParameterAttribute|list - :param num_neg_samples: The number of sampled negative labels. The default - value is 10. + :param act: Activation type. SigmoidActivation is the default activation. + :type act: BaseActivation + :param param_attr: The parameter attribute. See ParameterAttribute for + details. + :type param_attr: ParameterAttribute + :param num_neg_samples: The number of sampled negative labels. 10 is the + default value. :type num_neg_samples: int :param neg_distribution: The discrete noisy distribution over the output space from which num_neg_samples negative labels are sampled. If this parameter is not set, a - uniform distribution will be used. A user defined + uniform distribution will be used. A user-defined distribution is a list whose length must be equal to the num_classes. Each member of the list defines the probability of a class given input x. @@ -5596,9 +5674,10 @@ def nce_layer(input, no bias is defined. If this parameter is set to True, the bias is initialized to zero. :type bias_attr: ParameterAttribute | None | bool | Any - :param layer_attr: Extra Layer Attribute. + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. :type layer_attr: ExtraLayerAttribute - :return: The LayerOutput object. + :return: LayerOutput object. :rtype: LayerOutput """ if isinstance(input, LayerOutput): @@ -5665,11 +5744,11 @@ def rank_cost(left, coeff=1.0, layer_attr=None): """ - A cost Layer for learning to rank using gradient descent. Details can refer - to `papers `_. 
- This layer contains at least three inputs. The weight is an optional - argument, which affects the cost. + A cost Layer for learning to rank using gradient descent. + + Reference: + Learning to Rank using Gradient Descent + http://research.microsoft.com/en-us/um/people/cburges/papers/ICML_ranking.pdf .. math:: @@ -5700,14 +5779,16 @@ :type right: LayerOutput :param label: Label is 1 or 0, means positive order and reverse order. :type label: LayerOutput - :param weight: The weight affects the cost, namely the scale of cost. - It is an optional argument. + :param weight: The weight layer defines a weight for each sample in the + mini-batch. It is optional. :type weight: LayerOutput :param name: The name of this layer. It is optional. - :type name: None | basestring - :param coeff: The coefficient affects the gradient in the backward. + :type name: basestring + :param coeff: The weight of the gradient in the back propagation. + 1.0 is the default value. :type coeff: float - :param layer_attr: Extra Layer Attribute. + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. :type layer_attr: ExtraLayerAttribute :return: LayerOutput object. :rtype: LayerOutput @@ -5752,25 +5833,25 @@ def lambda_cost(input, NDCG_num=8, max_sort_size=-1) - :param input: Samples of the same query should be loaded as sequence. + :param input: The first input of this layer, which is often a list of document + samples of the same query; its type must be sequence. :type input: LayerOutput - :param score: The 2nd input. Score of each sample. + :param score: The scores of the samples. :type input: LayerOutput :param NDCG_num: The size of NDCG (Normalized Discounted Cumulative Gain), e.g., 5 for NDCG@5. It must be less than or equal to the - minimum size of lists. + minimum size of the list. :type NDCG_num: int - :param max_sort_size: The size of partial sorting in calculating gradient. - If max_sort_size = -1, then for each list, the - algorithm will sort the entire list to get gradient. - In other cases, max_sort_size must be greater than or - equal to NDCG_num. And if max_sort_size is greater - than the size of a list, the algorithm will sort the - entire list of get gradient. + :param max_sort_size: The size of partial sorting in calculating gradient. If + max_sort_size is equal to -1 or greater than the number + of the samples in the list, then the algorithm will sort + the entire list to compute the gradient. In other cases, + max_sort_size must be greater than or equal to NDCG_num. :type max_sort_size: int :param name: The name of this layer. It is optional. - :type name: None | basestring - :param layer_attr: Extra Layer Attribute. + :type name: basestring + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. :type layer_attr: ExtraLayerAttribute :return: LayerOutput object. :rtype: LayerOutput @@ -5815,11 +5896,10 @@ def cross_entropy(input, :param name: The name of this layer. It is optional. :type name: basestring :param coeff: The weight of the gradient in the back propagation. - 1.0 is the default. + 1.0 is the default value. :type coeff: float - :param weight: The cost of each sample is multiplied with each weight. - The weight should be a layer with size=1. Note that gradient - will not be calculated for weight. + :param weight: The weight layer defines a weight for each sample in the + mini-batch. It is optional. :type weight: LayerOutout :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for details.
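To make the ranking-cost docstrings revised above concrete, here is a minimal, hedged configuration sketch. It uses only layers documented in this file (data_layer, fc_layer, rank_cost, lambda_cost); the layer names and sizes are invented for illustration and are not part of the patch:

```python
# Hypothetical ranking configuration; names and sizes are made up.
from paddle.trainer_config_helpers import *

# Pairwise ranking: rank_cost compares the scores of two candidate
# documents for the same query; label is 1 if 'left' should rank higher.
left = fc_layer(input=data_layer(name="doc_a", size=100), size=1)
right = fc_layer(input=data_layer(name="doc_b", size=100), size=1)
pair_label = data_layer(name="pair_label", size=1)
pairwise_cost = rank_cost(left=left, right=right, label=pair_label)

# Listwise ranking: lambda_cost consumes a sequence of per-document
# scores for one query plus their ground-truth relevance, optimizing
# NDCG@5 (NDCG_num must not exceed the shortest list, per the docstring).
docs = data_layer(name="query_docs", size=100)   # sequence input
predict = fc_layer(input=docs, size=1)           # one score per document
relevance = data_layer(name="relevance", size=1)
listwise_cost = lambda_cost(input=predict, score=relevance, NDCG_num=5)
```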
@@ -5864,7 +5944,7 @@ def cross_entropy_with_selfnorm(input, :param name: The name of this layer. It is optional. :type name: basestring :param coeff: The weight of the gradient in the back propagation. - 1.0 is the default. + 1.0 is the default value. :type coeff: float :param softmax_selfnorm_alpha: The scale factor affects the cost. :type softmax_selfnorm_alpha: float @@ -5954,7 +6034,7 @@ def huber_regression_cost(input, :param delta: The difference between the observed and predicted values. :type delta: float :param coeff: The weight of the gradient in the back propagation. - 1.0 is the default. + 1.0 is the default value. :type coeff: float :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for details. @@ -6004,7 +6084,7 @@ def huber_classification_cost(input, :param name: The name of this layer. It is optional. :type name: basestring :param coeff: The weight of the gradient in the back propagation. - 1.0 is the default. + 1.0 is the default value. :type coeff: float :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for details. @@ -6049,7 +6129,7 @@ def multi_binary_label_cross_entropy(input, :param name: The name of this layer. It is optional. :type name: basestring :param coeff: The weight of the gradient in the back propagation. - 1.0 is the default. + 1.0 is the default value. :type coeff: float :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for details. @@ -6220,7 +6300,7 @@ def smooth_l1_cost(input, label, name=None, coeff=1.0, layer_attr=None): :param name: The name of this layer. It is optional. :type name: basestring :param coeff: The weight of the gradient in the back propagation. - 1.0 is the default. + 1.0 is the default value. :type coeff: float :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for details. @@ -6372,7 +6452,7 @@ def row_conv_layer(input, :param context_len: The context length equals the lookahead step number plus one. :type context_len: int - :param act: Activation Type. LinearActivation is the default. + :param act: Activation Type. LinearActivation is the default activation. :type act: BaseActivation :param param_attr: The parameter attribute. See ParameterAttribute for details. @@ -6520,7 +6600,8 @@ def gated_unit_layer(input, :type input: LayerOutput :param size: The dimension of this layer's output. :type size: int - :param act: Activation type of the projection. LinearActivation is the default. + :param act: Activation type of the projection. LinearActivation is the default + activation. :type act: BaseActivation :param name: The name of this layer. It is optional. :type name: basestring @@ -6901,7 +6982,7 @@ def img_conv3d_layer(input, :type filter_size: int | tuple | list :param num_filters: The number of filters in each group. :type num_filters: int - :param act: Activation type. ReluActivation is the default. + :param act: Activation type. ReluActivation is the default activation. :type act: BaseActivation :param groups: The number of the filter groups. :type groups: int @@ -6916,8 +6997,8 @@ def img_conv3d_layer(input, parameter is set to True, the bias is initialized to zero. :type bias_attr: ParameterAttribute | None | bool | Any :param num_channels: The number of input channels. If the parameter is not set or - set to None, its actual value will be automatically set to - the channels number of the input . + set to None, its actual value will be automatically set to + the channel number of the input.
:type num_channels: int :param param_attr: The parameter attribute of the convolution. See ParameterAttribute for details. @@ -7093,7 +7174,7 @@ def sub_seq_layer(input, offsets, sizes, act=None, bias_attr=None, name=None): :type offsets: LayerOutput :param sizes: The sizes of the sub-sequences, which should be sequence type. :type sizes: LayerOutput - :param act: Activation type, LinearActivation is the default. + :param act: Activation type, LinearActivation is the default activation. :type act: BaseActivation. :param bias_attr: The bias attribute. If the parameter is set to False or an object whose type is not ParameterAttribute, no bias is defined. If the diff --git a/python/paddle/v2/__init__.py b/python/paddle/v2/__init__.py index 1c8d8f4b2f..3d70513843 100644 --- a/python/paddle/v2/__init__.py +++ b/python/paddle/v2/__init__.py @@ -37,6 +37,8 @@ import model import paddle.trainer.config_parser as cp __all__ = [ + 'default_startup_program', + 'default_main_program', 'optimizer', 'layer', 'activation', diff --git a/python/paddle/v2/fluid/evaluator.py b/python/paddle/v2/fluid/evaluator.py index 180d0135ff..3a8f1831cf 100644 --- a/python/paddle/v2/fluid/evaluator.py +++ b/python/paddle/v2/fluid/evaluator.py @@ -1,59 +1,187 @@ -import paddle.v2.fluid.op as op import numpy as np +from paddle.v2.fluid.framework import Program, g_main_program, unique_name, Variable import paddle.v2.fluid.core as core -def avg_accumulate(accumulated_var, per_eval, num_batches, place): - t = np.array(accumulated_var.get_tensor()) - t[0] += per_eval[0] - accumulated_var.get_tensor().set([t[0] / float(num_batches)], place) +def _clone_var_in_block_(block, var): + assert isinstance(var, Variable) + return block.create_var( + name=var.name, + shape=var.shape, + dtype=var.data_type, + type=var.type, + lod_level=var.lod_level, + persistable=True) class Evaluator(object): - def __init__(self, - scope, - operator='accuracy', - input='Inference', - label='Label', - output='Output', - place=core.CPUPlace()): - """ - create an evaluator for evaluating the inference. - NOTE: default run on CPUPlace(), running on GPUPlace doesn't improve performance much. - - :param scope: the scope instance contains the input. - :type scope: paddle.v2.fluid.core.scope - :param operator: operator name for caculating the evaluation for each mini-batch. - :type operator: string - :param input: output variable name of forward network. 
- :type input: string - :param label: variable name of label - :type label: string - """ - self.scope = scope - self.place = place - self.output_name = output - self.num_batches = 0 - # create variable to store accumulated evaluator output - eval_name = ''.join([operator, "@Eval"]) - if scope.find_var(eval_name): - raise Exception("evaluator already exist in scope: %s" % eval_name) - self.accumulated_var = scope.var(eval_name) - t = self.accumulated_var.get_tensor() - t.set_dims((1, )) - t.set([0.0], place) - # self.accumulated_var = block.create_var(block, name=eval_name, shape=(1,)) - # self.accumulated_var.get_tensor().set([0.0]) - # create operator of evaluation - var_map = dict() # var name -> variable - var_map[input] = [input] - var_map[label] = [label] - var_map[output] = [output] - self.op = op.Operator(operator, **var_map) - - def evaluate(self, ctx, accumulator=avg_accumulate): - self.op.run(self.scope, ctx) - per_eval = np.array(self.scope.find_var(self.output_name).get_tensor()) - self.num_batches += 1 - accumulator(self.accumulated_var, per_eval, self.num_batches, - self.place) + """ + Evaluator Base class. + + create metric states + add mini-batch evaluator calculation operators + add increment operator to accumulate the metric states + """ + + def __init__(self, name, **kwargs): + """ + init the global states + """ + self._states = {} + if kwargs.has_key("main_program"): + self._main_program = kwargs.get("main_program") + else: + self._main_program = g_main_program + + def _update_ops(self, *args, **kwargs): + """ + append update ops to the global states + """ + raise NotImplementedError() + + def reset(self, executor, reset_program=None): + """ + Clear metric states at the beginning of each pass/user-specified batch + """ + if reset_program is None: + reset_program = Program() + block = reset_program.global_block() + for k, var in self._states.iteritems(): + g_var = _clone_var_in_block_(block, var) + zeros = block.create_var(dtype="float32", persistable=True) + block.append_op( + type="fill_constant", + outputs={"Out": [zeros]}, + attrs={ + "shape": g_var.shape, + "value": .0, + "data_type": 5, + }) + block.append_op( + type="scale", inputs={"X": zeros}, outputs={"Out": g_var}) + executor.run(reset_program, fetch_list=self._states.values()) + + def eval(self, executor, eval_program=None): + """ + Merge the mini-batch statistics to form the evaluation result for multiple mini-batches.
+ """ + raise NotImplementedError() + + +class Accuracy(Evaluator): + """ + Accuracy need two state variable Total, Correct + """ + + def __init__(self, *args, **kwargs): + super(Accuracy, self).__init__("accuracy", **kwargs) + block = self._main_program.global_block() + g_total = block.create_var( + name=unique_name("Total"), + persistable=True, + dtype="int64", + shape=[1]) + g_correct = block.create_var( + name=unique_name("Correct"), + persistable=True, + dtype="int64", + shape=[1]) + self._states["Total"] = g_total + self._states["Correct"] = g_correct + + def _update_ops(self, input, label, k=1, **kwargs): + block = self._main_program.global_block() + topk_out = block.create_var(dtype=input.data_type) + topk_indices = block.create_var(dtype="int64") + block.append_op( + type="top_k", + inputs={"X": [input]}, + outputs={"Out": [topk_out], + "Indices": [topk_indices]}, + attrs={"k": k}) + acc_out = block.create_var(dtype=kwargs.get("out_dtype", "float32")) + correct = block.create_var(dtype="int64", persistable=True) + total = block.create_var(dtype="int64", persistable=True) + block.append_op( + type="accuracy", + inputs={ + "Out": [topk_out], + "Indices": [topk_indices], + "Label": [label] + }, + outputs={ + "Accuracy": [acc_out], + "Correct": [correct], + "Total": [total], + }) + + block.append_op( + type="cast", + inputs={"X": [self._states["Total"]]}, + outputs={"Out": [self._states["Total"]]}, + attrs={ + "in_data_type": 5, # float32 + "out_data_type": 2, #int32 + }) + block.append_op( + type="cast", + inputs={"X": [self._states["Correct"]]}, + outputs={"Out": [self._states["Correct"]]}, + attrs={ + "in_data_type": 5, + "out_data_type": 2, + }) + + block.append_op( + type="elementwise_add", + inputs={"X": [self._states["Total"]], + "Y": [total]}, + outputs={"Out": [self._states["Total"]]}) + block.append_op( + type="elementwise_add", + inputs={"X": [self._states["Correct"]], + "Y": [correct]}, + outputs={"Out": [self._states["Correct"]]}) + + return acc_out + + def eval(self, executor, eval_program=None): + if eval_program != None: + eval_program = eval_program + else: + eval_program = Program() + block = eval_program.global_block() + eval_out = block.create_var(dtype=self._states["Total"].data_type) + e_total = _clone_var_in_block_(block, self._states["Total"]) + e_correct = _clone_var_in_block_(block, self._states["Correct"]) + block.append_op( + type="cast", + inputs={"X": [e_total]}, + outputs={"Out": [e_total]}, + attrs={ + "in_data_type": 2, #int32 + "out_data_type": 5, #float32 + }) + block.append_op( + type="cast", + inputs={"X": [e_correct]}, + outputs={"Out": [e_correct]}, + attrs={ + "in_data_type": 2, + "out_data_type": 5, + }) + block.append_op( + type="elementwise_div", + inputs={"X": e_correct, + "Y": e_total}, + outputs={"Out": eval_out}) + out = executor.run(eval_program, fetch_list=[eval_out]) + return np.array(out[0]) + + +def accuracy(*args, **kwargs): + cls = Accuracy(*args, **kwargs) + out = cls._update_ops(*args, **kwargs) + return cls, out diff --git a/python/paddle/v2/fluid/framework.py b/python/paddle/v2/fluid/framework.py index e2587b4f74..f20567243a 100644 --- a/python/paddle/v2/fluid/framework.py +++ b/python/paddle/v2/fluid/framework.py @@ -4,7 +4,7 @@ import collections import numpy as np import copy -__all__ = ['Block', 'Variable', 'Program', 'Operator'] +__all__ = ['Block', 'Variable', 'Program', 'Operator', 'default_startup_program', 'default_main_program'] def unique_name(prefix): @@ -562,3 +562,9 @@ class Parameter(Variable): # program is a 
diff --git a/python/paddle/v2/fluid/layers.py b/python/paddle/v2/fluid/layers.py
index 8a1aa1c42d..b582f2ef6d 100644
--- a/python/paddle/v2/fluid/layers.py
+++ b/python/paddle/v2/fluid/layers.py
@@ -574,5 +574,6 @@ def accuracy(input, label, k=1, **kwargs):
                  "Indices": [topk_indices]},
         attrs={"k": k})
-    acc_out_dtype = kwargs.get("out_dtype", "float32")
-    acc_out = helper.create_tmp_variable(dtype=acc_out_dtype)
+    acc_out = helper.create_tmp_variable(dtype=kwargs.get("out_dtype", "float32"))
+    correct = helper.create_tmp_variable(dtype="int64")
+    total = helper.create_tmp_variable(dtype="int64")
     helper.append_op(
@@ -582,5 +583,9 @@
             "Indices": [topk_indices],
             "Label": [label]
         },
-        outputs={"Accuracy": [acc_out]})
+        outputs={
+            "Accuracy": [acc_out],
+            "Correct": [correct],
+            "Total": [total],
+        })
     return acc_out
diff --git a/python/paddle/v2/fluid/optimizer.py b/python/paddle/v2/fluid/optimizer.py
index 4252a6f085..d2841df6af 100644
--- a/python/paddle/v2/fluid/optimizer.py
+++ b/python/paddle/v2/fluid/optimizer.py
@@ -9,7 +9,7 @@ from paddle.v2.fluid.layer_helper import LayerHelper
 
 __all__ = [
     'SGDOptimizer', 'MomentumOptimizer', 'AdagradOptimizer', 'AdamOptimizer',
-    'AdamaxOptimizer'
+    'AdamaxOptimizer', 'DecayedAdagradOptimizer'
 ]
 
 
@@ -85,7 +85,7 @@ class Optimizer(object):
         """
         if (name in self._accumulators and
                 param.name in self._accumulators[name]):
-            raise Exception("Accumulator {} already exists for parmeter {}".
+            raise Exception("Accumulator {} already exists for parameter {}".
                             format(name, param.name))
 
         assert isinstance(self.helper, LayerHelper)
@@ -307,7 +307,7 @@ class AdagradOptimizer(Optimizer):
         moment_acc = self._get_accumulator(self._moment_acc_str,
                                            param_and_grad[0])
 
-        # create the adagrad optimizer op
+        # Create the adagrad optimizer op
         adagrad_op = block.append_op(
             type=self.type,
             inputs={
@@ -510,3 +510,51 @@ class AdamaxOptimizer(Optimizer):
             attrs={"scale": self._beta1})
 
         return [scale_beta1]
+
+
+class DecayedAdagradOptimizer(Optimizer):
+    """Simple Decayed Adagrad optimizer with moment state
+    """
+    _moment_acc_str = "moment"
+
+    def __init__(self,
+                 learning_rate,
+                 decay=0.95,
+                 epsilon=1.0e-6,
+                 global_step=None):
+        assert learning_rate is not None
+        assert decay is not None
+        assert epsilon is not None
+
+        super(DecayedAdagradOptimizer, self).__init__(global_step)
+        self.type = "decayed_adagrad"
+        self._learning_rate = learning_rate
+        self._decay = decay
+        self._epsilon = epsilon
+
+    def _create_accumulators(self, block, parameters):
+        assert isinstance(block, framework.Block)
+
+        for p in parameters:
+            self._add_accumulator(self._moment_acc_str, p)
+
+    def _append_optimize_op(self, block, param_and_grad):
+        assert isinstance(block, framework.Block)
+
+        moment_acc = self._get_accumulator(self._moment_acc_str,
+                                           param_and_grad[0])
+
+        # Create the decayed adagrad optimizer op
+        decayed_adagrad_op = block.append_op(
+            type=self.type,
+            inputs={
+                "Param": param_and_grad[0],
+                "Grad": param_and_grad[1],
+                "Moment": moment_acc,
+                "LearningRate": self._create_param_lr(param_and_grad)
+            },
+            outputs={"ParamOut": param_and_grad[0],
+                     "MomentOut": moment_acc},
+            attrs={"epsilon": self._epsilon})
+
+        return decayed_adagrad_op
diff --git a/python/paddle/v2/fluid/tests/book/test_fit_a_line.py b/python/paddle/v2/fluid/tests/book/test_fit_a_line.py
index 5ef963bffa..ee677a2c56 100644 --- a/python/paddle/v2/fluid/tests/book/test_fit_a_line.py +++ b/python/paddle/v2/fluid/tests/book/test_fit_a_line.py @@ -2,45 +2,33 @@ import paddle.v2 as paddle import paddle.v2.fluid.layers as layers import paddle.v2.fluid.core as core import paddle.v2.fluid.optimizer as optimizer - -from paddle.v2.fluid.framework import Program +import paddle.v2.fluid.framework as framework from paddle.v2.fluid.io import save_persistables, load_persistables from paddle.v2.fluid.executor import Executor import numpy as np -startup_program = Program() -main_program = Program() x = layers.data( name='x', shape=[13], - data_type='float32', - main_program=main_program, - startup_program=startup_program) + data_type='float32') y_predict = layers.fc(input=x, size=1, - act=None, - main_program=main_program, - startup_program=startup_program) + act=None) y = layers.data( name='y', shape=[1], - data_type='float32', - main_program=main_program, - startup_program=startup_program) + data_type='float32') cost = layers.square_error_cost( input=y_predict, - label=y, - main_program=main_program, - startup_program=startup_program) -avg_cost = layers.mean( - x=cost, main_program=main_program, startup_program=startup_program) + label=y) +avg_cost = layers.mean(x=cost) sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001) -opts = sgd_optimizer.minimize(avg_cost, startup_program) +opts = sgd_optimizer.minimize(avg_cost) BATCH_SIZE = 20 @@ -52,12 +40,12 @@ train_reader = paddle.batch( place = core.CPUPlace() exe = Executor(place) -exe.run(startup_program, feed={}, fetch_list=[]) +exe.run(framework.default_startup_program()) PASS_NUM = 100 for pass_id in range(PASS_NUM): - save_persistables(exe, "./fit_a_line.model/", main_program=main_program) - load_persistables(exe, "./fit_a_line.model/", main_program=main_program) + save_persistables(exe, "./fit_a_line.model/") + load_persistables(exe, "./fit_a_line.model/") for data in train_reader(): x_data = np.array(map(lambda x: x[0], data)).astype("float32") y_data = np.array(map(lambda x: x[1], data)).astype("float32") @@ -69,7 +57,7 @@ for pass_id in range(PASS_NUM): tensor_y = core.LoDTensor() tensor_y.set(y_data, place) # print tensor_y.get_dims() - outs = exe.run(main_program, + outs = exe.run(framework.default_main_program(), feed={'x': tensor_x, 'y': tensor_y}, fetch_list=[avg_cost]) diff --git a/python/paddle/v2/fluid/tests/book/test_image_classification_train.py b/python/paddle/v2/fluid/tests/book/test_image_classification_train.py index e253b8d27f..f4be835b3a 100644 --- a/python/paddle/v2/fluid/tests/book/test_image_classification_train.py +++ b/python/paddle/v2/fluid/tests/book/test_image_classification_train.py @@ -5,19 +5,17 @@ import paddle.v2.fluid.layers as layers import paddle.v2.fluid.nets as nets import paddle.v2.fluid.optimizer as optimizer from paddle.v2.fluid.executor import Executor -from paddle.v2.fluid.framework import g_startup_program, g_main_program +import paddle.v2.fluid.framework as framework from paddle.v2.fluid.initializer import XavierInitializer -def resnet_cifar10(input, depth=32, main_program=None, startup_program=None): +def resnet_cifar10(input, depth=32): def conv_bn_layer(input, ch_out, filter_size, stride, padding, - act='relu', - main_program=None, - startup_program=None): + act='relu'): tmp = layers.conv2d( input=input, filter_size=filter_size, @@ -25,14 +23,10 @@ def resnet_cifar10(input, depth=32, main_program=None, startup_program=None): stride=stride, padding=padding, act=None, - 
bias_attr=False, - main_program=main_program, - startup_program=startup_program) + bias_attr=False) return layers.batch_norm( input=tmp, - act=act, - main_program=main_program, - startup_program=startup_program) + act=act) def shortcut(input, ch_in, ch_out, stride, program, init_program): if ch_in != ch_out: @@ -44,40 +38,30 @@ def resnet_cifar10(input, depth=32, main_program=None, startup_program=None): def basicblock(input, ch_in, ch_out, - stride, - main_program=main_program, - startup_program=startup_program): + stride): tmp = conv_bn_layer( input, ch_out, 3, stride, - 1, - main_program=main_program, - startup_program=startup_program) + 1) tmp = conv_bn_layer( tmp, ch_out, 3, 1, 1, - act=None, - main_program=main_program, - startup_program=startup_program) - short = shortcut(input, ch_in, ch_out, stride, main_program, - startup_program) + act=None) + short = shortcut(input, ch_in, ch_out, stride) return layers.elementwise_add( x=tmp, y=short, - act='relu', - main_program=main_program, - startup_program=startup_program) + act='relu') - def layer_warp(block_func, input, ch_in, ch_out, count, stride, program, - startup_program): - tmp = block_func(input, ch_in, ch_out, stride, program, startup_program) + def layer_warp(block_func, input, ch_in, ch_out, count, stride): + tmp = block_func(input, ch_in, ch_out, stride) for i in range(1, count): - tmp = block_func(tmp, ch_out, ch_out, 1, program, startup_program) + tmp = block_func(tmp, ch_out, ch_out, 1) return tmp assert (depth - 2) % 6 == 0 @@ -87,53 +71,41 @@ def resnet_cifar10(input, depth=32, main_program=None, startup_program=None): ch_out=16, filter_size=3, stride=1, - padding=1, - main_program=main_program, - startup_program=startup_program) + padding=1) res1 = layer_warp( basicblock, conv1, 16, 16, n, - 1, - main_program=main_program, - startup_program=startup_program) + 1) res2 = layer_warp( basicblock, res1, 16, 32, n, - 2, - main_program=main_program, - startup_program=startup_program) + 2) res3 = layer_warp( basicblock, res2, 32, 64, n, - 2, - main_program=main_program, - startup_program=startup_program) + 2) pool = layers.pool2d( input=res3, pool_size=8, pool_type='avg', - pool_stride=1, - main_program=main_program, - startup_program=startup_program) + pool_stride=1) return pool -def vgg16_bn_drop(input, main_program=None, startup_program=None): +def vgg16_bn_drop(input): def conv_block(input, num_filter, groups, - dropouts, - main_program=None, - startup_program=None): + dropouts): return nets.img_conv_group( input=input, pool_size=2, @@ -143,51 +115,34 @@ def vgg16_bn_drop(input, main_program=None, startup_program=None): conv_act='relu', conv_with_batchnorm=True, conv_batchnorm_drop_rate=dropouts, - pool_type='max', - main_program=main_program, - startup_program=startup_program) + pool_type='max') - conv1 = conv_block(input, 64, 2, [0.3, 0], main_program, startup_program) - conv2 = conv_block(conv1, 128, 2, [0.4, 0], main_program, startup_program) - conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0], main_program, - startup_program) - conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0], main_program, - startup_program) - conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0], main_program, - startup_program) + conv1 = conv_block(input, 64, 2, [0.3, 0]) + conv2 = conv_block(conv1, 128, 2, [0.4, 0]) + conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0]) + conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0]) + conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0]) drop = layers.dropout( x=conv5, - dropout_prob=0.5, - main_program=main_program, - 
startup_program=startup_program) + dropout_prob=0.5) fc1 = layers.fc(input=drop, size=512, act=None, - param_attr={"initializer": XavierInitializer()}, - main_program=main_program, - startup_program=startup_program) + param_attr={"initializer": XavierInitializer()}) reshape1 = layers.reshape( x=fc1, - shape=list(fc1.shape + (1, 1)), - main_program=main_program, - startup_program=startup_program) + shape=list(fc1.shape + (1, 1))) bn = layers.batch_norm( input=reshape1, - act='relu', - main_program=main_program, - startup_program=startup_program) + act='relu') drop2 = layers.dropout( x=bn, - dropout_prob=0.5, - main_program=main_program, - startup_program=startup_program) + dropout_prob=0.5) fc2 = layers.fc(input=drop2, size=512, act=None, - param_attr={"initializer": XavierInitializer()}, - main_program=main_program, - startup_program=startup_program) + param_attr={"initializer": XavierInitializer()}) return fc2 @@ -225,7 +180,7 @@ train_reader = paddle.batch( place = core.CPUPlace() exe = Executor(place) -exe.run(g_startup_program, feed={}, fetch_list=[]) +exe.run(framework.default_startup_program()) for pass_id in range(PASS_NUM): batch_id = 0 @@ -243,7 +198,7 @@ for pass_id in range(PASS_NUM): tensor_img.set(img_data, place) tensor_y.set(y_data, place) - outs = exe.run(g_main_program, + outs = exe.run(framework.default_main_program(), feed={"pixel": tensor_img, "label": tensor_y}, fetch_list=[avg_cost, accuracy]) diff --git a/python/paddle/v2/fluid/tests/book/test_recognize_digits_conv.py b/python/paddle/v2/fluid/tests/book/test_recognize_digits_conv.py index 2b72312541..f330ff5813 100644 --- a/python/paddle/v2/fluid/tests/book/test_recognize_digits_conv.py +++ b/python/paddle/v2/fluid/tests/book/test_recognize_digits_conv.py @@ -3,67 +3,46 @@ import paddle.v2.fluid.layers as layers import paddle.v2.fluid.nets as nets import paddle.v2.fluid.core as core import paddle.v2.fluid.optimizer as optimizer - -from paddle.v2.fluid.framework import Program +import paddle.v2.fluid.evaluator as evaluator +import paddle.v2.fluid.framework as framework from paddle.v2.fluid.executor import Executor import numpy as np -startup_program = Program() -main_program = Program() - images = layers.data( name='pixel', shape=[1, 28, 28], - data_type='float32', - main_program=main_program, - startup_program=startup_program) + data_type='float32') label = layers.data( name='label', shape=[1], - data_type='int64', - main_program=main_program, - startup_program=startup_program) + data_type='int64') conv_pool_1 = nets.simple_img_conv_pool( input=images, filter_size=5, num_filters=20, pool_size=2, pool_stride=2, - act="relu", - main_program=main_program, - startup_program=startup_program) + act="relu") conv_pool_2 = nets.simple_img_conv_pool( input=conv_pool_1, filter_size=5, num_filters=50, pool_size=2, pool_stride=2, - act="relu", - main_program=main_program, - startup_program=startup_program) + act="relu") predict = layers.fc(input=conv_pool_2, size=10, - act="softmax", - main_program=main_program, - startup_program=startup_program) -cost = layers.cross_entropy( - input=predict, - label=label, - main_program=main_program, - startup_program=startup_program) -avg_cost = layers.mean(x=cost, main_program=main_program) -accuracy = layers.accuracy( - input=predict, - label=label, - main_program=main_program, - startup_program=startup_program) - -# optimizer = optimizer.MomentumOptimizer(learning_rate=0.1 / 128.0, -# momentum=0.9) + act="softmax") +cost = layers.cross_entropy(input=predict, label=label) +avg_cost = 
layers.mean(x=cost) optimizer = optimizer.AdamOptimizer(learning_rate=0.01, beta1=0.9, beta2=0.999) -opts = optimizer.minimize(avg_cost, startup_program) +opts = optimizer.minimize(avg_cost) + +accuracy, acc_out = evaluator.accuracy( + input=predict, + label=label) BATCH_SIZE = 50 PASS_NUM = 3 @@ -75,10 +54,11 @@ train_reader = paddle.batch( place = core.CPUPlace() exe = Executor(place) -exe.run(startup_program, feed={}, fetch_list=[]) +exe.run(framework.default_startup_program()) for pass_id in range(PASS_NUM): count = 0 + accuracy.reset(exe) for data in train_reader(): img_data = np.array(map(lambda x: x[0].reshape([1, 28, 28]), data)).astype("float32") @@ -90,14 +70,20 @@ for pass_id in range(PASS_NUM): tensor_img.set(img_data, place) tensor_y.set(y_data, place) - outs = exe.run(main_program, + outs = exe.run(framework.default_main_program(), feed={"pixel": tensor_img, "label": tensor_y}, - fetch_list=[avg_cost, accuracy]) + fetch_list=[avg_cost, acc_out]) loss = np.array(outs[0]) acc = np.array(outs[1]) - + pass_acc = accuracy.eval(exe) + print "pass id : ", pass_id, pass_acc + # print loss, acc if loss < 10.0 and acc > 0.9: # if avg cost less than 10.0 and accuracy is larger than 0.9, we think our code is good. exit(0) + + pass_acc = accuracy.eval(exe) + print "pass id : ", pass_id, pass_acc + exit(1) diff --git a/python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py b/python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py index 2e1a9f236b..b0164e3e36 100644 --- a/python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py +++ b/python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py @@ -2,8 +2,7 @@ import paddle.v2 as paddle import paddle.v2.fluid.layers as layers import paddle.v2.fluid.core as core import paddle.v2.fluid.optimizer as optimizer - -from paddle.v2.fluid.framework import Program +import paddle.v2.fluid.framework as framework from paddle.v2.fluid.executor import Executor from paddle.v2.fluid.regularizer import L2DecayRegularizer from paddle.v2.fluid.initializer import UniformInitializer @@ -11,14 +10,10 @@ from paddle.v2.fluid.initializer import UniformInitializer import numpy as np BATCH_SIZE = 128 -startup_program = Program() -main_program = Program() image = layers.data( name='x', shape=[784], - data_type='float32', - main_program=main_program, - startup_program=startup_program) + data_type='float32') param_attr = { 'name': None, @@ -30,45 +25,30 @@ param_attr = { hidden1 = layers.fc(input=image, size=128, act='relu', - main_program=main_program, - startup_program=startup_program, param_attr=param_attr) hidden2 = layers.fc(input=hidden1, size=64, act='relu', - main_program=main_program, - startup_program=startup_program, param_attr=param_attr) predict = layers.fc(input=hidden2, size=10, act='softmax', - main_program=main_program, - startup_program=startup_program, param_attr=param_attr) label = layers.data( name='y', shape=[1], - data_type='int64', - main_program=main_program, - startup_program=startup_program) + data_type='int64') -cost = layers.cross_entropy( - input=predict, - label=label, - main_program=main_program, - startup_program=startup_program) -avg_cost = layers.mean( - x=cost, main_program=main_program, startup_program=startup_program) +cost = layers.cross_entropy(input=predict, label=label) +avg_cost = layers.mean(x=cost) accuracy = layers.accuracy( input=predict, - label=label, - main_program=main_program, - startup_program=startup_program) + label=label) optimizer = optimizer.MomentumOptimizer(learning_rate=0.001, 
momentum=0.9) -opts = optimizer.minimize(avg_cost, startup_program) +opts = optimizer.minimize(avg_cost) train_reader = paddle.batch( paddle.reader.shuffle( @@ -78,7 +58,7 @@ train_reader = paddle.batch( place = core.CPUPlace() exe = Executor(place) -exe.run(startup_program, feed={}, fetch_list=[]) +exe.run(framework.default_startup_program()) PASS_NUM = 100 for pass_id in range(PASS_NUM): @@ -93,7 +73,7 @@ for pass_id in range(PASS_NUM): tensor_y = core.LoDTensor() tensor_y.set(y_data, place) - outs = exe.run(main_program, + outs = exe.run(framework.default_main_program(), feed={'x': tensor_x, 'y': tensor_y}, fetch_list=[avg_cost, accuracy]) diff --git a/python/paddle/v2/fluid/tests/book/test_recommender_system.py b/python/paddle/v2/fluid/tests/book/test_recommender_system.py index 4708dfe3e9..eefcb55beb 100644 --- a/python/paddle/v2/fluid/tests/book/test_recommender_system.py +++ b/python/paddle/v2/fluid/tests/book/test_recommender_system.py @@ -3,16 +3,13 @@ import paddle.v2.fluid.layers as layers import paddle.v2.fluid.nets as nets import paddle.v2.fluid.core as core import paddle.v2.fluid.optimizer as optimizer - -from paddle.v2.fluid.framework import Program +import paddle.v2.fluid.framework as framework from paddle.v2.fluid.executor import Executor import numpy as np -startup_program = Program() -main_program = Program() -is_sparse = True -use_gpu = False +IS_SPARSE = True +USE_GPU = False BATCH_SIZE = 256 @@ -25,99 +22,71 @@ def get_usr_combined_features(): uid = layers.data( name='user_id', shape=[1], - data_type='int64', - main_program=main_program, - startup_program=startup_program) + data_type='int64') usr_emb = layers.embedding( input=uid, data_type='float32', size=[USR_DICT_SIZE, 32], param_attr={'name': 'user_table'}, - is_sparse=is_sparse, - main_program=main_program, - startup_program=startup_program) + is_sparse=IS_SPARSE) usr_fc = layers.fc(input=usr_emb, - size=32, - main_program=main_program, - startup_program=startup_program) + size=32) USR_GENDER_DICT_SIZE = 2 usr_gender_id = layers.data( name='gender_id', shape=[1], - data_type='int64', - main_program=main_program, - startup_program=startup_program) + data_type='int64') usr_gender_emb = layers.embedding( input=usr_gender_id, size=[USR_GENDER_DICT_SIZE, 16], param_attr={'name': 'gender_table'}, - is_sparse=is_sparse, - main_program=main_program, - startup_program=startup_program) + is_sparse=IS_SPARSE) usr_gender_fc = layers.fc(input=usr_gender_emb, - size=16, - main_program=main_program, - startup_program=startup_program) + size=16) USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table) usr_age_id = layers.data( name='age_id', shape=[1], - data_type="int64", - main_program=main_program, - startup_program=startup_program) + data_type="int64") usr_age_emb = layers.embedding( input=usr_age_id, size=[USR_AGE_DICT_SIZE, 16], - is_sparse=is_sparse, - param_attr={'name': 'age_table'}, - main_program=main_program, - startup_program=startup_program) + is_sparse=IS_SPARSE, + param_attr={'name': 'age_table'}) usr_age_fc = layers.fc(input=usr_age_emb, - size=16, - main_program=main_program, - startup_program=startup_program) + size=16) USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1 usr_job_id = layers.data( name='job_id', shape=[1], - data_type="int64", - main_program=main_program, - startup_program=startup_program) + data_type="int64") usr_job_emb = layers.embedding( input=usr_job_id, size=[USR_JOB_DICT_SIZE, 16], param_attr={'name': 'job_table'}, - is_sparse=is_sparse, - main_program=main_program, - 
startup_program=startup_program) + is_sparse=IS_SPARSE) usr_job_fc = layers.fc(input=usr_job_emb, - size=16, - main_program=main_program, - startup_program=startup_program) + size=16) concat_embed = layers.concat( input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc], - axis=1, - main_program=main_program, - startup_program=startup_program) + axis=1) usr_combined_features = layers.fc(input=concat_embed, size=200, - act="tanh", - main_program=main_program, - startup_program=startup_program) + act="tanh") return usr_combined_features @@ -129,83 +98,61 @@ def get_mov_combined_features(): mov_id = layers.data( name='movie_id', shape=[1], - data_type='int64', - main_program=main_program, - startup_program=startup_program) + data_type='int64') mov_emb = layers.embedding( input=mov_id, data_type='float32', size=[MOV_DICT_SIZE, 32], param_attr={'name': 'movie_table'}, - is_sparse=is_sparse, - main_program=main_program, - startup_program=startup_program) + is_sparse=IS_SPARSE) mov_fc = layers.fc(input=mov_emb, - size=32, - main_program=main_program, - startup_program=startup_program) + size=32) CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories()) category_id = layers.data( name='category_id', shape=[1], - data_type='int64', - main_program=main_program, - startup_program=startup_program) + data_type='int64') mov_categories_emb = layers.embedding( input=category_id, size=[CATEGORY_DICT_SIZE, 32], - is_sparse=is_sparse, - main_program=main_program, - startup_program=startup_program) + is_sparse=IS_SPARSE) mov_categories_hidden = layers.sequence_pool( input=mov_categories_emb, - pool_type="sum", - main_program=main_program, - startup_program=startup_program) + pool_type="sum") MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict()) mov_title_id = layers.data( name='movie_title', shape=[1], - data_type='int64', - main_program=main_program, - startup_program=startup_program) + data_type='int64') mov_title_emb = layers.embedding( input=mov_title_id, size=[MOV_TITLE_DICT_SIZE, 32], - is_sparse=is_sparse, - main_program=main_program, - startup_program=startup_program) + is_sparse=IS_SPARSE) mov_title_conv = nets.sequence_conv_pool( input=mov_title_emb, num_filters=32, filter_size=3, act="tanh", - pool_type="sum", - main_program=main_program, - startup_program=startup_program) + pool_type="sum") concat_embed = layers.concat( input=[mov_fc, mov_categories_hidden, mov_title_conv], - axis=1, - main_program=main_program, - startup_program=startup_program) + axis=1) # FIXME(dzh) : need tanh operator mov_combined_features = layers.fc(input=concat_embed, size=200, - act="tanh", - main_program=main_program, - startup_program=startup_program) + act="tanh") return mov_combined_features @@ -217,27 +164,18 @@ def model(): # need cos sim inference = layers.cos_sim( X=usr_combined_features, - Y=mov_combined_features, - main_program=main_program, - startup_program=startup_program) + Y=mov_combined_features) label = layers.data( name='score', shape=[1], - data_type='float32', - main_program=main_program, - startup_program=startup_program) + data_type='float32') square_cost = layers.square_error_cost( input=inference, - label=label, - main_program=main_program, - startup_program=startup_program) + label=label) - avg_cost = layers.mean( - x=square_cost, - main_program=main_program, - startup_program=startup_program) + avg_cost = layers.mean(x=square_cost) return avg_cost @@ -245,16 +183,15 @@ def model(): def main(): cost = model() sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.2) - 
opts = sgd_optimizer.minimize(cost, startup_program=startup_program) - block = main_program.block(0) + opts = sgd_optimizer.minimize(cost) - if use_gpu: + if USE_GPU: place = core.GPUPlace(0) else: place = core.CPUPlace() exe = Executor(place) - exe.run(startup_program, feed={}, fetch_list=[]) + exe.run(framework.default_startup_program()) train_reader = paddle.batch( paddle.reader.shuffle( @@ -303,7 +240,7 @@ def main(): PASS_NUM = 100 for pass_id in range(PASS_NUM): for data in train_reader(): - outs = exe.run(main_program, + outs = exe.run(framework.default_main_program(), feed=func_feed(feeding, data), fetch_list=[cost]) out = np.array(outs[0]) diff --git a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py index dc4b63da9b..91fc79a987 100644 --- a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py +++ b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py @@ -3,8 +3,7 @@ import paddle.v2.fluid.layers as layers import paddle.v2.fluid.nets as nets import paddle.v2.fluid.core as core import paddle.v2.fluid.optimizer as optimizer - -from paddle.v2.fluid.framework import Program, g_main_program, g_startup_program +import paddle.v2.fluid.framework as framework from paddle.v2.fluid.executor import Executor import numpy as np @@ -70,7 +69,7 @@ def main(): place = core.CPUPlace() exe = Executor(place) - exe.run(g_startup_program) + exe.run(framework.default_startup_program()) for pass_id in xrange(PASS_NUM): for data in train_data(): @@ -82,7 +81,7 @@ def main(): tensor_label = core.LoDTensor() tensor_label.set(label, place) - outs = exe.run(g_main_program, + outs = exe.run(framework.default_main_program(), feed={"words": tensor_words, "label": tensor_label}, fetch_list=[cost, acc]) diff --git a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_dynamic_lstm.py b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_dynamic_lstm.py index 6d507f4c8e..8c3d448835 100644 --- a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_dynamic_lstm.py +++ b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_dynamic_lstm.py @@ -3,8 +3,7 @@ import paddle.v2.fluid.layers as layers import paddle.v2.fluid.nets as nets import paddle.v2.fluid.core as core import paddle.v2.fluid.optimizer as optimizer - -from paddle.v2.fluid.framework import Program, g_main_program, g_startup_program +import paddle.v2.fluid.framework as framework from paddle.v2.fluid.executor import Executor import numpy as np @@ -81,7 +80,7 @@ def main(): place = core.CPUPlace() exe = Executor(place) - exe.run(g_startup_program) + exe.run(framework.default_startup_program()) for pass_id in xrange(PASS_NUM): for data in train_data(): @@ -93,7 +92,7 @@ def main(): tensor_label = core.LoDTensor() tensor_label.set(label, place) - outs = exe.run(g_main_program, + outs = exe.run(framework.default_main_program(), feed={"words": tensor_words, "label": tensor_label}, fetch_list=[cost, acc]) diff --git a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_lstm.py b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_lstm.py index 848dcce974..a7d791c1f3 100644 --- a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_lstm.py +++ b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_lstm.py @@ -2,8 +2,7 @@ import paddle.v2 as paddle import paddle.v2.fluid.layers as layers import paddle.v2.fluid.core as core import paddle.v2.fluid.optimizer as optimizer - -from 
paddle.v2.fluid.framework import g_main_program, g_startup_program +import paddle.v2.fluid.framework as framework from paddle.v2.fluid.executor import Executor import numpy as np @@ -88,10 +87,10 @@ def main(): place = core.CPUPlace() tensor_words, tensor_label = prepare_feed_data(data, place) exe = Executor(place) - exe.run(g_startup_program) + exe.run(framework.default_startup_program()) while True: - outs = exe.run(g_main_program, + outs = exe.run(framework.default_main_program(), feed={"words": tensor_words, "label": tensor_label}, fetch_list=[cost, acc]) diff --git a/python/paddle/v2/fluid/tests/book/test_word2vec.py b/python/paddle/v2/fluid/tests/book/test_word2vec.py index 054dbd5a3d..9dcb6f2fea 100644 --- a/python/paddle/v2/fluid/tests/book/test_word2vec.py +++ b/python/paddle/v2/fluid/tests/book/test_word2vec.py @@ -2,20 +2,17 @@ import paddle.v2 as paddle import paddle.v2.fluid.layers as layers import paddle.v2.fluid.core as core import paddle.v2.fluid.optimizer as optimizer - -from paddle.v2.fluid.framework import Program +import paddle.v2.fluid.framework as framework from paddle.v2.fluid.executor import Executor import numpy as np -startup_program = Program() -main_program = Program() - -embed_size = 32 -hidden_size = 256 +PASS_NUM = 100 +EMBED_SIZE = 32 +HIDDEN_SIZE = 256 N = 5 -batch_size = 32 -is_sparse = True +BATCH_SIZE = 32 +IS_SPARSE = True word_dict = paddle.dataset.imikolov.build_dict() dict_size = len(word_dict) @@ -23,97 +20,67 @@ dict_size = len(word_dict) first_word = layers.data( name='firstw', shape=[1], - data_type='int64', - main_program=main_program, - startup_program=startup_program) + data_type='int64') second_word = layers.data( name='secondw', shape=[1], - data_type='int64', - main_program=main_program, - startup_program=startup_program) + data_type='int64') third_word = layers.data( name='thirdw', shape=[1], - data_type='int64', - main_program=main_program, - startup_program=startup_program) + data_type='int64') forth_word = layers.data( name='forthw', shape=[1], - data_type='int64', - main_program=main_program, - startup_program=startup_program) + data_type='int64') next_word = layers.data( name='nextw', shape=[1], - data_type='int64', - main_program=main_program, - startup_program=startup_program) + data_type='int64') embed_first = layers.embedding( input=first_word, - size=[dict_size, embed_size], + size=[dict_size, EMBED_SIZE], data_type='float32', - is_sparse=is_sparse, - param_attr={'name': 'shared_w'}, - main_program=main_program, - startup_program=startup_program) + is_sparse=IS_SPARSE, + param_attr={'name': 'shared_w'}) embed_second = layers.embedding( input=second_word, - size=[dict_size, embed_size], + size=[dict_size, EMBED_SIZE], data_type='float32', - is_sparse=is_sparse, - param_attr={'name': 'shared_w'}, - main_program=main_program, - startup_program=startup_program) - + is_sparse=IS_SPARSE, + param_attr={'name': 'shared_w'}) embed_third = layers.embedding( input=third_word, - size=[dict_size, embed_size], + size=[dict_size, EMBED_SIZE], data_type='float32', - is_sparse=is_sparse, - param_attr={'name': 'shared_w'}, - main_program=main_program, - startup_program=startup_program) + is_sparse=IS_SPARSE, + param_attr={'name': 'shared_w'}) embed_forth = layers.embedding( input=forth_word, - size=[dict_size, embed_size], + size=[dict_size, EMBED_SIZE], data_type='float32', - is_sparse=is_sparse, - param_attr={'name': 'shared_w'}, - main_program=main_program, - startup_program=startup_program) + is_sparse=IS_SPARSE, + param_attr={'name': 
'shared_w'}) concat_embed = layers.concat( input=[embed_first, embed_second, embed_third, embed_forth], - axis=1, - main_program=main_program, - startup_program=startup_program) - + axis=1) hidden1 = layers.fc(input=concat_embed, - size=hidden_size, - act='sigmoid', - main_program=main_program, - startup_program=startup_program) + size=HIDDEN_SIZE, + act='sigmoid') predict_word = layers.fc(input=hidden1, size=dict_size, - act='softmax', - main_program=main_program, - startup_program=startup_program) + act='softmax') cost = layers.cross_entropy( input=predict_word, - label=next_word, - main_program=main_program, - startup_program=startup_program) -avg_cost = layers.mean( - x=cost, main_program=main_program, startup_program=startup_program) - + label=next_word) +avg_cost = layers.mean(x=cost) sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001) -opts = sgd_optimizer.minimize(avg_cost, startup_program) +opts = sgd_optimizer.minimize(avg_cost) train_reader = paddle.batch( - paddle.dataset.imikolov.train(word_dict, N), batch_size) + paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE) place = core.CPUPlace() exe = Executor(place) @@ -122,8 +89,8 @@ exe = Executor(place) # below exit line. exit(0) -exe.run(startup_program, feed={}, fetch_list=[]) -PASS_NUM = 100 +exe.run(framework.default_startup_program()) + for pass_id in range(PASS_NUM): for data in train_reader(): input_data = [[data_idx[idx] for data_idx in data] for idx in xrange(5)] @@ -150,7 +117,7 @@ for pass_id in range(PASS_NUM): next_tensor = core.LoDTensor() next_tensor.set(next_data, place) - outs = exe.run(main_program, + outs = exe.run(framework.default_main_program(), feed={ 'firstw': first_tensor, 'secondw': second_tensor, diff --git a/python/paddle/v2/fluid/tests/test_accuracy_op.py b/python/paddle/v2/fluid/tests/test_accuracy_op.py index 6536c297e8..6f72918b71 100644 --- a/python/paddle/v2/fluid/tests/test_accuracy_op.py +++ b/python/paddle/v2/fluid/tests/test_accuracy_op.py @@ -18,7 +18,9 @@ class TestAccuracyOp(OpTest): num_correct += 1 break self.outputs = { - 'Accuracy': np.array([num_correct / float(n)]).astype("float32") + 'Accuracy': np.array([num_correct / float(n)]).astype("float32"), + 'Correct': np.array([num_correct]).astype("int32"), + 'Total': np.array([n]).astype("int32") } def test_check_output(self): diff --git a/python/paddle/v2/framework/tests/test_beam_search_decode_op.py b/python/paddle/v2/fluid/tests/test_beam_search_decode_op.py similarity index 96% rename from python/paddle/v2/framework/tests/test_beam_search_decode_op.py rename to python/paddle/v2/fluid/tests/test_beam_search_decode_op.py index e9f180bbae..8a11820d2a 100644 --- a/python/paddle/v2/framework/tests/test_beam_search_decode_op.py +++ b/python/paddle/v2/fluid/tests/test_beam_search_decode_op.py @@ -1,8 +1,8 @@ import unittest import numpy as np -import paddle.v2.framework.core as core -from paddle.v2.framework.op import Operator +import paddle.v2.fluid.core as core +from paddle.v2.fluid.op import Operator class TestBeamSearchDecodeOp(unittest.TestCase): diff --git a/python/paddle/v2/fluid/tests/test_compare_op.py b/python/paddle/v2/fluid/tests/test_compare_op.py index bb0256694d..5d0dfab6ff 100644 --- a/python/paddle/v2/fluid/tests/test_compare_op.py +++ b/python/paddle/v2/fluid/tests/test_compare_op.py @@ -23,6 +23,9 @@ def create_test_class(op_type, typename, callback): for _type_name in {'float32', 'float64', 'int32', 'int64'}: create_test_class('less_than', _type_name, lambda _a, _b: _a < _b) + create_test_class('less_equal', 
_type_name, lambda _a, _b: _a <= _b) + create_test_class('greater_than', _type_name, lambda _a, _b: _a > _b) + create_test_class('greater_equal', _type_name, lambda _a, _b: _a >= _b) create_test_class('equal', _type_name, lambda _a, _b: _a == _b) if __name__ == '__main__': diff --git a/python/paddle/v2/fluid/tests/test_evaluator.py b/python/paddle/v2/fluid/tests/test_evaluator.py deleted file mode 100644 index 1d51205b70..0000000000 --- a/python/paddle/v2/fluid/tests/test_evaluator.py +++ /dev/null @@ -1,64 +0,0 @@ -from paddle.v2.fluid.evaluator import Evaluator -from paddle.v2.fluid.op import Operator -import paddle.v2.fluid.core as core -import unittest -import op_test -import numpy as np - - -class TestEvaluator(unittest.TestCase): - def setup(self, scope, inputs, outputs): - def __create_var__(var_name, arr): - np_arr = np.array(arr) - scope.var(var_name) - # tensor = var.get_tensor() - # tensor.set_dims(np_arr.shape) - - for var_name, arr in inputs.iteritems(): - __create_var__(var_name, arr) - - for var_name, arr in outputs.iteritems(): - __create_var__(var_name, arr) - - def test_evaluator(self): - - inputs = { - 'Inference': np.array([[1, 1, 1, 1, 1, 0, 0, 0, 0, 1]]).T, - 'Label': np.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0]) - } - outputs = {'Accuracy': np.array([0.9])} - out_name = 'Accuracy' - - places = [core.CPUPlace()] - if core.is_compile_gpu(): - places.append(core.GPUPlace(0)) - - for place in places: - scope = core.Scope() - self.setup(scope, inputs, outputs) - - evaluator = Evaluator( - scope, - operator='accuracy', - input='Inference', - label='Label', - output=out_name, - place=place) - op_test.set_input(scope, evaluator.op, inputs, place) - ctx = core.DeviceContext.create(place) - - for i in range(10): # simulate 10 mini-batches - evaluator.evaluate(ctx) - - actual = np.array(scope.find_var(out_name).get_tensor()) - print actual - - self.assertTrue( - np.allclose( - actual, outputs[out_name], atol=1e-5), - "output name: " + out_name + " has diff.") - - -if __name__ == '__main__': - exit(0) - unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_optimizer.py b/python/paddle/v2/fluid/tests/test_optimizer.py index 0ebf7cdf20..7b4237e7fd 100644 --- a/python/paddle/v2/fluid/tests/test_optimizer.py +++ b/python/paddle/v2/fluid/tests/test_optimizer.py @@ -198,7 +198,7 @@ class TestAdagradOptimizer(unittest.TestCase): adagrad_op = opts[0] self.assertEqual(adagrad_op.type, "adagrad") - # check accumulators + # Check accumulators accumulators = adagrad_optimizer.get_accumulators() self.assertEqual(len(accumulators), 1) self.assertTrue(adagrad_optimizer.get_moment_str() in accumulators) @@ -331,5 +331,59 @@ class TestAdamaxOptimizer(unittest.TestCase): self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate) +class TestDecayedAdagradOptimizer(unittest.TestCase): + class MockDecayedAdagrad(optimizer.DecayedAdagradOptimizer): + def get_accumulators(self): + return self._accumulators + + def get_moment_str(self): + return self._moment_acc_str + + def test_decayed_adagrad_optimizer(self): + init_program = framework.Program() + program = framework.Program() + block = program.global_block() + mul_x = block.create_parameter( + dtype="float32", shape=[5, 10], lod_level=0, name="mul.x") + mul_y = block.create_var( + dtype="float32", shape=[10, 8], lod_level=0, name="mul.y") + mul_out = block.create_var( + dtype="float32", shape=[5, 8], lod_level=0, name="mul.out") + block.append_op( + type="mul", + inputs={"X": mul_x, + "Y": mul_y}, + outputs={"Out": mul_out}, + 
attrs={"x_num_col_dims": 1}) + learning_rate = 0.01 + decayed_adagrad_optimizer = self.MockDecayedAdagrad( + learning_rate=learning_rate, decay=0.95, epsilon=1.0e-6) + params_grads = append_backward_ops(mul_out) + self.assertEqual(len(params_grads), 1) + self.assertEqual(len(decayed_adagrad_optimizer.get_accumulators()), 0) + opts = decayed_adagrad_optimizer.create_optimization_pass( + params_grads, mul_out, init_program) + self.assertEqual(len(opts), 1) + decayed_adagrad_op = opts[0] + self.assertEqual(decayed_adagrad_op.type, "decayed_adagrad") + + # Check accumulators + accumulators = decayed_adagrad_optimizer.get_accumulators() + self.assertEqual(len(accumulators), 1) + self.assertTrue( + decayed_adagrad_optimizer.get_moment_str() in accumulators) + moment_acc = accumulators[decayed_adagrad_optimizer.get_moment_str()] + self.assertEqual(len(moment_acc), 1) + self.assertTrue(mul_x.name in moment_acc) + + # Check init_program + init_ops = init_program.global_block().ops + self.assertEqual(len(init_ops), 2) + self.assertEqual(init_ops[0].type, "fill_constant") + self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate) + self.assertEqual(init_ops[1].type, "fill_constant") + self.assertAlmostEqual(init_ops[1].attr('value'), 0.0) + + if __name__ == '__main__': unittest.main()