diff --git a/paddle/fluid/operators/conv_cudnn_op_cache.h b/paddle/fluid/operators/conv_cudnn_op_cache.h
index ddddb7f864..23a471cfa0 100644
--- a/paddle/fluid/operators/conv_cudnn_op_cache.h
+++ b/paddle/fluid/operators/conv_cudnn_op_cache.h
@@ -40,11 +40,6 @@ static constexpr size_t kNUM_CUDNN_BWD_FILTER_ALGS =
     CUDNN_CONVOLUTION_BWD_FILTER_ALGO_COUNT;
 static constexpr size_t kNUM_CUDNN_BWD_DATA_ALGS =
     CUDNN_CONVOLUTION_BWD_DATA_ALGO_COUNT;
-#else
-// cuDNN v5 has no CUDNN_CONVOLUTION_FWD_ALGO_COUNT etc.
-static constexpr size_t kNUM_CUDNN_FWD_ALGS = 7;
-static constexpr size_t kNUM_CUDNN_BWD_FILTER_ALGS = 4;
-static constexpr size_t kNUM_CUDNN_BWD_DATA_ALGS = 5;
 #endif
 
 }  // namespace operators
diff --git a/paddle/fluid/operators/cudnn_lstm_cache.h b/paddle/fluid/operators/cudnn_lstm_cache.h
index 3181e4b1d9..b7859237e7 100644
--- a/paddle/fluid/operators/cudnn_lstm_cache.h
+++ b/paddle/fluid/operators/cudnn_lstm_cache.h
@@ -85,20 +85,12 @@ class ScopedRNNBase {
     dropout_desc_.descriptor(handle, place, initialized_, dropout_prob_,
                              dropout_state, seed_, state_size);
 
-// ------------------- cudnn rnn descriptors ---------------------
-#if CUDNN_VERSION >= 6000
+    // ------------------- cudnn rnn descriptors ---------------------
     PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetRNNDescriptor_v6(
         handle, rnn_desc_.desc(), hidden_size_, num_layers_,
         dropout_desc_.desc(), CUDNN_LINEAR_INPUT,
         is_bidirec_ ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL, CUDNN_LSTM,
         CUDNN_RNN_ALGO_STANDARD, cudnn_type));
-#else
-    PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetRNNDescriptor(
-        rnn_desc_.desc(), hidden_size_, num_layers_, dropout_desc_.desc(),
-        CUDNN_LINEAR_INPUT,
-        is_bidirec_ ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL, CUDNN_LSTM,
-        cudnn_type));
-#endif
 
 #if CUDNN_VERSION >= 7201
     if (!sequence_length.empty()) {
diff --git a/paddle/fluid/operators/cudnn_rnn_cache.h b/paddle/fluid/operators/cudnn_rnn_cache.h
index 13a3e7d09b..a6a23a91c7 100644
--- a/paddle/fluid/operators/cudnn_rnn_cache.h
+++ b/paddle/fluid/operators/cudnn_rnn_cache.h
@@ -168,18 +168,11 @@ struct CudnnRNNCache {
     PADDLE_ENFORCE_CUDA_SUCCESS(
         platform::dynload::cudnnCreateRNNDescriptor(&rnn_desc_));
 
-#if CUDNN_VERSION >= 6000
     PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetRNNDescriptor_v6(
         handle, rnn_desc_, hidden_size_, num_layers_, dropout_desc_,
         CUDNN_LINEAR_INPUT,
         is_bidirec_ ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL, CUDNN_LSTM,
         CUDNN_RNN_ALGO_STANDARD, cudnn_type));
-#else
-    PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetRNNDescriptor(
-        rnn_desc_, hidden_size_, num_layers_, dropout_desc_, CUDNN_LINEAR_INPUT,
-        is_bidirec_ ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL, CUDNN_LSTM,
-        cudnn_type));
-#endif
 
     PADDLE_ENFORCE_CUDA_SUCCESS(
         platform::dynload::cudnnCreateFilterDescriptor(&w_desc_));
diff --git a/paddle/fluid/platform/cudnn_helper.h b/paddle/fluid/platform/cudnn_helper.h
index af0df2efc5..6c3c96b68c 100644
--- a/paddle/fluid/platform/cudnn_helper.h
+++ b/paddle/fluid/platform/cudnn_helper.h
@@ -91,30 +91,6 @@ enum class ActivationMode {
   kBandPass,
 };
 
-#if CUDNN_VERSION < 6000
-#pragma message "CUDNN version under 6.0 is supported at best effort."
-#pragma message "We strongly encourage you to move to 6.0 and above."
-#pragma message "This message is intended to annoy you enough to update."
-#pragma message \
-    "please see https://docs.nvidia.com/deeplearning/sdk/cudnn-release-notes/"
-
-inline cudnnPoolingMode_t GetPoolingMode(const PoolingMode& mode) {
-  switch (mode) {
-    case PoolingMode::kMaximumDeterministic:
-      return CUDNN_POOLING_MAX;
-    case PoolingMode::kAverageExclusive:
-      return CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
-    case PoolingMode::kAverageInclusive:
-      return CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
-    case PoolingMode::kMaximum:
-      return CUDNN_POOLING_MAX;
-    default:
-      PADDLE_THROW(
-          platform::errors::Unimplemented("Unexpected CUDNN pooling mode."));
-  }
-}
-#else
-
 inline cudnnPoolingMode_t GetPoolingMode(const PoolingMode& mode) {
   switch (mode) {
     case PoolingMode::kMaximumDeterministic:
@@ -130,7 +106,6 @@ inline cudnnPoolingMode_t GetPoolingMode(const PoolingMode& mode) {
           platform::errors::Unimplemented("Unexpected CUDNN pooling mode."));
   }
 }
-#endif  // CUDNN_VERSION < 6000
 
 inline ActivationMode StringToActivationMode(const std::string& str) {
   if (str == "identity") {
@@ -471,19 +446,6 @@ class ScopedConvolutionDescriptor {
                             "of pads is %d, size of dilations is %d.",
                             pads.size(), dilations.size()));
 
-#if !CUDNN_VERSION_MIN(6, 0, 0)
-    // cudnn v5 does not support dilation conv, the argument is called upscale
-    // instead of dilations and it is must be one.
-    for (size_t i = 0; i < dilations.size(); ++i) {
-      PADDLE_ENFORCE_EQ(dilations[i], 1,
-                        platform::errors::InvalidArgument(
-                            "Dilations conv is not supported in this cuDNN "
-                            "version(%d.%d.%d).",
-                            CUDNN_VERSION / 1000, CUDNN_VERSION % 1000 / 100,
-                            CUDNN_VERSION % 100));
-    }
-#endif
-
     cudnnDataType_t compute_type =
         (type == CUDNN_DATA_DOUBLE) ? CUDNN_DATA_DOUBLE : CUDNN_DATA_FLOAT;
     PADDLE_ENFORCE_CUDA_SUCCESS(dynload::cudnnSetConvolutionNdDescriptor(