From 251c6032fb6bce72751b6b32e34368a17b407e6e Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Mon, 15 Jan 2018 19:49:14 +0800 Subject: [PATCH] set use_cudnn as default --- paddle/operators/conv_op.cc | 8 ++++++-- paddle/operators/conv_op.h | 1 - paddle/operators/conv_transpose_op.cc | 8 ++++++-- paddle/operators/conv_transpose_op.h | 1 - paddle/operators/pool_op.cc | 8 ++++++-- paddle/operators/pool_op.h | 1 - paddle/platform/dynload/cudnn.cc | 4 ---- python/paddle/v2/fluid/layers/nn.py | 6 +++--- python/paddle/v2/fluid/nets.py | 6 +++--- 9 files changed, 24 insertions(+), 19 deletions(-) diff --git a/paddle/operators/conv_op.cc b/paddle/operators/conv_op.cc index a6d3ebb4f5..9ae3e87281 100644 --- a/paddle/operators/conv_op.cc +++ b/paddle/operators/conv_op.cc @@ -70,7 +70,9 @@ void ConvOp::InferShape(framework::InferShapeContext* ctx) const { framework::OpKernelType ConvOp::GetExpectedKernelType( const framework::ExecutionContext& ctx) const { bool use_cudnn = ctx.Attr<bool>("use_cudnn"); - use_cudnn &= platform::dynload::HasCUDNN(); + if (paddle::platform::is_cpu_place(ctx.GetPlace())) { + use_cudnn = false; + } framework::LibraryType library_; if (use_cudnn) { library_ = framework::LibraryType::kCUDNN; @@ -284,7 +286,9 @@ void ConvOpGrad::InferShape(framework::InferShapeContext* ctx) const { framework::OpKernelType ConvOpGrad::GetExpectedKernelType( const framework::ExecutionContext& ctx) const { bool use_cudnn = ctx.Attr<bool>("use_cudnn"); - use_cudnn &= platform::dynload::HasCUDNN(); + if (paddle::platform::is_cpu_place(ctx.GetPlace())) { + use_cudnn = false; + } framework::LibraryType library_; if (use_cudnn) { library_ = framework::LibraryType::kCUDNN; diff --git a/paddle/operators/conv_op.h b/paddle/operators/conv_op.h index 2a6fad6582..5a8933e791 100644 --- a/paddle/operators/conv_op.h +++ b/paddle/operators/conv_op.h @@ -19,7 +19,6 @@ limitations under the License. 
*/ #include "paddle/operators/math/im2col.h" #include "paddle/operators/math/math_function.h" #include "paddle/operators/math/vol2col.h" -#include "paddle/platform/dynload/cudnn.h" namespace paddle { namespace operators { diff --git a/paddle/operators/conv_transpose_op.cc b/paddle/operators/conv_transpose_op.cc index 53153c0296..46f79b1701 100644 --- a/paddle/operators/conv_transpose_op.cc +++ b/paddle/operators/conv_transpose_op.cc @@ -61,7 +61,9 @@ void ConvTransposeOp::InferShape(framework::InferShapeContext* ctx) const { framework::OpKernelType ConvTransposeOp::GetExpectedKernelType( const framework::ExecutionContext& ctx) const { bool use_cudnn = ctx.Attr<bool>("use_cudnn"); - use_cudnn &= platform::dynload::HasCUDNN(); + if (paddle::platform::is_cpu_place(ctx.GetPlace())) { + use_cudnn = false; + } framework::LibraryType library_; if (use_cudnn) { library_ = framework::LibraryType::kCUDNN; @@ -264,7 +266,9 @@ void ConvTransposeOpGrad::InferShape(framework::InferShapeContext* ctx) const { framework::OpKernelType ConvTransposeOpGrad::GetExpectedKernelType( const framework::ExecutionContext& ctx) const { bool use_cudnn = ctx.Attr<bool>("use_cudnn"); - use_cudnn &= platform::dynload::HasCUDNN(); + if (paddle::platform::is_cpu_place(ctx.GetPlace())) { + use_cudnn = false; + } framework::LibraryType library_; if (use_cudnn) { library_ = framework::LibraryType::kCUDNN; diff --git a/paddle/operators/conv_transpose_op.h b/paddle/operators/conv_transpose_op.h index f43d652fe1..a42ade41b1 100644 --- a/paddle/operators/conv_transpose_op.h +++ b/paddle/operators/conv_transpose_op.h @@ -19,7 +19,6 @@ limitations under the License. 
*/ #include "paddle/operators/math/im2col.h" #include "paddle/operators/math/math_function.h" #include "paddle/operators/math/vol2col.h" -#include "paddle/platform/dynload/cudnn.h" namespace paddle { namespace operators { diff --git a/paddle/operators/pool_op.cc b/paddle/operators/pool_op.cc index e0f9ea7aa0..648a1dfb56 100644 --- a/paddle/operators/pool_op.cc +++ b/paddle/operators/pool_op.cc @@ -64,7 +64,9 @@ void PoolOp::InferShape(framework::InferShapeContext *ctx) const { framework::OpKernelType PoolOp::GetExpectedKernelType( const framework::ExecutionContext &ctx) const { bool use_cudnn = ctx.Attr<bool>("use_cudnn"); - use_cudnn &= platform::dynload::HasCUDNN(); + if (paddle::platform::is_cpu_place(ctx.GetPlace())) { + use_cudnn = false; + } framework::LibraryType library_; if (use_cudnn) { library_ = framework::LibraryType::kCUDNN; @@ -89,7 +91,9 @@ void PoolOpGrad::InferShape(framework::InferShapeContext *ctx) const { framework::OpKernelType PoolOpGrad::GetExpectedKernelType( const framework::ExecutionContext &ctx) const { bool use_cudnn = ctx.Attr<bool>("use_cudnn"); - use_cudnn &= platform::dynload::HasCUDNN(); + if (paddle::platform::is_cpu_place(ctx.GetPlace())) { + use_cudnn = false; + } framework::LibraryType library_; if (use_cudnn) { library_ = framework::LibraryType::kCUDNN; diff --git a/paddle/operators/pool_op.h b/paddle/operators/pool_op.h index e0668f1360..c3d82ecbde 100644 --- a/paddle/operators/pool_op.h +++ b/paddle/operators/pool_op.h @@ -18,7 +18,6 @@ limitations under the License. 
*/ #include "paddle/framework/op_registry.h" #include "paddle/operators/math/math_function.h" #include "paddle/operators/math/pooling.h" -#include "paddle/platform/dynload/cudnn.h" namespace paddle { namespace operators { diff --git a/paddle/platform/dynload/cudnn.cc b/paddle/platform/dynload/cudnn.cc index fafdf5e78a..701f6240fe 100644 --- a/paddle/platform/dynload/cudnn.cc +++ b/paddle/platform/dynload/cudnn.cc @@ -57,10 +57,6 @@ void EnforceCUDNNLoaded(const char* fn_name) { bool HasCUDNN() { return true; } #endif -#ifndef PADDLE_WITH_CUDA -bool HasCUDNN() { return false; } -#endif - } // namespace dynload } // namespace platform } // namespace paddle diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py index df2233089c..0cfa011036 100644 --- a/python/paddle/v2/fluid/layers/nn.py +++ b/python/paddle/v2/fluid/layers/nn.py @@ -660,7 +660,7 @@ def conv2d(input, groups=None, param_attr=None, bias_attr=None, - use_cudnn=False, + use_cudnn=True, act=None): """ **Convlution2D Layer** @@ -938,7 +938,7 @@ def pool2d(input, pool_stride=None, pool_padding=None, global_pooling=False, - use_cudnn=False): + use_cudnn=True): """ This function adds the operator for pooling in 2 dimensions, using the pooling configurations mentioned in input parameters. @@ -1088,7 +1088,7 @@ def conv2d_transpose(input, stride=None, dilation=None, param_attr=None, - use_cudnn=False): + use_cudnn=True): """ The transpose of conv2d layer. 
diff --git a/python/paddle/v2/fluid/nets.py b/python/paddle/v2/fluid/nets.py index 8ac58fd733..327d2ff2da 100644 --- a/python/paddle/v2/fluid/nets.py +++ b/python/paddle/v2/fluid/nets.py @@ -14,7 +14,7 @@ def simple_img_conv_pool(input, act, param_attr=None, pool_type='max', - use_cudnn=False): + use_cudnn=True): conv_out = layers.conv2d( input=input, num_filters=num_filters, @@ -41,10 +41,10 @@ def img_conv_group(input, param_attr=None, conv_with_batchnorm=False, conv_batchnorm_drop_rate=None, - conv_use_cudnn=False, + conv_use_cudnn=True, pool_stride=1, pool_type=None, - pool_use_cudnn=False): + pool_use_cudnn=True): """ Image Convolution Group, Used for vgg net. """