From 88881955e729596bf916bc8382df8fd8b5bc8e0a Mon Sep 17 00:00:00 2001
From: "liuwei(DLTP)"
Date: Mon, 14 Jan 2019 10:24:18 +0800
Subject: [PATCH 1/5] fix github issue 15267 test=develop

---
 python/paddle/fluid/layers/nn.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index a4787e769f..99e1c2adfd 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -8480,7 +8480,7 @@ def shape(input):
     helper = LayerHelper('shape', **locals())
     out = helper.create_variable_for_type_inference(
-        dtype=helper.input_dtype('input'))
+        dtype='int32')
     helper.append_op(
         type='shape', inputs={'Input': input}, outputs={'Out': out})

From b758fa50b2155121f94b043967eb36ebb0c87cf6 Mon Sep 17 00:00:00 2001
From: hjchen2
Date: Mon, 14 Jan 2019 11:09:27 +0800
Subject: [PATCH 2/5] fix github issue 15267 test=develop

---
 python/paddle/fluid/layers/nn.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index a4787e769f..56971cff43 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -8479,8 +8479,7 @@ def shape(input):
     """
     helper = LayerHelper('shape', **locals())
-    out = helper.create_variable_for_type_inference(
-        dtype=helper.input_dtype('input'))
+    out = helper.create_variable_for_type_inference(dtype='int32')
     helper.append_op(
         type='shape', inputs={'Input': input}, outputs={'Out': out})

From 413543eb8f9ff6939eee457974034afcb3e08718 Mon Sep 17 00:00:00 2001
From: Wei Liu
Date: Fri, 18 Jan 2019 09:52:36 +0800
Subject: [PATCH 3/5] print peak memory usage

---
 paddle/fluid/memory/detail/system_allocator.cc | 5 +++++
 paddle/fluid/memory/detail/system_allocator.h  | 3 +++
 2 files changed, 8 insertions(+)

diff --git a/paddle/fluid/memory/detail/system_allocator.cc b/paddle/fluid/memory/detail/system_allocator.cc
index 3e8fb83e9d..14dcaf756f 100644
--- a/paddle/fluid/memory/detail/system_allocator.cc
+++ b/paddle/fluid/memory/detail/system_allocator.cc
@@ -117,6 +117,11 @@ void* GPUAllocator::Alloc(size_t* index, size_t size) {
   if (result == cudaSuccess) {
     *index = 0;
     gpu_alloc_size_ += size;
+    if (gpu_alloc_size_ > s_memoryMap[gpu_id_]) {
+      s_memoryMap[gpu_id_] = gpu_alloc_size_;
+      VLOG(3) << "device: " << gpu_id_
+              << " maximum memory size : " << (gpu_alloc_size_ >> 20) << " MiB";
+    }
     return p;
   } else {
     LOG(WARNING)
diff --git a/paddle/fluid/memory/detail/system_allocator.h b/paddle/fluid/memory/detail/system_allocator.h
index a0386a2dad..1ac1df6de7 100644
--- a/paddle/fluid/memory/detail/system_allocator.h
+++ b/paddle/fluid/memory/detail/system_allocator.h
@@ -15,6 +15,7 @@ limitations under the License. */
 
 #pragma once
 #include <stddef.h>  // for size_t
+#include <unordered_map>
 
 namespace paddle {
 namespace memory {
@@ -44,6 +45,8 @@ class CPUAllocator : public SystemAllocator {
 #ifdef PADDLE_WITH_CUDA
 class GPUAllocator : public SystemAllocator {
  public:
+  std::unordered_map<int, size_t> s_memoryMap;
+
   explicit GPUAllocator(int gpu_id) : gpu_id_(gpu_id) {}
 
   virtual void* Alloc(size_t* index, size_t size);

From b1f97a6fa9186266b9a76c8157ab80801e5cf9f0 Mon Sep 17 00:00:00 2001
From: liuwei1031
Date: Wed, 13 Feb 2019 04:56:42 +0000
Subject: [PATCH 4/5] fix security issue 27, 38 test=develop

---
 paddle/fluid/framework/ir/infer_clean_graph_pass.cc | 1 +
 paddle/fluid/operators/random_crop_op.h             | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/paddle/fluid/framework/ir/infer_clean_graph_pass.cc b/paddle/fluid/framework/ir/infer_clean_graph_pass.cc
index 7713ed1eab..6607c026a7 100644
--- a/paddle/fluid/framework/ir/infer_clean_graph_pass.cc
+++ b/paddle/fluid/framework/ir/infer_clean_graph_pass.cc
@@ -37,6 +37,7 @@ class InferCleanGraphPass : public FusePassBase {
     std::unordered_set<const Node*> invalid_nodes;
     int valid_op = 0;
     for (auto* node : graph->Nodes()) {
+      PADDLE_ENFORCE_NOT_NULL(node);
       if (is_valid_node(node)) {
         invalid_nodes.insert(node);
       } else if (node->IsOp()) {
diff --git a/paddle/fluid/operators/random_crop_op.h b/paddle/fluid/operators/random_crop_op.h
index d68ba9d661..ee034b2705 100644
--- a/paddle/fluid/operators/random_crop_op.h
+++ b/paddle/fluid/operators/random_crop_op.h
@@ -121,7 +121,7 @@ struct RandomCropFunctor {
   HOSTDEVICE void operator()(size_t ins_idx) {
     typename Random<DeviceContext>::Engine engine(seed_);
     engine.discard(ins_idx * (rank_ - num_batchsize_dims_));
-    size_t offsets[9];
+    size_t offsets[9] = {};
     for (int i = num_batchsize_dims_; i < rank_; ++i) {
       typename Random<DeviceContext>::template UniformIntDist<size_t> dist(
           0, x_dims_[i] - out_dims_[i]);

From 14fe9219dc9a5769215e471d28b9538b912453bf Mon Sep 17 00:00:00 2001
From: liuwei1031
Date: Wed, 13 Feb 2019 05:03:24 +0000
Subject: [PATCH 5/5] reset unexpected changes, test=develop

---
 paddle/fluid/memory/detail/system_allocator.cc | 5 -----
 paddle/fluid/memory/detail/system_allocator.h  | 3 ---
 2 files changed, 8 deletions(-)

diff --git a/paddle/fluid/memory/detail/system_allocator.cc b/paddle/fluid/memory/detail/system_allocator.cc
index 3c82c8aa19..197d1c2f21 100644
--- a/paddle/fluid/memory/detail/system_allocator.cc
+++ b/paddle/fluid/memory/detail/system_allocator.cc
@@ -117,11 +117,6 @@ void* GPUAllocator::Alloc(size_t* index, size_t size) {
   if (result == cudaSuccess) {
     *index = 0;
     gpu_alloc_size_ += size;
-    if (gpu_alloc_size_ > s_memoryMap[gpu_id_]) {
-      s_memoryMap[gpu_id_] = gpu_alloc_size_;
-      VLOG(3) << "device: " << gpu_id_
-              << " maximum memory size : " << (gpu_alloc_size_ >> 20) << " MiB";
-    }
     return p;
   } else {
     LOG(WARNING)
diff --git a/paddle/fluid/memory/detail/system_allocator.h b/paddle/fluid/memory/detail/system_allocator.h
index 1ac1df6de7..a0386a2dad 100644
--- a/paddle/fluid/memory/detail/system_allocator.h
+++ b/paddle/fluid/memory/detail/system_allocator.h
@@ -15,7 +15,6 @@ limitations under the License. */
 
 #pragma once
 #include <stddef.h>  // for size_t
-#include <unordered_map>
 
 namespace paddle {
 namespace memory {
@@ -45,8 +44,6 @@ class CPUAllocator : public SystemAllocator {
 #ifdef PADDLE_WITH_CUDA
 class GPUAllocator : public SystemAllocator {
  public:
-  std::unordered_map<int, size_t> s_memoryMap;
-
   explicit GPUAllocator(int gpu_id) : gpu_id_(gpu_id) {}
 
   virtual void* Alloc(size_t* index, size_t size);
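A minimal usage sketch (not part of the patch series) illustrating the user-visible effect of patches 1 and 2: fluid.layers.shape now yields an int32 tensor instead of one typed after its input. It assumes the Fluid 1.x Python API of this period; the program and variable names are illustrative only.

    import numpy as np
    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[3, 100, 100], dtype='float32')
    shape_of_x = fluid.layers.shape(x)  # output dtype is 'int32' after this fix

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    result, = exe.run(feed={'x': np.random.rand(1, 3, 100, 100).astype('float32')},
                      fetch_list=[shape_of_x])
    print(result.dtype, result)  # expected: int32 [  1   3 100 100]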