From 0f0e8fe8744c0858050c596afca36588b196ad55 Mon Sep 17 00:00:00 2001
From: limingqi107
Date: Thu, 23 Apr 2020 20:40:35 +0800
Subject: [PATCH] gpu dynamic memory pool can not reuse allReduce in multi-stream

---
 mindspore/ccsrc/device/gpu/gpu_kernel_runtime.cc | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.cc b/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.cc
index 5dd4facb25..b3b364b00c 100644
--- a/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.cc
+++ b/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.cc
@@ -322,6 +322,9 @@ void GPUKernelRuntime::FreeKernelDynamicRes(const mindspore::AnfNodePtr &kernel,
   MS_EXCEPTION_IF_NULL(mem_reuse_util_ptr);
   auto cnode = kernel->cast<CNodePtr>();
   MS_EXCEPTION_IF_NULL(cnode);
+  if (AnfAlgo::GetCNodeName(kernel) == kAllReduceOpName) {
+    return;
+  }
   // Free the input of kernel by reference count.
   for (size_t i = 0; i < AnfAlgo::GetInputTensorNum(kernel); ++i) {
     auto kernel_ref_count_ptr = mem_reuse_util_ptr->GetKernelInputRef(cnode, i);
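
Below is a minimal, self-contained sketch of the idea behind this change, written with hypothetical stand-in types rather than MindSpore's real classes: a kernel that runs asynchronously on a separate communication stream (AllReduce) is skipped by the free path, so the dynamic memory pool cannot hand its buffers to another kernel while the collective may still be in flight. Only the op-name guard mirrors the patch; Buffer, DynamicMemPool, and the function signature are illustrative assumptions.

    #include <iostream>
    #include <string>
    #include <vector>

    // Hypothetical stand-ins for the runtime's buffer and memory-pool types.
    struct Buffer {
      int id;
    };

    class DynamicMemPool {
     public:
      void Free(const Buffer &buf) { std::cout << "freed buffer " << buf.id << "\n"; }
    };

    // Releases a kernel's input buffers back to the pool, except for AllReduce,
    // whose buffers may still be read by the communication stream and therefore
    // must not be made reusable here.
    void FreeKernelDynamicRes(const std::string &kernel_name,
                              const std::vector<Buffer> &inputs,
                              DynamicMemPool *pool) {
      if (kernel_name == "AllReduce") {  // same guard the patch adds
        return;
      }
      for (const auto &buf : inputs) {
        pool->Free(buf);
      }
    }

    int main() {
      DynamicMemPool pool;
      FreeKernelDynamicRes("Conv2D", {{0}}, &pool);     // prints "freed buffer 0"
      FreeKernelDynamicRes("AllReduce", {{1}}, &pool);  // skipped: buffer 1 stays live
      return 0;
    }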