From eb3d58e4bf029ba33ab581566af747d955b6c13c Mon Sep 17 00:00:00 2001 From: Jonathan Yan Date: Mon, 16 Nov 2020 12:05:53 -0500 Subject: [PATCH] cast gpu kernel --- .../gpu/arrays/cast_gpu_kernel.cc | 321 ++++++++++++++++++ .../gpu/arrays/cast_gpu_kernel.h | 82 +++++ .../gpu/cuda_impl/cast_impl.cu | 262 ++++++++++++++ .../gpu/cuda_impl/cast_impl.cuh | 25 ++ 4 files changed, 690 insertions(+) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/cast_gpu_kernel.cc create mode 100644 mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/cast_gpu_kernel.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/cast_impl.cu create mode 100644 mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/cast_impl.cuh diff --git a/mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/cast_gpu_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/cast_gpu_kernel.cc new file mode 100644 index 0000000000..9e5b5c769e --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/cast_gpu_kernel.cc @@ -0,0 +1,321 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "backend/kernel_compiler/gpu/arrays/cast_gpu_kernel.h" + +namespace mindspore { +namespace kernel { +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt8).AddOutputAttr(kNumberTypeInt8), CastGpuKernel, + int8_t, int8_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt8).AddOutputAttr(kNumberTypeInt16), CastGpuKernel, + int8_t, int16_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt8).AddOutputAttr(kNumberTypeInt32), CastGpuKernel, + int8_t, int32_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt8).AddOutputAttr(kNumberTypeInt64), CastGpuKernel, + int8_t, int64_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt8).AddOutputAttr(kNumberTypeUInt8), CastGpuKernel, + int8_t, uint8_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt8).AddOutputAttr(kNumberTypeUInt16), CastGpuKernel, + int8_t, uint16_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt8).AddOutputAttr(kNumberTypeUInt32), CastGpuKernel, + int8_t, uint32_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt8).AddOutputAttr(kNumberTypeUInt64), CastGpuKernel, + int8_t, uint64_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt8).AddOutputAttr(kNumberTypeFloat32), CastGpuKernel, + int8_t, float) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt8).AddOutputAttr(kNumberTypeFloat64), CastGpuKernel, + int8_t, double) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt8).AddOutputAttr(kNumberTypeFloat16), CastGpuKernel, + int8_t, half) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt8).AddOutputAttr(kNumberTypeBool), CastGpuKernel, + int8_t, bool) + +MS_REG_GPU_KERNEL_TWO(Cast, 
KernelAttr().AddInputAttr(kNumberTypeInt16).AddOutputAttr(kNumberTypeInt8), CastGpuKernel, + int16_t, int8_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt16).AddOutputAttr(kNumberTypeInt16), CastGpuKernel, + int16_t, int16_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt16).AddOutputAttr(kNumberTypeInt32), CastGpuKernel, + int16_t, int32_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt16).AddOutputAttr(kNumberTypeInt64), CastGpuKernel, + int16_t, int64_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt16).AddOutputAttr(kNumberTypeUInt8), CastGpuKernel, + int16_t, uint8_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt16).AddOutputAttr(kNumberTypeUInt16), CastGpuKernel, + int16_t, uint16_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt16).AddOutputAttr(kNumberTypeUInt32), CastGpuKernel, + int16_t, uint32_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt16).AddOutputAttr(kNumberTypeUInt64), CastGpuKernel, + int16_t, uint64_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt16).AddOutputAttr(kNumberTypeFloat32), + CastGpuKernel, int16_t, float) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt16).AddOutputAttr(kNumberTypeFloat64), + CastGpuKernel, int16_t, double) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt16).AddOutputAttr(kNumberTypeFloat16), + CastGpuKernel, int16_t, half) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt16).AddOutputAttr(kNumberTypeBool), CastGpuKernel, + int16_t, bool) + +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt8), CastGpuKernel, + int32_t, int8_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt16), CastGpuKernel, + int32_t, int16_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt32), CastGpuKernel, + int32_t, int32_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt64), CastGpuKernel, + int32_t, int64_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeUInt8), CastGpuKernel, + int32_t, uint8_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeUInt16), CastGpuKernel, + int32_t, uint16_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeUInt32), CastGpuKernel, + int32_t, uint32_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeUInt64), CastGpuKernel, + int32_t, uint64_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeFloat32), + CastGpuKernel, int32_t, float) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeFloat64), + CastGpuKernel, int32_t, double) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeFloat16), + CastGpuKernel, int32_t, half) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeBool), CastGpuKernel, + int32_t, bool) + +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeInt8), CastGpuKernel, + int64_t, int8_t) +MS_REG_GPU_KERNEL_TWO(Cast, 
KernelAttr().AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeInt16), CastGpuKernel, + int64_t, int16_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeInt32), CastGpuKernel, + int64_t, int32_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeInt64), CastGpuKernel, + int64_t, int64_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeUInt8), CastGpuKernel, + int64_t, uint8_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeUInt16), CastGpuKernel, + int64_t, uint16_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeUInt32), CastGpuKernel, + int64_t, uint32_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeUInt64), CastGpuKernel, + int64_t, uint64_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeFloat32), + CastGpuKernel, int64_t, float) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeFloat64), + CastGpuKernel, int64_t, double) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeFloat16), + CastGpuKernel, int64_t, half) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeBool), CastGpuKernel, + int64_t, bool) + +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt8).AddOutputAttr(kNumberTypeInt8), CastGpuKernel, + uint8_t, int8_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt8).AddOutputAttr(kNumberTypeInt16), CastGpuKernel, + uint8_t, int16_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt8).AddOutputAttr(kNumberTypeInt32), CastGpuKernel, + uint8_t, int32_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt8).AddOutputAttr(kNumberTypeInt64), CastGpuKernel, + uint8_t, int64_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt8).AddOutputAttr(kNumberTypeUInt8), CastGpuKernel, + uint8_t, uint8_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt8).AddOutputAttr(kNumberTypeUInt16), CastGpuKernel, + uint8_t, uint16_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt8).AddOutputAttr(kNumberTypeUInt32), CastGpuKernel, + uint8_t, uint32_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt8).AddOutputAttr(kNumberTypeUInt64), CastGpuKernel, + uint8_t, uint64_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt8).AddOutputAttr(kNumberTypeFloat32), + CastGpuKernel, uint8_t, float) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt8).AddOutputAttr(kNumberTypeFloat64), + CastGpuKernel, uint8_t, double) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt8).AddOutputAttr(kNumberTypeFloat16), + CastGpuKernel, uint8_t, half) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt8).AddOutputAttr(kNumberTypeBool), CastGpuKernel, + uint8_t, bool) + +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt16).AddOutputAttr(kNumberTypeInt8), CastGpuKernel, + uint16_t, int8_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt16).AddOutputAttr(kNumberTypeInt16), CastGpuKernel, + uint16_t, int16_t) +MS_REG_GPU_KERNEL_TWO(Cast, 
KernelAttr().AddInputAttr(kNumberTypeUInt16).AddOutputAttr(kNumberTypeInt32), CastGpuKernel, + uint16_t, int32_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt16).AddOutputAttr(kNumberTypeInt64), CastGpuKernel, + uint16_t, int64_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt16).AddOutputAttr(kNumberTypeUInt8), CastGpuKernel, + uint16_t, uint8_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt16).AddOutputAttr(kNumberTypeUInt16), + CastGpuKernel, uint16_t, uint16_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt16).AddOutputAttr(kNumberTypeUInt32), + CastGpuKernel, uint16_t, uint32_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt16).AddOutputAttr(kNumberTypeUInt64), + CastGpuKernel, uint16_t, uint64_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt16).AddOutputAttr(kNumberTypeFloat32), + CastGpuKernel, uint16_t, float) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt16).AddOutputAttr(kNumberTypeFloat64), + CastGpuKernel, uint16_t, double) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt16).AddOutputAttr(kNumberTypeFloat16), + CastGpuKernel, uint16_t, half) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt16).AddOutputAttr(kNumberTypeBool), CastGpuKernel, + uint16_t, bool) + +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt32).AddOutputAttr(kNumberTypeInt8), CastGpuKernel, + uint32_t, int8_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt32).AddOutputAttr(kNumberTypeInt16), CastGpuKernel, + uint32_t, int16_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt32).AddOutputAttr(kNumberTypeInt32), CastGpuKernel, + uint32_t, int32_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt32).AddOutputAttr(kNumberTypeInt64), CastGpuKernel, + uint32_t, int64_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt32).AddOutputAttr(kNumberTypeUInt8), CastGpuKernel, + uint32_t, uint8_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt32).AddOutputAttr(kNumberTypeUInt16), + CastGpuKernel, uint32_t, uint16_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt32).AddOutputAttr(kNumberTypeUInt32), + CastGpuKernel, uint32_t, uint32_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt32).AddOutputAttr(kNumberTypeUInt64), + CastGpuKernel, uint32_t, uint64_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt32).AddOutputAttr(kNumberTypeFloat32), + CastGpuKernel, uint32_t, float) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt32).AddOutputAttr(kNumberTypeFloat64), + CastGpuKernel, uint32_t, double) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt32).AddOutputAttr(kNumberTypeFloat16), + CastGpuKernel, uint32_t, half) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt32).AddOutputAttr(kNumberTypeBool), CastGpuKernel, + uint32_t, bool) + +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt64).AddOutputAttr(kNumberTypeInt8), CastGpuKernel, + uint64_t, int8_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt64).AddOutputAttr(kNumberTypeInt16), CastGpuKernel, + uint64_t, int16_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt64).AddOutputAttr(kNumberTypeInt32), CastGpuKernel, + uint64_t, int32_t) 
+MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt64).AddOutputAttr(kNumberTypeInt64), CastGpuKernel, + uint64_t, int64_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt64).AddOutputAttr(kNumberTypeUInt8), CastGpuKernel, + uint64_t, uint8_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt64).AddOutputAttr(kNumberTypeUInt16), + CastGpuKernel, uint64_t, uint16_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt64).AddOutputAttr(kNumberTypeUInt32), + CastGpuKernel, uint64_t, uint32_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt64).AddOutputAttr(kNumberTypeUInt64), + CastGpuKernel, uint64_t, uint64_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt64).AddOutputAttr(kNumberTypeFloat32), + CastGpuKernel, uint64_t, float) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt64).AddOutputAttr(kNumberTypeFloat64), + CastGpuKernel, uint64_t, double) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt64).AddOutputAttr(kNumberTypeFloat16), + CastGpuKernel, uint64_t, half) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeUInt64).AddOutputAttr(kNumberTypeBool), CastGpuKernel, + uint64_t, bool) + +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeInt8), CastGpuKernel, + half, int8_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeInt16), + CastGpuKernel, half, int16_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeInt32), + CastGpuKernel, half, int32_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeInt64), + CastGpuKernel, half, int64_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeUInt8), + CastGpuKernel, half, uint8_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeUInt16), + CastGpuKernel, half, uint16_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeUInt32), + CastGpuKernel, half, uint32_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeUInt64), + CastGpuKernel, half, uint64_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat32), + CastGpuKernel, half, float) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat64), + CastGpuKernel, half, double) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat16), + CastGpuKernel, half, half) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeBool), CastGpuKernel, + half, bool) + +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeInt8), CastGpuKernel, + float, int8_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeInt16), + CastGpuKernel, float, int16_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeInt32), + CastGpuKernel, float, int32_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeInt64), + CastGpuKernel, float, int64_t) 
+MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeUInt8), + CastGpuKernel, float, uint8_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeUInt16), + CastGpuKernel, float, uint16_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeUInt32), + CastGpuKernel, float, uint32_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeUInt64), + CastGpuKernel, float, uint64_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32), + CastGpuKernel, float, float) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat64), + CastGpuKernel, float, double) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat16), + CastGpuKernel, float, half) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeBool), CastGpuKernel, + float, bool) + +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeInt8), CastGpuKernel, + double, int8_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeInt16), + CastGpuKernel, double, int16_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeInt32), + CastGpuKernel, double, int32_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeInt64), + CastGpuKernel, double, int64_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeUInt8), + CastGpuKernel, double, uint8_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeUInt16), + CastGpuKernel, double, uint16_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeUInt32), + CastGpuKernel, double, uint32_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeUInt64), + CastGpuKernel, double, uint64_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeFloat32), + CastGpuKernel, double, float) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeFloat64), + CastGpuKernel, double, double) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeFloat16), + CastGpuKernel, double, half) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeBool), CastGpuKernel, + double, bool) + +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeBool).AddOutputAttr(kNumberTypeInt8), CastGpuKernel, + bool, int8_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeBool).AddOutputAttr(kNumberTypeInt16), CastGpuKernel, + bool, int16_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeBool).AddOutputAttr(kNumberTypeInt32), CastGpuKernel, + bool, int32_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeBool).AddOutputAttr(kNumberTypeInt64), CastGpuKernel, + bool, int64_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeBool).AddOutputAttr(kNumberTypeUInt8), CastGpuKernel, + bool, uint8_t) +MS_REG_GPU_KERNEL_TWO(Cast, 
KernelAttr().AddInputAttr(kNumberTypeBool).AddOutputAttr(kNumberTypeUInt16), CastGpuKernel, + bool, uint16_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeBool).AddOutputAttr(kNumberTypeUInt32), CastGpuKernel, + bool, uint32_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeBool).AddOutputAttr(kNumberTypeUInt64), CastGpuKernel, + bool, uint64_t) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeBool).AddOutputAttr(kNumberTypeFloat32), CastGpuKernel, + bool, float) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeBool).AddOutputAttr(kNumberTypeFloat64), CastGpuKernel, + bool, double) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeBool).AddOutputAttr(kNumberTypeFloat16), CastGpuKernel, + bool, half) +MS_REG_GPU_KERNEL_TWO(Cast, KernelAttr().AddInputAttr(kNumberTypeBool).AddOutputAttr(kNumberTypeBool), CastGpuKernel, + bool, bool) +} // namespace kernel +} // namespace mindspore diff --git a/mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/cast_gpu_kernel.h b/mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/cast_gpu_kernel.h new file mode 100644 index 0000000000..fa1efafa67 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/cast_gpu_kernel.h @@ -0,0 +1,82 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_CAST_GPU_KERNEL_H_
+#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_CAST_GPU_KERNEL_H_
+
+#include <vector>
+#include "backend/kernel_compiler/gpu/gpu_kernel.h"
+#include "backend/kernel_compiler/gpu/gpu_kernel_factory.h"
+#include "backend/kernel_compiler/gpu/cuda_impl/cast_impl.cuh"
+
+namespace mindspore {
+namespace kernel {
+template <typename S, typename T>
+class CastGpuKernel : public GpuKernel {
+ public:
+  CastGpuKernel() : input_size_(1), output_size_(1) {}
+  ~CastGpuKernel() = default;
+
+  const std::vector<size_t> &GetInputSizeList() const override { return input_size_list_; }
+  const std::vector<size_t> &GetOutputSizeList() const override { return output_size_list_; }
+  const std::vector<size_t> &GetWorkspaceSizeList() const override { return workspace_size_list_; }
+
+  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
+              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
+    S *input_addr = GetDeviceAddress<S>(inputs, 0);
+    T *output_addr = GetDeviceAddress<T>(outputs, 0);
+
+    Cast(input_size_, input_addr, output_addr, reinterpret_cast<cudaStream_t>(stream_ptr));
+    return true;
+  }
+  bool Init(const CNodePtr &kernel_node) override {
+    auto input_shapes = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
+    auto output_shapes = AnfAlgo::GetOutputInferShape(kernel_node, 0);
+    input_size_ = 1;
+    for (size_t i = 0; i < input_shapes.size(); i++) {
+      input_size_ *= input_shapes[i];
+    }
+
+    output_size_ = 1;
+    for (size_t j = 0; j < output_shapes.size(); j++) {
+      output_size_ *= output_shapes[j];
+    }
+
+    if (input_size_ != output_size_) {
+      MS_LOG(EXCEPTION) << "Input size is not equal to output size.";
+    }
+    InitSizeLists();
+    return true;
+  }
+
+ protected:
+  void InitSizeLists() override {
+    input_size_list_.push_back(input_size_ * sizeof(S));
+    output_size_list_.push_back(output_size_ * sizeof(T));
+  }
+
+ private:
+  int input_size_;
+  int output_size_;
+
+  std::vector<size_t> input_size_list_;
+  std::vector<size_t> output_size_list_;
+  std::vector<size_t> workspace_size_list_;
+};
+}  // namespace kernel
+}  // namespace mindspore
+
+#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_CAST_GPU_KERNEL_H_
diff --git a/mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/cast_impl.cu b/mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/cast_impl.cu
new file mode 100644
index 0000000000..2deecaf6e3
--- /dev/null
+++ b/mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/cast_impl.cu
@@ -0,0 +1,262 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vector>
+#include <iostream>
+
+#include "backend/kernel_compiler/gpu/cuda_impl/cast_impl.cuh"
+#include "runtime/device/gpu/cuda_common.h"
+
+// Generic cast
+template <typename S, typename T>
+__device__ __forceinline__ void CastBase(const S *input_addr, T *output_addr) {
+  *output_addr = static_cast<T>((*input_addr));
+}
+
+// half --> integer
+__device__ __forceinline__ void CastBase(const half *input_addr, uint64_t *output_addr) {
+  *output_addr = __half2ull_rn((*input_addr));
+}
+
+__device__ __forceinline__ void CastBase(const half *input_addr, int64_t *output_addr) {
+  *output_addr = __half2ll_rn((*input_addr));
+}
+
+__device__ __forceinline__ void CastBase(const half *input_addr, uint32_t *output_addr) {
+  *output_addr = __half2uint_rn((*input_addr));
+}
+
+__device__ __forceinline__ void CastBase(const half *input_addr, int32_t *output_addr) {
+  *output_addr = __half2int_rn((*input_addr));
+}
+
+__device__ __forceinline__ void CastBase(const half *input_addr, uint16_t *output_addr) {
+  *output_addr = __half2ushort_rn((*input_addr));
+}
+
+__device__ __forceinline__ void CastBase(const half *input_addr, int16_t *output_addr) {
+  *output_addr = __half2short_rn((*input_addr));
+}
+
+__device__ __forceinline__ void CastBase(const half *input_addr, uint8_t *output_addr) {
+  *output_addr = static_cast<uint8_t>(__half2ushort_rn((*input_addr)));
+}
+
+__device__ __forceinline__ void CastBase(const half *input_addr, int8_t *output_addr) {
+  *output_addr = static_cast<int8_t>(__half2short_rn((*input_addr)));
+}
+
+// integer --> half
+__device__ __forceinline__ void CastBase(const uint64_t *input_addr, half *output_addr) {
+  *output_addr = __ull2half_rn((*input_addr));
+}
+
+__device__ __forceinline__ void CastBase(const int64_t *input_addr, half *output_addr) {
+  *output_addr = __ll2half_rn((*input_addr));
+}
+
+__device__ __forceinline__ void CastBase(const uint32_t *input_addr, half *output_addr) {
+  *output_addr = __uint2half_rn((*input_addr));
+}
+
+__device__ __forceinline__ void CastBase(const int32_t *input_addr, half *output_addr) {
+  *output_addr = __int2half_rn((*input_addr));
+}
+
+__device__ __forceinline__ void CastBase(const uint16_t *input_addr, half *output_addr) {
+  *output_addr = __ushort2half_rn((*input_addr));
+}
+
+__device__ __forceinline__ void CastBase(const int16_t *input_addr, half *output_addr) {
+  *output_addr = __short2half_rn((*input_addr));
+}
+
+__device__ __forceinline__ void CastBase(const uint8_t *input_addr, half *output_addr) {
+  *output_addr = __ushort2half_rn(static_cast<uint16_t>(*input_addr));
+}
+
+__device__ __forceinline__ void CastBase(const int8_t *input_addr, half *output_addr) {
+  *output_addr = __short2half_rn(static_cast<int16_t>(*input_addr));
+}
+
+// Cast
+template <typename S, typename T>
+__global__ void CastKernel(const int input_size, const S *input_addr, T *output_addr) {
+  for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < input_size; pos += blockDim.x * gridDim.x) {
+    CastBase(input_addr + pos, output_addr + pos);
+  }
+}
+
+template <typename S, typename T>
+void Cast(const int input_size, const S *input_addr, T *output_addr, cudaStream_t stream) {
+  CastKernel<<<GET_BLOCKS(input_size), GET_THREADS, 0, stream>>>(input_size, input_addr, output_addr);
+}
+
+template void Cast(const int input_size, const int8_t *input_addr, int8_t *output_addr, cudaStream_t stream);
+template void Cast(const int input_size, const int8_t *input_addr, int16_t *output_addr, cudaStream_t stream);
+template void Cast(const int input_size, const int8_t *input_addr, int32_t *output_addr, cudaStream_t stream);
+template void Cast(const int input_size, const int8_t *input_addr, int64_t *output_addr,
cudaStream_t stream); +template void Cast(const int input_size, const int8_t *input_addr, uint8_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int8_t *input_addr, uint16_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int8_t *input_addr, uint32_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int8_t *input_addr, uint64_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int8_t *input_addr, float *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int8_t *input_addr, double *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int8_t *input_addr, half *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int8_t *input_addr, bool *output_addr, cudaStream_t stream); + +template void Cast(const int input_size, const int16_t *input_addr, int8_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int16_t *input_addr, int16_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int16_t *input_addr, int32_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int16_t *input_addr, int64_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int16_t *input_addr, uint8_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int16_t *input_addr, uint16_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int16_t *input_addr, uint32_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int16_t *input_addr, uint64_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int16_t *input_addr, float *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int16_t *input_addr, double *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int16_t *input_addr, half *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int16_t *input_addr, bool *output_addr, cudaStream_t stream); + +template void Cast(const int input_size, const int32_t *input_addr, int8_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int32_t *input_addr, int16_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int32_t *input_addr, int32_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int32_t *input_addr, int64_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int32_t *input_addr, uint8_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int32_t *input_addr, uint16_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int32_t *input_addr, uint32_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int32_t *input_addr, uint64_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int32_t *input_addr, float *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int32_t *input_addr, double *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int32_t *input_addr, half *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const 
int32_t *input_addr, bool *output_addr, cudaStream_t stream); + +template void Cast(const int input_size, const int64_t *input_addr, int8_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int64_t *input_addr, int16_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int64_t *input_addr, int32_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int64_t *input_addr, int64_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int64_t *input_addr, uint8_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int64_t *input_addr, uint16_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int64_t *input_addr, uint32_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int64_t *input_addr, uint64_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int64_t *input_addr, float *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int64_t *input_addr, double *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int64_t *input_addr, half *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const int64_t *input_addr, bool *output_addr, cudaStream_t stream); + +template void Cast(const int input_size, const uint8_t *input_addr, int8_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint8_t *input_addr, int16_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint8_t *input_addr, int32_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint8_t *input_addr, int64_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint8_t *input_addr, uint8_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint8_t *input_addr, uint16_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint8_t *input_addr, uint32_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint8_t *input_addr, uint64_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint8_t *input_addr, float *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint8_t *input_addr, double *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint8_t *input_addr, half *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint8_t *input_addr, bool *output_addr, cudaStream_t stream); + +template void Cast(const int input_size, const uint16_t *input_addr, int8_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint16_t *input_addr, int16_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint16_t *input_addr, int32_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint16_t *input_addr, int64_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint16_t *input_addr, uint8_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint16_t *input_addr, uint16_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint16_t *input_addr, uint32_t *output_addr, 
cudaStream_t stream); +template void Cast(const int input_size, const uint16_t *input_addr, uint64_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint16_t *input_addr, float *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint16_t *input_addr, double *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint16_t *input_addr, half *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint16_t *input_addr, bool *output_addr, cudaStream_t stream); + +template void Cast(const int input_size, const uint32_t *input_addr, int8_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint32_t *input_addr, int16_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint32_t *input_addr, int32_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint32_t *input_addr, int64_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint32_t *input_addr, uint8_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint32_t *input_addr, uint16_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint32_t *input_addr, uint32_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint32_t *input_addr, uint64_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint32_t *input_addr, float *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint32_t *input_addr, double *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint32_t *input_addr, half *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint32_t *input_addr, bool *output_addr, cudaStream_t stream); + +template void Cast(const int input_size, const uint64_t *input_addr, int8_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint64_t *input_addr, int16_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint64_t *input_addr, int32_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint64_t *input_addr, int64_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint64_t *input_addr, uint8_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint64_t *input_addr, uint16_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint64_t *input_addr, uint32_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint64_t *input_addr, uint64_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint64_t *input_addr, float *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint64_t *input_addr, double *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint64_t *input_addr, half *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const uint64_t *input_addr, bool *output_addr, cudaStream_t stream); + +template void Cast(const int input_size, const half *input_addr, int8_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const half *input_addr, int16_t *output_addr, cudaStream_t stream); +template void 
Cast(const int input_size, const half *input_addr, int32_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const half *input_addr, int64_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const half *input_addr, uint8_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const half *input_addr, uint16_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const half *input_addr, uint32_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const half *input_addr, uint64_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const half *input_addr, float *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const half *input_addr, double *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const half *input_addr, half *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const half *input_addr, bool *output_addr, cudaStream_t stream); + +template void Cast(const int input_size, const float *input_addr, int8_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const float *input_addr, int16_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const float *input_addr, int32_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const float *input_addr, int64_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const float *input_addr, uint8_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const float *input_addr, uint16_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const float *input_addr, uint32_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const float *input_addr, uint64_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const float *input_addr, float *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const float *input_addr, double *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const float *input_addr, half *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const float *input_addr, bool *output_addr, cudaStream_t stream); + +template void Cast(const int input_size, const double *input_addr, int8_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const double *input_addr, int16_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const double *input_addr, int32_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const double *input_addr, int64_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const double *input_addr, uint8_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const double *input_addr, uint16_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const double *input_addr, uint32_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const double *input_addr, uint64_t *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const double *input_addr, float *output_addr, cudaStream_t stream); +template void Cast(const int input_size, const double *input_addr, double *output_addr, cudaStream_t stream); +template void Cast(const 
int input_size, const double *input_addr, half *output_addr, cudaStream_t stream);
+template void Cast(const int input_size, const double *input_addr, bool *output_addr, cudaStream_t stream);
+
+template void Cast(const int input_size, const bool *input_addr, int8_t *output_addr, cudaStream_t stream);
+template void Cast(const int input_size, const bool *input_addr, int16_t *output_addr, cudaStream_t stream);
+template void Cast(const int input_size, const bool *input_addr, int32_t *output_addr, cudaStream_t stream);
+template void Cast(const int input_size, const bool *input_addr, int64_t *output_addr, cudaStream_t stream);
+template void Cast(const int input_size, const bool *input_addr, uint8_t *output_addr, cudaStream_t stream);
+template void Cast(const int input_size, const bool *input_addr, uint16_t *output_addr, cudaStream_t stream);
+template void Cast(const int input_size, const bool *input_addr, uint32_t *output_addr, cudaStream_t stream);
+template void Cast(const int input_size, const bool *input_addr, uint64_t *output_addr, cudaStream_t stream);
+template void Cast(const int input_size, const bool *input_addr, float *output_addr, cudaStream_t stream);
+template void Cast(const int input_size, const bool *input_addr, double *output_addr, cudaStream_t stream);
+template void Cast(const int input_size, const bool *input_addr, half *output_addr, cudaStream_t stream);
+template void Cast(const int input_size, const bool *input_addr, bool *output_addr, cudaStream_t stream);
diff --git a/mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/cast_impl.cuh b/mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/cast_impl.cuh
new file mode 100644
index 0000000000..aa8c628c81
--- /dev/null
+++ b/mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/cast_impl.cuh
@@ -0,0 +1,25 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_CAST_H_
+#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_CAST_H_
+
+#include <cuda_runtime.h>
+#include "runtime/device/gpu/cuda_common.h"
+
+template <typename S, typename T>
+void Cast(const int input_size, const S *input_addr, T *output_addr, cudaStream_t stream);
+#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_CAST_H_
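
Note (not part of the patch): every registered Cast variant above boils down to the same grid-stride elementwise pattern in cast_impl.cu, where each thread converts elements pos, pos + blockDim.x * gridDim.x, ... with a per-type CastBase overload. The following is a minimal, self-contained CUDA sketch of that pattern under simplified assumptions: DemoCastKernel, DemoCast, the fixed 256-thread block size, and the file name demo_cast.cu are hypothetical stand-ins for MindSpore's GET_BLOCKS/GET_THREADS helpers and are not part of the patch, and it uses plain static_cast everywhere, whereas the patch routes half conversions through the __half2*_rn / __*2half_rn intrinsics so the float16 paths round to nearest explicitly.

#include <cuda_runtime.h>
#include <cstdio>

// Grid-stride elementwise cast: each thread converts elements
// pos, pos + stride, pos + 2*stride, ... of the input buffer.
template <typename S, typename T>
__global__ void DemoCastKernel(int n, const S *in, T *out) {
  int stride = blockDim.x * gridDim.x;
  for (int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < n; pos += stride) {
    out[pos] = static_cast<T>(in[pos]);
  }
}

// Host wrapper: picks a simple launch configuration and enqueues the kernel
// on the given stream, mirroring the shape of Cast() in cast_impl.cu.
template <typename S, typename T>
void DemoCast(int n, const S *in, T *out, cudaStream_t stream) {
  const int threads = 256;                         // assumed block size
  const int blocks = (n + threads - 1) / threads;  // enough blocks to cover n
  DemoCastKernel<<<blocks, threads, 0, stream>>>(n, in, out);
}

int main() {
  const int n = 8;
  float h_in[n] = {0.5f, 1.4f, 2.9f, -1.2f, 3.0f, 4.7f, -0.1f, 100.0f};
  int h_out[n] = {0};

  float *d_in = nullptr;
  int *d_out = nullptr;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, n * sizeof(int));
  cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);

  DemoCast(n, d_in, d_out, nullptr);  // nullptr = default stream
  cudaMemcpy(h_out, d_out, n * sizeof(int), cudaMemcpyDeviceToHost);

  for (int i = 0; i < n; ++i) {
    printf("%f -> %d\n", h_in[i], h_out[i]);  // float-to-int static_cast truncates toward zero
  }

  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}

Built with something like "nvcc demo_cast.cu -o demo_cast", this prints each float next to its truncated int32 value, which is what the generic CastBase path of the float32-to-int32 kernel registered above should also produce.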