Commit 2d0ee05454 (parent 648501daa0) on pull/1442/head
Author: z00478463, 5 years ago
Commit message: for pylint 2nd

@@ -14,14 +14,3 @@
# ============================================================================
"""custom ops"""
from .batch_matmul_impl import CusBatchMatMul
from .cholesky_trsm_impl import CusCholeskyTrsm
from .fused_abs_max1_impl import CusFusedAbsMax1
from .img2col_impl import CusImg2Col
from .matmul_cube_dense_left_impl import CusMatMulCubeDenseLeft
from .matmul_cube_dense_right_impl import CusMatMulCubeDenseRight
from .matmul_cube_fracz_left_cast_impl import CusMatMulCubeFraczLeftCast
from .matmul_cube_fracz_right_mul_impl import CusMatMulCubeFraczRightMul
from .matmul_cube_impl import CusMatMulCube
from .matrix_combine_impl import CusMatrixCombine
from .transpose02314_impl import CusTranspose02314
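
This __init__.py only needs to import each *_impl module so that their @op_info_register registrations are picked up when the package is loaded. A minimal sketch of that registration pattern, mirroring the files changed below (the op name "CusExampleOp" and its settings are illustrative, not a real op in this package):

from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

example_op_info = TBERegOp("CusExampleOp") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("exampleop.so") \
    .compute_cost(10) \
    .kernel_name("CusExampleOp") \
    .partial_flag(True) \
    .input(0, "x1", False, "required", "all") \
    .output(0, "y", False, "required", "all") \
    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
    .get_op_info()

@op_info_register(example_op_info)
def CusExampleOp(input_x, output, kernel_name="cus_example_op"):
    # Real implementations build a TIK kernel here and finish with
    # tik_instance.BuildCCE(...), as the files below do.
    pass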

File diff suppressed because it is too large

@@ -13,24 +13,25 @@
# limitations under the License.
# ============================================================================
"""CusCholeskyTrsm"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
from te import tik
from topi.cce import util
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
cus_cholesky_trsm_op_info = TBERegOp("CusCholeskyTrsm") \
.fusion_type("OPAQUE") \
.async_flag(False) \
.binfile_name("choleskytrsm.so") \
.compute_cost(10) \
.kernel_name("CusCholeskyTrsm") \
.partial_flag(True) \
.input(0, "x1", False, "required", "all") \
.output(0, "y", False, "required", "all") \
.dtype_format(DataType.F32_Default, DataType.F32_Default) \
.get_op_info()
.fusion_type("OPAQUE") \
.async_flag(False) \
.binfile_name("choleskytrsm.so") \
.compute_cost(10) \
.kernel_name("CusCholeskyTrsm") \
.partial_flag(True) \
.input(0, "x1", False, "required", "all") \
.output(0, "y", False, "required", "all") \
.dtype_format(DataType.F32_Default, DataType.F32_Default) \
.get_op_info()
@op_info_register(cus_cholesky_trsm_op_info)
def CusCholeskyTrsm(input_x,output, kernel_name):
def CusCholeskyTrsm(input_x, output, kernel_name):
input_x_shape = input_x.get("shape")
output_shape = output.get("shape")
split_dim = 128
@@ -47,34 +48,36 @@ def CusCholeskyTrsm(input_x,output, kernel_name):
input_x = tik_instance.Tensor("float32", input_x_shape, name="input_x", scope=tik.scope_gm)
res = tik_instance.Tensor("float32", output_shape, name="res", scope=tik.scope_gm)
with tik_instance.for_range(0,blocks,block_num=blocks) as block_index:
input_x_ub = tik_instance.Tensor("float32", (split_dim,split_dim), name="input_x_ub", scope=tik.scope_ubuf)
temp_ub = tik_instance.Tensor("float32", (split_dim,split_dim), name="temp_ub", scope=tik.scope_ubuf)
with tik_instance.for_range(0, blocks, block_num=blocks) as block_index:
input_x_ub = tik_instance.Tensor("float32", (split_dim, split_dim), name="input_x_ub", scope=tik.scope_ubuf)
temp_ub = tik_instance.Tensor("float32", (split_dim, split_dim), name="temp_ub", scope=tik.scope_ubuf)
assist_1_ub = tik_instance.Tensor("float32", (split_dim,), name="assist_1_ub", scope=tik.scope_ubuf)
assist_2_ub = tik_instance.Tensor("float32", (split_dim,), name="assist_2_ub", scope=tik.scope_ubuf)
with tik_instance.for_range(0,split_dim) as i:
tik_instance.data_move(input_x_ub[i,0], input_x[block_index * split_dim + i, block_index * split_dim], 0, 1, vector_repeat_times * 8, 0, 0)
scalar1 = tik_instance.Scalar("float32", init_value = -0.5)
with tik_instance.for_range(0, split_dim) as i:
tik_instance.data_move(input_x_ub[i, 0], input_x[block_index * split_dim + i, block_index * split_dim], 0,
1, vector_repeat_times * 8, 0, 0)
scalar1 = tik_instance.Scalar("float32", init_value=-0.5)
with tik_instance.for_range(0, split_dim) as i:
scalar2= tik_instance.Scalar("float32")
tik_instance.vln(64, assist_1_ub[0], input_x_ub[i,0], vector_repeat_times, 1, 1, 8, 8)
scalar2 = tik_instance.Scalar("float32")
tik_instance.vln(64, assist_1_ub[0], input_x_ub[i, 0], vector_repeat_times, 1, 1, 8, 8)
tik_instance.vmuls(64, assist_2_ub[0], assist_1_ub[0], scalar1, vector_repeat_times, 1, 1, 8, 8)
tik_instance.vexp(64, assist_1_ub[0], assist_2_ub[0], vector_repeat_times, 1, 1, 8, 8)
scalar2.set_as(assist_1_ub[i])
tik_instance.vmuls(64, input_x_ub[i,0], input_x_ub[i,0], scalar2, vector_repeat_times, 1, 1, 8, 8)
tik_instance.vmuls(64, input_x_ub[i, 0], input_x_ub[i, 0], scalar2, vector_repeat_times, 1, 1, 8, 8)
with tik_instance.for_range(i + 1, split_dim) as j:
scalar3= tik_instance.Scalar("float32")
scalar3 = tik_instance.Scalar("float32")
scalar3.set_as(input_x_ub[i, j])
tik_instance.vmuls(64,temp_ub[j, 0], input_x_ub[i, 0], scalar3, vector_repeat_times, 1, 1, 8, 8)
tik_instance.vsub(64,input_x_ub[i+1,0], input_x_ub[i+1,0], temp_ub[i+1,0], (split_dim-1-i) * vector_repeat_times, 1, 1, 1, 8, 8, 8)
tik_instance.vmuls(64, temp_ub[j, 0], input_x_ub[i, 0], scalar3, vector_repeat_times, 1, 1, 8, 8)
tik_instance.vsub(64, input_x_ub[i + 1, 0], input_x_ub[i + 1, 0], temp_ub[i + 1, 0],
(split_dim - 1 - i) * vector_repeat_times, 1, 1, 1, 8, 8, 8)
zero = tik_instance.Scalar("float32")
zero.set_as(0.0)
one = tik_instance.Scalar("float32")
one.set_as(1.0)
with tik_instance.for_range(0, split_dim) as i:
tik_instance.vector_dup(64, temp_ub[i,0], zero, vector_repeat_times, 1, 8)
tik_instance.vector_dup(64, temp_ub[i, 0], zero, vector_repeat_times, 1, 8)
temp_ub.__setitem__(i * split_dim + i, one)
chol_diag_element_final = tik_instance.Scalar("float32")
@@ -89,16 +92,19 @@ def CusCholeskyTrsm(input_x,output, kernel_name):
with tik_instance.for_range(0, i) as j:
chol_diag_element_loop = tik_instance.Scalar("float32")
chol_diag_element_loop.set_as(input_x_ub[index, index + 1 + j])
tik_instance.vmuls(64, assist_2_ub, temp_ub[j + index + 1, 0], chol_diag_element_loop, vector_repeat_times,1,1,8,8)
tik_instance.vadd(64, assist_1_ub, assist_2_ub, assist_1_ub, vector_repeat_times,1,1,1,8,8,8)
tik_instance.vmuls(64, assist_2_ub, temp_ub[j + index + 1, 0], chol_diag_element_loop,
vector_repeat_times, 1, 1, 8, 8)
tik_instance.vadd(64, assist_1_ub, assist_2_ub, assist_1_ub, vector_repeat_times, 1, 1, 1, 8, 8, 8)
temp_scalar = tik_instance.Scalar("float32")
temp_scalar.set_as(input_x_ub[index, index])
chol_diag_element = tik_instance.Scalar("float32")
chol_diag_element.set_as(1.0 / temp_scalar)
tik_instance.vsub(64,temp_ub[index, 0], temp_ub[index, 0], assist_1_ub, vector_repeat_times,1,1,1,8,8,8)
tik_instance.vmuls(64, temp_ub[index, 0], temp_ub[index, 0], chol_diag_element,vector_repeat_times,1,1,8,8)
tik_instance.vsub(64, temp_ub[index, 0], temp_ub[index, 0], assist_1_ub, vector_repeat_times, 1, 1, 1, 8, 8,
8)
tik_instance.vmuls(64, temp_ub[index, 0], temp_ub[index, 0], chol_diag_element, vector_repeat_times, 1, 1,
8, 8)
tik_instance.data_move(res[block_index,0,0], temp_ub, 0, 1, 8 * vector_repeat_times * split_dim,0,0)
tik_instance.data_move(res[block_index, 0, 0], temp_ub, 0, 1, 8 * vector_repeat_times * split_dim, 0, 0)
tik_instance.BuildCCE(kernel_name=kernel_name, inputs=[input_x], outputs=[res])
return tik_instance
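
As a reading aid, here is a rough NumPy sketch of what CusCholeskyTrsm appears to compute per 128x128 diagonal block, inferred from the TIK code above (not an official reference; the helper name and shapes are assumptions):

import numpy as np

def cholesky_trsm_reference(x, split_dim=128):
    # For each split_dim x split_dim diagonal block A of x (x assumed square
    # with side divisible by split_dim), the kernel seems to factor
    # A = U.T @ U with U upper triangular, then emit the triangular inverse
    # U^{-1} via the back-substitution loop, one block per output slice.
    blocks = x.shape[0] // split_dim
    out = np.zeros((blocks, split_dim, split_dim), dtype=np.float32)
    for b in range(blocks):
        s = slice(b * split_dim, (b + 1) * split_dim)
        u = np.linalg.cholesky(x[s, s]).T   # upper factor of the SPD block
        out[b] = np.linalg.inv(u)
    return out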

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -18,15 +18,10 @@ limitations under the License.
matmul
"""
from __future__ import absolute_import
import te.lang.cce
import te.platform.cce_params as cce
from te.platform.fusion_manager import fusion_manager
from te import tvm
from topi import generic
from topi.cce import util
from impl.matmul_vector import matmul_vector_cce
from te import tik
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
from te import tik
from topi.cce import util
matmul_cube_dense_right_op_info = TBERegOp("CusMatMulCubeDenseRight") \
.fusion_type("OPAQUE") \
@@ -40,23 +35,26 @@ matmul_cube_dense_right_op_info = TBERegOp("CusMatMulCubeDenseRight") \
.input(2, "x3", False, "required", "all") \
.input(3, "x4", False, "optional", "all") \
.output(0, "y", False, "required", "all") \
.dtype_format(DataType.F16_FracNZ, DataType.F16_Default, DataType.F32_Default, DataType.F16_Default, DataType.F32_FracNZ) \
.dtype_format(DataType.F16_FracNZ, DataType.F16_Default, DataType.F32_Default, DataType.F16_Default,
DataType.F32_FracNZ) \
.get_op_info()
@op_info_register(matmul_cube_dense_right_op_info)
def CusMatMulCubeDenseRight(input_x1, input_x2, input_x3, bias=None, output_y={}, trans_a=False, trans_b=False, kernel_name="matmulcube"):
def CusMatMulCubeDenseRight(input_x1, input_x2, input_x3, bias=None, output_y={}, trans_a=False, trans_b=False,
kernel_name="matmulcube"):
shape_a_temp = (128, 63, 16, 16)
shape_b_temp = (128, 128, 16, 16)
shape_output = output_y.get("shape")
matrix_max_shape = (1,)
support_shape = [(shape_a_temp, shape_b_temp, matrix_max_shape),]
support_shape = [(shape_a_temp, shape_b_temp, matrix_max_shape), ]
shape_a_input = input_x1.get("shape")
shape_b_input = input_x2.get("shape")
matrix_max_input = input_x3.get("shape")
input_shape = (tuple(shape_a_input), tuple(shape_b_input), tuple(matrix_max_input))
if input_shape not in support_shape:
raise RuntimeError("input_shape %s is not supported" % str(input_shape))
if shape_a_temp[0] == 128 and shape_a_temp[1] == 63 and shape_b_temp[0] == 128 and shape_b_temp[1] == 128:
if util.get_product_version() == util.VERSION_MINI:
tik_instance = tik.Tik(tik.Dprofile("v100", "mini"))
@@ -64,79 +62,110 @@ def CusMatMulCubeDenseRight(input_x1, input_x2, input_x3, bias=None, output_y={}
tik_instance = tik.Tik(tik.Dprofile("v100", "cloud"))
input_x1 = tik_instance.Tensor("float16", shape_a_temp, name="left_matrix", scope=tik.scope_gm)
input_x2 = tik_instance.Tensor("float16", shape_b_temp, name="right_matrix", scope=tik.scope_gm)
input_x3 = tik_instance.Tensor("float32", [1,], name="matrix_max", scope=tik.scope_gm)
input_x3 = tik_instance.Tensor("float32", [1, ], name="matrix_max", scope=tik.scope_gm)
resMatmul = tik_instance.Tensor("float32", shape_output, name="output", scope=tik.scope_gm)
with tik_instance.for_range(0, 32, block_num=32) as block_index:
core_m_idx = block_index // 16
core_n_idx = block_index % 16
matrix_max_scalar = tik_instance.Scalar("float32")
matrix_max_local_UB = tik_instance.Tensor("float32", (8,), scope = tik.scope_ubuf, name = "matrix_max_local_UB")
matrix_max_local_UB = tik_instance.Tensor("float32", (8,), scope=tik.scope_ubuf, name="matrix_max_local_UB")
tik_instance.data_move(matrix_max_local_UB, input_x3, 0, 1, 1, 0, 0)
matrix_max_scalar.set_as(matrix_max_local_UB[0])
resMatmul_local_UB = tik_instance.Tensor("float32", (256 * 128,), scope=tik.scope_ubuf, name = "resMatmul_local_UB")
resMatmul_local_UB1 = tik_instance.Tensor("float32", (240 * 128,), scope=tik.scope_ubuf, name = "resMatmul_local_UB1")
resMatmul_local_UB_local_L0C = tik_instance.Tensor("float32", (256 * 128,), scope=tik.scope_cc, name = "resMatmul_local_UB_local_L0C")
resMatmul_local_UB_local_L0C1 = tik_instance.Tensor("float32", (240 * 128,), scope=tik.scope_cc, name = "resMatmul_local_UB_local_L0C1")
resMatmul_local_UB = tik_instance.Tensor("float32", (256 * 128,), scope=tik.scope_ubuf,
name="resMatmul_local_UB")
resMatmul_local_UB1 = tik_instance.Tensor("float32", (240 * 128,), scope=tik.scope_ubuf,
name="resMatmul_local_UB1")
resMatmul_local_UB_local_L0C = tik_instance.Tensor("float32", (256 * 128,), scope=tik.scope_cc,
name="resMatmul_local_UB_local_L0C")
resMatmul_local_UB_local_L0C1 = tik_instance.Tensor("float32", (240 * 128,), scope=tik.scope_cc,
name="resMatmul_local_UB_local_L0C1")
input_1_local_L1_local_L0A = tik_instance.Tensor("float16", (256 * 128,), scope=tik.scope_ca,
name="input_1_local_L1_local_L0A")
input_2_local_L1 = tik_instance.Tensor("float16", (8 * 128 * 16,), scope=tik.scope_cbuf,
name="input_2_local_L1")
input_2_local_L11 = tik_instance.Tensor("float16", (8 * 128 * 16,), scope=tik.scope_cbuf,
name="input_2_local_L11")
input_1_local_L1 = tik_instance.Tensor("float16", (8 * 256 * 16,), scope=tik.scope_cbuf,
name="input_1_local_L1")
input_1_local_L11 = tik_instance.Tensor("float16", (8 * 240 * 16,), scope=tik.scope_cbuf,
name="input_1_local_L11")
input_2_local_L1_local_L0B = tik_instance.Tensor("float16", (128 * 128,), scope=tik.scope_cb,
name="input_2_local_L1_local_L0B")
input_2_local_L1_local_L0B1 = tik_instance.Tensor("float16", (128 * 128,), scope=tik.scope_cb,
name="input_2_local_L1_local_L0B1")
input_1_local_L1_local_L0A = tik_instance.Tensor("float16", (256 * 128,), scope=tik.scope_ca, name = "input_1_local_L1_local_L0A")
input_2_local_L1 = tik_instance.Tensor("float16", (8 * 128 * 16,), scope=tik.scope_cbuf, name = "input_2_local_L1")
input_2_local_L11 = tik_instance.Tensor("float16", (8 * 128 * 16,), scope=tik.scope_cbuf, name = "input_2_local_L11")
input_1_local_L1 = tik_instance.Tensor("float16", (8 * 256 * 16,), scope=tik.scope_cbuf, name = "input_1_local_L1")
input_1_local_L11 = tik_instance.Tensor("float16", (8 * 240 * 16,), scope=tik.scope_cbuf, name = "input_1_local_L11")
input_2_local_L1_local_L0B = tik_instance.Tensor("float16", (128 * 128,), scope=tik.scope_cb, name = "input_2_local_L1_local_L0B")
input_2_local_L1_local_L0B1 = tik_instance.Tensor("float16", (128 * 128,), scope=tik.scope_cb, name = "input_2_local_L1_local_L0B1")
with tik_instance.if_scope(core_m_idx == 0):
with tik_instance.for_range(0, 2) as cc1:
tik_instance.data_move(input_2_local_L1, input_x2[core_n_idx * 262144 + core_n_idx * 2048], 0, 8, 128, 1920, 0)
tik_instance.data_move(input_1_local_L1, input_x1[core_n_idx * 129024 + cc1 * 4096], 0, 8, 256, 752, 0)
tik_instance.data_move(input_2_local_L1, input_x2[core_n_idx * 262144 + core_n_idx * 2048], 0, 8,
128, 1920, 0)
tik_instance.data_move(input_1_local_L1, input_x1[core_n_idx * 129024 + cc1 * 4096], 0, 8, 256, 752,
0)
with tik_instance.for_range(0, 8) as cc10:
tik_instance.load2dv1(input_2_local_L1_local_L0B[cc10 * 2048], input_2_local_L1[cc10 * 256], 0, 8, 8, 0, True)
tik_instance.load2dv1(input_2_local_L1_local_L0B[cc10 * 2048], input_2_local_L1[cc10 * 256], 0,
8, 8, 0, True)
with tik_instance.for_range(0, 16) as cc101:
tik_instance.load2dv1(input_1_local_L1_local_L0A[cc101 * 2048], input_1_local_L1[cc101 * 256], 0, 8, 16, 0, False)
tik_instance.mmad(resMatmul_local_UB_local_L0C, input_1_local_L1_local_L0A, input_2_local_L1_local_L0B, 256, 128, 128, 0)
tik_instance.load2dv1(input_1_local_L1_local_L0A[cc101 * 2048], input_1_local_L1[cc101 * 256],
0, 8, 16, 0, False)
tik_instance.mmad(resMatmul_local_UB_local_L0C, input_1_local_L1_local_L0A,
input_2_local_L1_local_L0B, 256, 128, 128, 0)
tik_instance.data_move(resMatmul_local_UB, resMatmul_local_UB_local_L0C, 0, 1, 128, 0, 0)
tik_instance.vmuls(64, resMatmul_local_UB, resMatmul_local_UB, matrix_max_scalar, 255,1,1,8,8)
tik_instance.vmuls(64, resMatmul_local_UB[255*64], resMatmul_local_UB[255*64], matrix_max_scalar, 255,1,1,8,8)
tik_instance.vmuls(64, resMatmul_local_UB[510*64], resMatmul_local_UB[510*64], matrix_max_scalar, 2,1,1,8,8)
tik_instance.data_move(resMatmul[core_n_idx * 129024 + cc1 * 4096], resMatmul_local_UB, 0, 8, 512, 0, 1504)
tik_instance.vmuls(64, resMatmul_local_UB, resMatmul_local_UB, matrix_max_scalar, 255, 1, 1, 8, 8)
tik_instance.vmuls(64, resMatmul_local_UB[255 * 64], resMatmul_local_UB[255 * 64],
matrix_max_scalar, 255, 1, 1, 8, 8)
tik_instance.vmuls(64, resMatmul_local_UB[510 * 64], resMatmul_local_UB[510 * 64],
matrix_max_scalar, 2, 1, 1, 8, 8)
tik_instance.data_move(resMatmul[core_n_idx * 129024 + cc1 * 4096], resMatmul_local_UB, 0, 8, 512,
0, 1504)
with tik_instance.else_scope():
tik_instance.data_move(input_2_local_L1, input_x2[core_n_idx * 262144 + core_n_idx * 2048], 0, 8, 128, 1920, 0)
tik_instance.data_move(input_2_local_L1, input_x2[core_n_idx * 262144 + core_n_idx * 2048], 0, 8, 128,
1920, 0)
tik_instance.data_move(input_1_local_L1, input_x1[core_n_idx * 129024 + 2 * 4096], 0, 8, 256, 752, 0)
with tik_instance.for_range(0, 8) as cc10:
tik_instance.load2dv1(input_2_local_L1_local_L0B[cc10 * 2048], input_2_local_L1[cc10 * 256], 0, 8, 8, 0, True)
tik_instance.load2dv1(input_2_local_L1_local_L0B[cc10 * 2048], input_2_local_L1[cc10 * 256], 0, 8,
8, 0, True)
with tik_instance.for_range(0, 16) as cc101:
tik_instance.load2dv1(input_1_local_L1_local_L0A[cc101 * 2048], input_1_local_L1[cc101 * 256], 0, 8, 16, 0, False)
tik_instance.mmad(resMatmul_local_UB_local_L0C, input_1_local_L1_local_L0A, input_2_local_L1_local_L0B, 256, 128, 128, 0)
tik_instance.load2dv1(input_1_local_L1_local_L0A[cc101 * 2048], input_1_local_L1[cc101 * 256], 0, 8,
16, 0, False)
tik_instance.mmad(resMatmul_local_UB_local_L0C, input_1_local_L1_local_L0A, input_2_local_L1_local_L0B,
256, 128, 128, 0)
tik_instance.data_move(resMatmul_local_UB, resMatmul_local_UB_local_L0C, 0, 1, 128, 0, 0)
tik_instance.vmuls(64, resMatmul_local_UB, resMatmul_local_UB, matrix_max_scalar, 255,1,1,8,8)
tik_instance.vmuls(64, resMatmul_local_UB[255*64], resMatmul_local_UB[255*64], matrix_max_scalar, 255,1,1,8,8)
tik_instance.vmuls(64, resMatmul_local_UB[510*64], resMatmul_local_UB[510*64], matrix_max_scalar, 2,1,1,8,8)
tik_instance.data_move(resMatmul[core_n_idx * 129024 + 2 * 4096], resMatmul_local_UB, 0, 8, 512, 0, 1504)
tik_instance.data_move(input_2_local_L11, input_x2[core_n_idx * 262144 + core_n_idx * 2048], 0, 8, 128, 1920, 0)
tik_instance.vmuls(64, resMatmul_local_UB, resMatmul_local_UB, matrix_max_scalar, 255, 1, 1, 8, 8)
tik_instance.vmuls(64, resMatmul_local_UB[255 * 64], resMatmul_local_UB[255 * 64], matrix_max_scalar,
255, 1, 1, 8, 8)
tik_instance.vmuls(64, resMatmul_local_UB[510 * 64], resMatmul_local_UB[510 * 64], matrix_max_scalar, 2,
1, 1, 8, 8)
tik_instance.data_move(resMatmul[core_n_idx * 129024 + 2 * 4096], resMatmul_local_UB, 0, 8, 512, 0,
1504)
tik_instance.data_move(input_2_local_L11, input_x2[core_n_idx * 262144 + core_n_idx * 2048], 0, 8, 128,
1920, 0)
tik_instance.data_move(input_1_local_L11, input_x1[core_n_idx * 129024 + 12288], 0, 8, 240, 768, 0)
with tik_instance.for_range(0, 8) as cc102:
tik_instance.load2dv1(input_2_local_L1_local_L0B1[cc102 * 2048], input_2_local_L11[cc102 * 256], 0, 8, 8, 0, True)
tik_instance.load2dv1(input_2_local_L1_local_L0B1[cc102 * 2048], input_2_local_L11[cc102 * 256], 0,
8, 8, 0, True)
with tik_instance.for_range(0, 16) as cc103:
tik_instance.load2dv1(input_1_local_L1_local_L0A[cc103 * 2048], input_1_local_L11[cc103 * 256], 0, 8, 15, 0, False)
tik_instance.mmad(resMatmul_local_UB_local_L0C1, input_1_local_L1_local_L0A, input_2_local_L1_local_L0B1, 240, 128, 128, 0)
tik_instance.load2dv1(input_1_local_L1_local_L0A[cc103 * 2048], input_1_local_L11[cc103 * 256], 0,
8, 15, 0, False)
tik_instance.mmad(resMatmul_local_UB_local_L0C1, input_1_local_L1_local_L0A,
input_2_local_L1_local_L0B1, 240, 128, 128, 0)
tik_instance.data_move(resMatmul_local_UB1, resMatmul_local_UB_local_L0C1, 0, 1, 120, 0, 0)
tik_instance.vmuls(64, resMatmul_local_UB1, resMatmul_local_UB1, matrix_max_scalar, 255,1,1,8,8)
tik_instance.vmuls(64, resMatmul_local_UB1[255*64], resMatmul_local_UB1[255*64], matrix_max_scalar, 225,1,1,8,8)
tik_instance.vmuls(64, resMatmul_local_UB1, resMatmul_local_UB1, matrix_max_scalar, 255, 1, 1, 8, 8)
tik_instance.vmuls(64, resMatmul_local_UB1[255 * 64], resMatmul_local_UB1[255 * 64], matrix_max_scalar,
225, 1, 1, 8, 8)
tik_instance.data_move(resMatmul[core_n_idx * 129024 + 12288], resMatmul_local_UB1, 0, 8, 480, 0, 1536)
tik_instance.BuildCCE(kernel_name=kernel_name, inputs=[input_x1, input_x2, input_x3], outputs=[resMatmul])
return tik_instance
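
Setting aside the FracNZ tiling and the 32-core scheduling, a minimal sketch of the arithmetic this kernel appears to perform (an inference from the mmad/vmuls sequence; the helper name and plain 2-D shapes are assumptions):

import numpy as np

def matmul_dense_right_reference(x1, x2, matrix_max):
    # fp16 inputs, fp32 accumulation, then a broadcast scale by the single
    # fp32 value carried in x3 ("matrix_max"). The real kernel works on
    # (K1, N1, 16, 16)-style fractal blocks rather than plain matrices.
    prod = x1.astype(np.float32) @ x2.astype(np.float32)
    return prod * np.float32(matrix_max[0])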

@@ -18,37 +18,35 @@ limitations under the License.
matmul
"""
from __future__ import absolute_import
import te.lang.cce
import te.platform.cce_params as cce
from te.platform.fusion_manager import fusion_manager
from te import tvm
from topi import generic
from topi.cce import util
from te import tik
from impl.matmul_vector import matmul_vector_cce
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
from te import tik
from topi.cce import util
# General limitation of the size for input shape: 2**31
SHAPE_SIZE_LIMIT = 2147483648
NoneType = type(None)
cus_matmul_cube_fracz_right_mul_op_info = TBERegOp("CusMatMulCubeFraczRightMul") \
.fusion_type("OPAQUE") \
.async_flag(False) \
.binfile_name("matmulcubefraczrightmul.so") \
.compute_cost(10) \
.kernel_name("CusMatMulCubeFraczRightMul") \
.partial_flag(True) \
.input(0, "x1", False, "required", "all") \
.input(1, "x2", False, "required", "all") \
.input(2, "x3", False, "required", "all") \
.input(3, "x4", False, "optional", "all") \
.output(0, "y", False, "required", "all") \
.dtype_format(DataType.F16_FracZ, DataType.F16_Default, DataType.F32_Default, DataType.F16_Default, DataType.F32_FracZ) \
.get_op_info()
.fusion_type("OPAQUE") \
.async_flag(False) \
.binfile_name("matmulcubefraczrightmul.so") \
.compute_cost(10) \
.kernel_name("CusMatMulCubeFraczRightMul") \
.partial_flag(True) \
.input(0, "x1", False, "required", "all") \
.input(1, "x2", False, "required", "all") \
.input(2, "x3", False, "required", "all") \
.input(3, "x4", False, "optional", "all") \
.output(0, "y", False, "required", "all") \
.dtype_format(DataType.F16_FracZ, DataType.F16_Default, DataType.F32_Default, DataType.F16_Default,
DataType.F32_FracZ) \
.get_op_info()
@op_info_register(cus_matmul_cube_fracz_right_mul_op_info)
def CusMatMulCubeFraczRightMul(input_x1, input_x2, input_x3, bias=None, output_y={}, trans_a=False, trans_b=False, kernel_name="matmulcube"):
def CusMatMulCubeFraczRightMul(input_x1, input_x2, input_x3, bias=None, output_y={}, trans_a=False, trans_b=False,
kernel_name="matmulcube"):
if util.get_product_version() == util.VERSION_MINI:
tik_instance = tik.Tik(tik.Dprofile("v100", "mini"))
else:
@@ -61,10 +59,10 @@ def CusMatMulCubeFraczRightMul(input_x1, input_x2, input_x3, bias=None, output_y
input_x3_shape = input_x3.get("shape")
input_x3_dtype = input_x3.get("dtype").lower()
output_shape = output_y.get("shape")
Supported = [((72, 8, 16, 16),"float16", (72, 72, 16, 16), "float16", (1,), "float32"),
((32, 8, 16, 16),"float16", (32, 32, 16, 16), "float16", (1,), "float32"),
((8, 32, 16, 16),"float16", (8, 8, 16, 16), "float16", (1,), "float32"),
((4, 4, 16, 16),"float16", (4, 4, 16, 16), "float16", (1,), "float32"),
Supported = [((72, 8, 16, 16), "float16", (72, 72, 16, 16), "float16", (1,), "float32"),
((32, 8, 16, 16), "float16", (32, 32, 16, 16), "float16", (1,), "float32"),
((8, 32, 16, 16), "float16", (8, 8, 16, 16), "float16", (1,), "float32"),
((4, 4, 16, 16), "float16", (4, 4, 16, 16), "float16", (1,), "float32"),
((4, 16, 16, 16), 'float16', (4, 4, 16, 16), 'float16', (1,), 'float32'),
((49, 4, 16, 16), 'float16', (49, 49, 16, 16), 'float16', (1,), 'float32'),
((36, 4, 16, 16), 'float16', (36, 36, 16, 16), 'float16', (1,), 'float32'),
@@ -81,7 +79,8 @@ def CusMatMulCubeFraczRightMul(input_x1, input_x2, input_x3, bias=None, output_y
((32, 128, 16, 16), 'float16', (32, 32, 16, 16), 'float16', (1,), 'float32'),
((64, 32, 16, 16), 'float16', (64, 64, 16, 16), 'float16', (1,), 'float32'),
((16, 64, 16, 16), 'float16', (16, 16, 16, 16), 'float16', (1,), 'float32')]
input_shape = (tuple(input_x1_shape), input_x1_dtype, tuple(input_x2_shape), input_x2_dtype, tuple(input_x3_shape), input_x3_dtype)
input_shape = (
tuple(input_x1_shape), input_x1_dtype, tuple(input_x2_shape), input_x2_dtype, tuple(input_x3_shape), input_x3_dtype)
if input_shape not in Supported:
raise RuntimeError("input_shape %s is not supported" % str(input_shape))
@@ -93,6 +92,7 @@ def CusMatMulCubeFraczRightMul(input_x1, input_x2, input_x3, bias=None, output_y
tik_instance.BuildCCE(kernel_name=kernel_name, inputs=[input_x1, input_x2, input_x3], outputs=[resMatmul])
return tik_instance
def cus_cube_matmul_right_mul(tik_instance, input_x1, input_x2, input_x3,
res):
diag_size = 128
@@ -176,7 +176,7 @@ def cus_cube_matmul_right_mul(tik_instance, input_x1, input_x2, input_x3,
name="resMatmul_L0C", scope=tik.scope_cc)
with tik_instance.for_range(0, loop_k_num, thread_num=thread_num_k) as thread_idx_k:
if diag_opt:
k_idx = (core_n*loop_n_num + cc_n) * no_tile + thread_idx_k * ko_tile_inner
k_idx = (core_n * loop_n_num + cc_n) * no_tile + thread_idx_k * ko_tile_inner
else:
k_idx = thread_idx_k * ko_tile_inner
# input_x1 -> input_x1_L1
@@ -191,7 +191,7 @@ def cus_cube_matmul_right_mul(tik_instance, input_x1, input_x2, input_x3,
input_x2_L1 = tik_instance.Tensor("float16", [no_tile, ko_tile_inner, c0, c0],
name="input_x2_L1", scope=tik.scope_cbuf)
tik_instance.data_move(input_x2_L1,
input_x2[(core_n*loop_n_num + cc_n) * no_tile,
input_x2[(core_n * loop_n_num + cc_n) * no_tile,
k_idx, 0, 0],
0, no_tile, ko_tile_inner * c0 * c0 * fp16_size // blocksize,
(ko - ko_tile_inner) * c0 * c0 * fp16_size // blocksize, 0)
@@ -215,9 +215,9 @@ def cus_cube_matmul_right_mul(tik_instance, input_x1, input_x2, input_x3,
tik_instance.mmad(res_L0C, input_x1_L0A, input_x2_L0B, mo_tile * c0,
ko_tile_inner * c0, no_tile * c0, 1)
res_ub = tik_instance.Tensor("float32", [no_tile, mo_tile, c0, c0],
name="resMatmul_ub", scope=tik.scope_ubuf)
name="resMatmul_ub", scope=tik.scope_ubuf)
tik_instance.data_move(res_ub, res_L0C, 0, 1, no_tile * mo_tile, 0, 0)
input_3_local_UB = tik_instance.Tensor("float32", (8,), scope=tik.scope_ubuf, name="input_3_local_UB")
tik_instance.data_move(input_3_local_UB, input_x3, 0, 1, 1, 0, 0)
matrix_max_scalar = tik_instance.Scalar("float32")
@@ -236,7 +236,7 @@ def cus_cube_matmul_right_mul(tik_instance, input_x1, input_x2, input_x3,
res_ub[count * repeate_times_max * vectorfp32_size],
res_ub[count * repeate_times_max * vectorfp32_size],
matrix_max_scalar, repeate_num, 1, 1, 8, 8)
tik_instance.data_move(res[(core_n * loop_n_num + cc_n) * no_tile,
(core_m * loop_m_num + cc_m) * mo_tile, 0, 0],
res_ub, 0, no_tile,
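
The hunk above is truncated by the diff view, but the structure of cus_cube_matmul_right_mul is already visible: tiles of x1 and x2 are staged through L1/L0A/L0B, partial products are accumulated in L0C over the K dimension, and each output tile is scaled by the broadcast scalar from input_x3 before being written back. A hedged plain-Python sketch of that blocked pattern (tile size and names are illustrative, not the kernel's actual parameters):

import numpy as np

def tiled_matmul_then_scale(a, b, scale, tile=16):
    # Blocked matmul: accumulate K-tiles into a small accumulator (the role
    # L0C plays in the TIK kernel), then scale before writing the tile back.
    m, k = a.shape
    k2, n = b.shape
    assert k == k2 and m % tile == 0 and n % tile == 0 and k % tile == 0
    out = np.zeros((m, n), dtype=np.float32)
    for i in range(0, m, tile):
        for j in range(0, n, tile):
            acc = np.zeros((tile, tile), dtype=np.float32)
            for p in range(0, k, tile):
                acc += a[i:i + tile, p:p + tile].astype(np.float32) @ \
                       b[p:p + tile, j:j + tile].astype(np.float32)
            out[i:i + tile, j:j + tile] = acc * np.float32(scale)
    return out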

File diff suppressed because it is too large

@@ -13,24 +13,25 @@
# limitations under the License.
# ============================================================================
"""CusMatrixCombine"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
from te import tik
from topi.cce import util
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
cus_matrix_combine_op_info = TBERegOp("CusMatrixCombine") \
.fusion_type("OPAQUE") \
.async_flag(False) \
.binfile_name("matrixcombine.so") \
.compute_cost(10) \
.kernel_name("CusMatrixCombine") \
.partial_flag(True) \
.input(0, "x1", False, "required", "all") \
.output(0, "y", False, "required", "all") \
.dtype_format(DataType.F32_Default, DataType.F32_Default) \
.get_op_info()
.fusion_type("OPAQUE") \
.async_flag(False) \
.binfile_name("matrixcombine.so") \
.compute_cost(10) \
.kernel_name("CusMatrixCombine") \
.partial_flag(True) \
.input(0, "x1", False, "required", "all") \
.output(0, "y", False, "required", "all") \
.dtype_format(DataType.F32_Default, DataType.F32_Default) \
.get_op_info()
@op_info_register(cus_matrix_combine_op_info)
def CusMatrixCombine(input_x, output,kernel_name="matrix_combine"):
def CusMatrixCombine(input_x, output, kernel_name="matrix_combine"):
input_x_shape = input_x.get("shape")
output_shape = output.get("shape")
split_dim = 128
@@ -45,18 +46,20 @@ def CusMatrixCombine(input_x, output,kernel_name="matrix_combine"):
blocks = 32
matrix_dim = input_x_shape[0] * input_x_shape[1]
if input_x_shape[0] == 1 and input_x_shape[1] == 64 :
if input_x_shape[0] == 1 and input_x_shape[1] == 64:
tiling_dim = 2
bs = 1
with tik_instance.for_range(0,blocks,block_num=blocks) as block_index:
input_x_ub = tik_instance.Tensor("float32", (tiling_dim, matrix_dim), name="input_x_ub", scope=tik.scope_ubuf)
with tik_instance.for_range(0, blocks, block_num=blocks) as block_index:
input_x_ub = tik_instance.Tensor("float32", (tiling_dim, matrix_dim), name="input_x_ub",
scope=tik.scope_ubuf)
tik_instance.data_move(input_x_ub, input_x[0, block_index * tiling_dim, 0], 0, 1, 16, 0, 0)
tik_instance.data_move(res[block_index * tiling_dim, 0], input_x_ub, 0, 1, 16, 0, 0)
else:
tiling_dim = 4
bs = input_x_shape[0]
with tik_instance.for_range(0,blocks,block_num=blocks) as block_index:
input_x_ub = tik_instance.Tensor("float32", (tiling_dim, matrix_dim), name="input_x_ub", scope=tik.scope_ubuf)
with tik_instance.for_range(0, blocks, block_num=blocks) as block_index:
input_x_ub = tik_instance.Tensor("float32", (tiling_dim, matrix_dim), name="input_x_ub",
scope=tik.scope_ubuf)
zero = tik_instance.Scalar("float32")
zero.set_as(0.0)
with tik_instance.for_range(0, bs) as i:
@@ -69,7 +72,9 @@ def CusMatrixCombine(input_x, output,kernel_name="matrix_combine"):
tik_instance.vector_dup(64, input_x_ub, zero, repeat_1, 1, 8)
tik_instance.vector_dup(64, input_x_ub[255 * 64], zero, repeat_2, 1, 8)
with tik_instance.for_range(0, tiling_dim) as j:
tik_instance.data_move(input_x_ub[j, split_dim * i], input_x[i, block_index * tiling_dim + j, 0], 0, 1, 16, 0, 0)
tik_instance.data_move(res[i * split_dim + block_index * tiling_dim, 0], input_x_ub, 0, 1, tiling_dim * matrix_dim *4 // 32, 0, 0)
tik_instance.data_move(input_x_ub[j, split_dim * i], input_x[i, block_index * tiling_dim + j, 0], 0,
1, 16, 0, 0)
tik_instance.data_move(res[i * split_dim + block_index * tiling_dim, 0], input_x_ub, 0, 1,
tiling_dim * matrix_dim * 4 // 32, 0, 0)
tik_instance.BuildCCE(kernel_name=kernel_name, inputs=[input_x], outputs=[res])
return tik_instance
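
For orientation, a hedged NumPy sketch of the effect of CusMatrixCombine in its general path above (inferred from the vector_dup/data_move pattern; the helper name and shapes are assumptions):

import numpy as np

def matrix_combine_reference(x, split_dim=128):
    # x assumed to be (bs, split_dim, split_dim); each block is placed on the
    # diagonal of a (bs*split_dim, bs*split_dim) float32 result and the
    # off-diagonal region is zero-filled, matching the `zero` vector_dup.
    bs = x.shape[0]
    n = bs * split_dim
    out = np.zeros((n, n), dtype=np.float32)
    for i in range(bs):
        out[i * split_dim:(i + 1) * split_dim,
            i * split_dim:(i + 1) * split_dim] = x[i]
    return out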

File diff suppressed because it is too large