[llvm] 88e6449 - [X86] Update some AMX tests to use opaque pointers (NFC)

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Thu Jun 23 03:22:16 PDT 2022


Author: Nikita Popov
Date: 2022-06-23T12:22:08+02:00
New Revision: 88e64490c163650271fab630090244016c92b823

URL: https://github.com/llvm/llvm-project/commit/88e64490c163650271fab630090244016c92b823
DIFF: https://github.com/llvm/llvm-project/commit/88e64490c163650271fab630090244016c92b823.diff

LOG: [X86] Update some AMX tests to use opaque pointers (NFC)

This only touches IR tests (or tests without codegen changes).
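For context, the mechanical pattern applied throughout these tests looks roughly like the sketch below (an illustrative fragment, not copied from any single test in this patch; the %buf/%row/%col names are placeholders). With typed pointers, passing an alloca to an AMX intrinsic required an explicit bitcast to i8*; with opaque pointers the cast is gone and the alloca is passed as ptr directly:

  ; typed pointers: bitcast needed before calling the intrinsic
  %buf = alloca <256 x i32>, align 1024
  %p = bitcast <256 x i32>* %buf to i8*
  %t = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col, i8* %p, i64 64)

  ; opaque pointers: the bitcast disappears, the alloca is used as-is
  %buf = alloca <256 x i32>, align 1024
  %t = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col, ptr %buf, i64 64)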

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/AMX/amx-configO2toO0-precfg.ll
    llvm/test/CodeGen/X86/AMX/amx-gemm.ll
    llvm/test/CodeGen/X86/AMX/amx-type.ll
    llvm/test/CodeGen/X86/AMX/lat-transform-amx-bitcast.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/X86/AMX/amx-configO2toO0-precfg.ll b/llvm/test/CodeGen/X86/AMX/amx-configO2toO0-precfg.ll
index ae0d2135b89ee..82b9746c41933 100644
--- a/llvm/test/CodeGen/X86/AMX/amx-configO2toO0-precfg.ll
+++ b/llvm/test/CodeGen/X86/AMX/amx-configO2toO0-precfg.ll
@@ -16,190 +16,163 @@ define dso_local void @test_api(i32 %cond, i16 signext %row, i16 signext %col) l
 ; CHECK-NEXT:    [[TMP6:%.*]] = alloca <16 x i32>, align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = alloca <16 x i32>, align 4
 ; CHECK-NEXT:    [[I:%.*]] = alloca <256 x i32>, align 1024
-; CHECK-NEXT:    [[I1:%.*]] = bitcast <256 x i32>* [[I]] to i8*
 ; CHECK-NEXT:    [[I2:%.*]] = alloca <256 x i32>, align 1024
-; CHECK-NEXT:    [[I3:%.*]] = bitcast <256 x i32>* [[I2]] to i8*
 ; CHECK-NEXT:    [[I4:%.*]] = alloca <256 x i32>, align 1024
-; CHECK-NEXT:    [[I5:%.*]] = bitcast <256 x i32>* [[I4]] to i8*
 ; CHECK-NEXT:    [[I6:%.*]] = alloca <256 x i32>, align 1024
-; CHECK-NEXT:    [[I7:%.*]] = bitcast <256 x i32>* [[I6]] to i8*
 ; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[COND:%.*]], 0
 ; CHECK-NEXT:    br i1 [[TOBOOL_NOT]], label [[IF_ELSE:%.*]], label [[IF_THEN:%.*]]
 ; CHECK:       if.then:
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <16 x i32>* [[TMP7]] to i8*
-; CHECK-NEXT:    store <16 x i32> zeroinitializer, <16 x i32>* [[TMP7]], align 4
-; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr i8, i8* [[TMP8]], i64 0
-; CHECK-NEXT:    store i8 1, i8* [[TMP9]], align 1
-; CHECK-NEXT:    [[AMX_TMM_0_SHAPE_ROW:%.*]] = getelementptr i8, i8* [[TMP8]], i64 48
-; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr i8, i8* [[TMP8]], i64 16
-; CHECK-NEXT:    [[AMX_TMM_0_SHAPE_COL:%.*]] = bitcast i8* [[TMP10]] to i16*
-; CHECK-NEXT:    [[TMP11:%.*]] = trunc i16 [[ROW:%.*]] to i8
-; CHECK-NEXT:    store i8 [[TMP11]], i8* [[AMX_TMM_0_SHAPE_ROW]], align 1
-; CHECK-NEXT:    store i16 8, i16* [[AMX_TMM_0_SHAPE_COL]], align 2
-; CHECK-NEXT:    call void @llvm.x86.ldtilecfg.internal(i8* [[TMP8]])
-; CHECK-NEXT:    [[I8:%.*]] = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 [[ROW]], i16 8, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32)
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[ROW]], i16 8, i8* [[I5]], i64 64, x86_amx [[I8]])
-; CHECK-NEXT:    [[TMP12:%.*]] = bitcast <16 x i32>* [[TMP6]] to i8*
-; CHECK-NEXT:    store <16 x i32> zeroinitializer, <16 x i32>* [[TMP6]], align 4
-; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr i8, i8* [[TMP12]], i64 0
-; CHECK-NEXT:    store i8 1, i8* [[TMP13]], align 1
-; CHECK-NEXT:    [[AMX_TMM_0_SHAPE_ROW1:%.*]] = getelementptr i8, i8* [[TMP12]], i64 48
-; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr i8, i8* [[TMP12]], i64 16
-; CHECK-NEXT:    [[AMX_TMM_0_SHAPE_COL2:%.*]] = bitcast i8* [[TMP14]] to i16*
-; CHECK-NEXT:    store i8 8, i8* [[AMX_TMM_0_SHAPE_ROW1]], align 1
-; CHECK-NEXT:    store i16 [[COL:%.*]], i16* [[AMX_TMM_0_SHAPE_COL2]], align 2
-; CHECK-NEXT:    call void @llvm.x86.ldtilecfg.internal(i8* [[TMP12]])
-; CHECK-NEXT:    [[I9:%.*]] = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 [[COL]], i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32)
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 8, i16 [[COL]], i8* [[I3]], i64 64, x86_amx [[I9]])
-; CHECK-NEXT:    [[TMP15:%.*]] = bitcast <16 x i32>* [[TMP5]] to i8*
-; CHECK-NEXT:    store <16 x i32> zeroinitializer, <16 x i32>* [[TMP5]], align 4
-; CHECK-NEXT:    [[TMP16:%.*]] = getelementptr i8, i8* [[TMP15]], i64 0
-; CHECK-NEXT:    store i8 1, i8* [[TMP16]], align 1
-; CHECK-NEXT:    [[AMX_TMM_0_SHAPE_ROW3:%.*]] = getelementptr i8, i8* [[TMP15]], i64 48
-; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr i8, i8* [[TMP15]], i64 16
-; CHECK-NEXT:    [[AMX_TMM_0_SHAPE_COL4:%.*]] = bitcast i8* [[TMP17]] to i16*
-; CHECK-NEXT:    [[TMP18:%.*]] = trunc i16 [[ROW]] to i8
-; CHECK-NEXT:    store i8 [[TMP18]], i8* [[AMX_TMM_0_SHAPE_ROW3]], align 1
-; CHECK-NEXT:    store i16 [[COL]], i16* [[AMX_TMM_0_SHAPE_COL4]], align 2
-; CHECK-NEXT:    call void @llvm.x86.ldtilecfg.internal(i8* [[TMP15]])
-; CHECK-NEXT:    [[I10:%.*]] = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 [[ROW]], i16 [[COL]], i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32)
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[ROW]], i16 [[COL]], i8* [[I1]], i64 64, x86_amx [[I10]])
+; CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr [[TMP7]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr i8, ptr [[TMP7]], i64 0
+; CHECK-NEXT:    store i8 1, ptr [[TMP8]], align 1
+; CHECK-NEXT:    [[AMX_TMM_0_SHAPE_ROW:%.*]] = getelementptr i8, ptr [[TMP7]], i64 48
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr i8, ptr [[TMP7]], i64 16
+; CHECK-NEXT:    [[TMP10:%.*]] = trunc i16 [[ROW:%.*]] to i8
+; CHECK-NEXT:    store i8 [[TMP10]], ptr [[AMX_TMM_0_SHAPE_ROW]], align 1
+; CHECK-NEXT:    store i16 8, ptr [[TMP9]], align 2
+; CHECK-NEXT:    call void @llvm.x86.ldtilecfg.internal(ptr [[TMP7]])
+; CHECK-NEXT:    [[I8:%.*]] = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 [[ROW]], i16 8, ptr @buf, i64 32)
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[ROW]], i16 8, ptr [[I4]], i64 64, x86_amx [[I8]])
+; CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr [[TMP6]], align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr i8, ptr [[TMP6]], i64 0
+; CHECK-NEXT:    store i8 1, ptr [[TMP11]], align 1
+; CHECK-NEXT:    [[AMX_TMM_0_SHAPE_ROW1:%.*]] = getelementptr i8, ptr [[TMP6]], i64 48
+; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr i8, ptr [[TMP6]], i64 16
+; CHECK-NEXT:    store i8 8, ptr [[AMX_TMM_0_SHAPE_ROW1]], align 1
+; CHECK-NEXT:    store i16 [[COL:%.*]], ptr [[TMP12]], align 2
+; CHECK-NEXT:    call void @llvm.x86.ldtilecfg.internal(ptr [[TMP6]])
+; CHECK-NEXT:    [[I9:%.*]] = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 [[COL]], ptr @buf, i64 32)
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 8, i16 [[COL]], ptr [[I2]], i64 64, x86_amx [[I9]])
+; CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr [[TMP5]], align 4
+; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr i8, ptr [[TMP5]], i64 0
+; CHECK-NEXT:    store i8 1, ptr [[TMP13]], align 1
+; CHECK-NEXT:    [[AMX_TMM_0_SHAPE_ROW2:%.*]] = getelementptr i8, ptr [[TMP5]], i64 48
+; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr i8, ptr [[TMP5]], i64 16
+; CHECK-NEXT:    [[TMP15:%.*]] = trunc i16 [[ROW]] to i8
+; CHECK-NEXT:    store i8 [[TMP15]], ptr [[AMX_TMM_0_SHAPE_ROW2]], align 1
+; CHECK-NEXT:    store i16 [[COL]], ptr [[TMP14]], align 2
+; CHECK-NEXT:    call void @llvm.x86.ldtilecfg.internal(ptr [[TMP5]])
+; CHECK-NEXT:    [[I10:%.*]] = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 [[ROW]], i16 [[COL]], ptr @buf, i64 32)
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[ROW]], i16 [[COL]], ptr [[I]], i64 64, x86_amx [[I10]])
 ; CHECK-NEXT:    br label [[IF_END:%.*]]
 ; CHECK:       if.else:
-; CHECK-NEXT:    [[TMP19:%.*]] = bitcast <16 x i32>* [[TMP4]] to i8*
-; CHECK-NEXT:    store <16 x i32> zeroinitializer, <16 x i32>* [[TMP4]], align 4
-; CHECK-NEXT:    [[TMP20:%.*]] = getelementptr i8, i8* [[TMP19]], i64 0
-; CHECK-NEXT:    store i8 1, i8* [[TMP20]], align 1
-; CHECK-NEXT:    [[AMX_TMM_0_SHAPE_ROW5:%.*]] = getelementptr i8, i8* [[TMP19]], i64 48
-; CHECK-NEXT:    [[TMP21:%.*]] = getelementptr i8, i8* [[TMP19]], i64 16
-; CHECK-NEXT:    [[AMX_TMM_0_SHAPE_COL6:%.*]] = bitcast i8* [[TMP21]] to i16*
-; CHECK-NEXT:    [[TMP22:%.*]] = trunc i16 [[ROW]] to i8
-; CHECK-NEXT:    store i8 [[TMP22]], i8* [[AMX_TMM_0_SHAPE_ROW5]], align 1
-; CHECK-NEXT:    store i16 8, i16* [[AMX_TMM_0_SHAPE_COL6]], align 2
-; CHECK-NEXT:    call void @llvm.x86.ldtilecfg.internal(i8* [[TMP19]])
-; CHECK-NEXT:    [[I11:%.*]] = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 [[ROW]], i16 8, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf2, i64 0, i64 0), i64 32)
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[ROW]], i16 8, i8* [[I5]], i64 64, x86_amx [[I11]])
-; CHECK-NEXT:    [[TMP23:%.*]] = bitcast <16 x i32>* [[TMP3]] to i8*
-; CHECK-NEXT:    store <16 x i32> zeroinitializer, <16 x i32>* [[TMP3]], align 4
-; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr i8, i8* [[TMP23]], i64 0
-; CHECK-NEXT:    store i8 1, i8* [[TMP24]], align 1
-; CHECK-NEXT:    [[AMX_TMM_0_SHAPE_ROW7:%.*]] = getelementptr i8, i8* [[TMP23]], i64 48
-; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr i8, i8* [[TMP23]], i64 16
-; CHECK-NEXT:    [[AMX_TMM_0_SHAPE_COL8:%.*]] = bitcast i8* [[TMP25]] to i16*
-; CHECK-NEXT:    store i8 8, i8* [[AMX_TMM_0_SHAPE_ROW7]], align 1
-; CHECK-NEXT:    store i16 [[COL]], i16* [[AMX_TMM_0_SHAPE_COL8]], align 2
-; CHECK-NEXT:    call void @llvm.x86.ldtilecfg.internal(i8* [[TMP23]])
-; CHECK-NEXT:    [[I12:%.*]] = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 [[COL]], i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf2, i64 0, i64 0), i64 32)
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 8, i16 [[COL]], i8* [[I3]], i64 64, x86_amx [[I12]])
-; CHECK-NEXT:    [[TMP26:%.*]] = bitcast <16 x i32>* [[TMP2]] to i8*
-; CHECK-NEXT:    store <16 x i32> zeroinitializer, <16 x i32>* [[TMP2]], align 4
-; CHECK-NEXT:    [[TMP27:%.*]] = getelementptr i8, i8* [[TMP26]], i64 0
-; CHECK-NEXT:    store i8 1, i8* [[TMP27]], align 1
-; CHECK-NEXT:    [[AMX_TMM_0_SHAPE_ROW9:%.*]] = getelementptr i8, i8* [[TMP26]], i64 48
-; CHECK-NEXT:    [[TMP28:%.*]] = getelementptr i8, i8* [[TMP26]], i64 16
-; CHECK-NEXT:    [[AMX_TMM_0_SHAPE_COL10:%.*]] = bitcast i8* [[TMP28]] to i16*
-; CHECK-NEXT:    [[TMP29:%.*]] = trunc i16 [[ROW]] to i8
-; CHECK-NEXT:    store i8 [[TMP29]], i8* [[AMX_TMM_0_SHAPE_ROW9]], align 1
-; CHECK-NEXT:    store i16 [[COL]], i16* [[AMX_TMM_0_SHAPE_COL10]], align 2
-; CHECK-NEXT:    call void @llvm.x86.ldtilecfg.internal(i8* [[TMP26]])
-; CHECK-NEXT:    [[I13:%.*]] = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 [[ROW]], i16 [[COL]], i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf2, i64 0, i64 0), i64 32)
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[ROW]], i16 [[COL]], i8* [[I1]], i64 64, x86_amx [[I13]])
+; CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr [[TMP4]], align 4
+; CHECK-NEXT:    [[TMP16:%.*]] = getelementptr i8, ptr [[TMP4]], i64 0
+; CHECK-NEXT:    store i8 1, ptr [[TMP16]], align 1
+; CHECK-NEXT:    [[AMX_TMM_0_SHAPE_ROW3:%.*]] = getelementptr i8, ptr [[TMP4]], i64 48
+; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr i8, ptr [[TMP4]], i64 16
+; CHECK-NEXT:    [[TMP18:%.*]] = trunc i16 [[ROW]] to i8
+; CHECK-NEXT:    store i8 [[TMP18]], ptr [[AMX_TMM_0_SHAPE_ROW3]], align 1
+; CHECK-NEXT:    store i16 8, ptr [[TMP17]], align 2
+; CHECK-NEXT:    call void @llvm.x86.ldtilecfg.internal(ptr [[TMP4]])
+; CHECK-NEXT:    [[I11:%.*]] = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 [[ROW]], i16 8, ptr @buf2, i64 32)
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[ROW]], i16 8, ptr [[I4]], i64 64, x86_amx [[I11]])
+; CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP19:%.*]] = getelementptr i8, ptr [[TMP3]], i64 0
+; CHECK-NEXT:    store i8 1, ptr [[TMP19]], align 1
+; CHECK-NEXT:    [[AMX_TMM_0_SHAPE_ROW4:%.*]] = getelementptr i8, ptr [[TMP3]], i64 48
+; CHECK-NEXT:    [[TMP20:%.*]] = getelementptr i8, ptr [[TMP3]], i64 16
+; CHECK-NEXT:    store i8 8, ptr [[AMX_TMM_0_SHAPE_ROW4]], align 1
+; CHECK-NEXT:    store i16 [[COL]], ptr [[TMP20]], align 2
+; CHECK-NEXT:    call void @llvm.x86.ldtilecfg.internal(ptr [[TMP3]])
+; CHECK-NEXT:    [[I12:%.*]] = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 [[COL]], ptr @buf2, i64 32)
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 8, i16 [[COL]], ptr [[I2]], i64 64, x86_amx [[I12]])
+; CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP21:%.*]] = getelementptr i8, ptr [[TMP2]], i64 0
+; CHECK-NEXT:    store i8 1, ptr [[TMP21]], align 1
+; CHECK-NEXT:    [[AMX_TMM_0_SHAPE_ROW5:%.*]] = getelementptr i8, ptr [[TMP2]], i64 48
+; CHECK-NEXT:    [[TMP22:%.*]] = getelementptr i8, ptr [[TMP2]], i64 16
+; CHECK-NEXT:    [[TMP23:%.*]] = trunc i16 [[ROW]] to i8
+; CHECK-NEXT:    store i8 [[TMP23]], ptr [[AMX_TMM_0_SHAPE_ROW5]], align 1
+; CHECK-NEXT:    store i16 [[COL]], ptr [[TMP22]], align 2
+; CHECK-NEXT:    call void @llvm.x86.ldtilecfg.internal(ptr [[TMP2]])
+; CHECK-NEXT:    [[I13:%.*]] = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 [[ROW]], i16 [[COL]], ptr @buf2, i64 32)
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[ROW]], i16 [[COL]], ptr [[I]], i64 64, x86_amx [[I13]])
 ; CHECK-NEXT:    br label [[IF_END]]
 ; CHECK:       if.end:
-; CHECK-NEXT:    [[TMP30:%.*]] = bitcast <16 x i32>* [[TMP1]] to i8*
-; CHECK-NEXT:    store <16 x i32> zeroinitializer, <16 x i32>* [[TMP1]], align 4
-; CHECK-NEXT:    [[TMP31:%.*]] = getelementptr i8, i8* [[TMP30]], i64 0
-; CHECK-NEXT:    store i8 1, i8* [[TMP31]], align 1
-; CHECK-NEXT:    [[AMX_TMM_0_SHAPE_ROW11:%.*]] = getelementptr i8, i8* [[TMP30]], i64 48
-; CHECK-NEXT:    [[TMP32:%.*]] = getelementptr i8, i8* [[TMP30]], i64 16
-; CHECK-NEXT:    [[AMX_TMM_0_SHAPE_COL12:%.*]] = bitcast i8* [[TMP32]] to i16*
-; CHECK-NEXT:    [[TMP33:%.*]] = trunc i16 [[ROW]] to i8
-; CHECK-NEXT:    store i8 [[TMP33]], i8* [[AMX_TMM_0_SHAPE_ROW11]], align 1
-; CHECK-NEXT:    store i16 [[COL]], i16* [[AMX_TMM_0_SHAPE_COL12]], align 2
-; CHECK-NEXT:    [[AMX_TMM_1_SHAPE_ROW:%.*]] = getelementptr i8, i8* [[TMP30]], i64 49
-; CHECK-NEXT:    [[TMP34:%.*]] = getelementptr i8, i8* [[TMP30]], i64 18
-; CHECK-NEXT:    [[AMX_TMM_1_SHAPE_COL:%.*]] = bitcast i8* [[TMP34]] to i16*
-; CHECK-NEXT:    [[TMP35:%.*]] = trunc i16 [[ROW]] to i8
-; CHECK-NEXT:    store i8 [[TMP35]], i8* [[AMX_TMM_1_SHAPE_ROW]], align 1
-; CHECK-NEXT:    store i16 8, i16* [[AMX_TMM_1_SHAPE_COL]], align 2
-; CHECK-NEXT:    [[AMX_TMM_2_SHAPE_ROW:%.*]] = getelementptr i8, i8* [[TMP30]], i64 50
-; CHECK-NEXT:    [[TMP36:%.*]] = getelementptr i8, i8* [[TMP30]], i64 20
-; CHECK-NEXT:    [[AMX_TMM_2_SHAPE_COL:%.*]] = bitcast i8* [[TMP36]] to i16*
-; CHECK-NEXT:    store i8 8, i8* [[AMX_TMM_2_SHAPE_ROW]], align 1
-; CHECK-NEXT:    store i16 [[COL]], i16* [[AMX_TMM_2_SHAPE_COL]], align 2
-; CHECK-NEXT:    [[AMX_TMM_3_SHAPE_ROW:%.*]] = getelementptr i8, i8* [[TMP30]], i64 51
-; CHECK-NEXT:    [[TMP37:%.*]] = getelementptr i8, i8* [[TMP30]], i64 22
-; CHECK-NEXT:    [[AMX_TMM_3_SHAPE_COL:%.*]] = bitcast i8* [[TMP37]] to i16*
-; CHECK-NEXT:    [[TMP38:%.*]] = trunc i16 [[ROW]] to i8
-; CHECK-NEXT:    store i8 [[TMP38]], i8* [[AMX_TMM_3_SHAPE_ROW]], align 1
-; CHECK-NEXT:    store i16 [[COL]], i16* [[AMX_TMM_3_SHAPE_COL]], align 2
-; CHECK-NEXT:    call void @llvm.x86.ldtilecfg.internal(i8* [[TMP30]])
-; CHECK-NEXT:    [[I14:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[ROW]], i16 8, i8* [[I5]], i64 64)
-; CHECK-NEXT:    [[I15:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 [[COL]], i8* [[I3]], i64 64)
-; CHECK-NEXT:    [[I16:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[ROW]], i16 [[COL]], i8* [[I1]], i64 64)
+; CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr i8, ptr [[TMP1]], i64 0
+; CHECK-NEXT:    store i8 1, ptr [[TMP24]], align 1
+; CHECK-NEXT:    [[AMX_TMM_0_SHAPE_ROW6:%.*]] = getelementptr i8, ptr [[TMP1]], i64 48
+; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr i8, ptr [[TMP1]], i64 16
+; CHECK-NEXT:    [[TMP26:%.*]] = trunc i16 [[ROW]] to i8
+; CHECK-NEXT:    store i8 [[TMP26]], ptr [[AMX_TMM_0_SHAPE_ROW6]], align 1
+; CHECK-NEXT:    store i16 [[COL]], ptr [[TMP25]], align 2
+; CHECK-NEXT:    [[AMX_TMM_1_SHAPE_ROW:%.*]] = getelementptr i8, ptr [[TMP1]], i64 49
+; CHECK-NEXT:    [[TMP27:%.*]] = getelementptr i8, ptr [[TMP1]], i64 18
+; CHECK-NEXT:    [[TMP28:%.*]] = trunc i16 [[ROW]] to i8
+; CHECK-NEXT:    store i8 [[TMP28]], ptr [[AMX_TMM_1_SHAPE_ROW]], align 1
+; CHECK-NEXT:    store i16 8, ptr [[TMP27]], align 2
+; CHECK-NEXT:    [[AMX_TMM_2_SHAPE_ROW:%.*]] = getelementptr i8, ptr [[TMP1]], i64 50
+; CHECK-NEXT:    [[TMP29:%.*]] = getelementptr i8, ptr [[TMP1]], i64 20
+; CHECK-NEXT:    store i8 8, ptr [[AMX_TMM_2_SHAPE_ROW]], align 1
+; CHECK-NEXT:    store i16 [[COL]], ptr [[TMP29]], align 2
+; CHECK-NEXT:    [[AMX_TMM_3_SHAPE_ROW:%.*]] = getelementptr i8, ptr [[TMP1]], i64 51
+; CHECK-NEXT:    [[TMP30:%.*]] = getelementptr i8, ptr [[TMP1]], i64 22
+; CHECK-NEXT:    [[TMP31:%.*]] = trunc i16 [[ROW]] to i8
+; CHECK-NEXT:    store i8 [[TMP31]], ptr [[AMX_TMM_3_SHAPE_ROW]], align 1
+; CHECK-NEXT:    store i16 [[COL]], ptr [[TMP30]], align 2
+; CHECK-NEXT:    call void @llvm.x86.ldtilecfg.internal(ptr [[TMP1]])
+; CHECK-NEXT:    [[I14:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[ROW]], i16 8, ptr [[I4]], i64 64)
+; CHECK-NEXT:    [[I15:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 [[COL]], ptr [[I2]], i64 64)
+; CHECK-NEXT:    [[I16:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[ROW]], i16 [[COL]], ptr [[I]], i64 64)
 ; CHECK-NEXT:    [[I17:%.*]] = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 [[ROW]], i16 [[COL]], i16 8, x86_amx [[I16]], x86_amx [[I14]], x86_amx [[I15]])
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[ROW]], i16 [[COL]], i8* [[I7]], i64 64, x86_amx [[I17]])
-; CHECK-NEXT:    [[TMP39:%.*]] = bitcast <16 x i32>* [[TMP0]] to i8*
-; CHECK-NEXT:    store <16 x i32> zeroinitializer, <16 x i32>* [[TMP0]], align 4
-; CHECK-NEXT:    [[TMP40:%.*]] = getelementptr i8, i8* [[TMP39]], i64 0
-; CHECK-NEXT:    store i8 1, i8* [[TMP40]], align 1
-; CHECK-NEXT:    [[AMX_TMM_0_SHAPE_ROW13:%.*]] = getelementptr i8, i8* [[TMP39]], i64 48
-; CHECK-NEXT:    [[TMP41:%.*]] = getelementptr i8, i8* [[TMP39]], i64 16
-; CHECK-NEXT:    [[AMX_TMM_0_SHAPE_COL14:%.*]] = bitcast i8* [[TMP41]] to i16*
-; CHECK-NEXT:    [[TMP42:%.*]] = trunc i16 [[ROW]] to i8
-; CHECK-NEXT:    store i8 [[TMP42]], i8* [[AMX_TMM_0_SHAPE_ROW13]], align 1
-; CHECK-NEXT:    store i16 [[COL]], i16* [[AMX_TMM_0_SHAPE_COL14]], align 2
-; CHECK-NEXT:    call void @llvm.x86.ldtilecfg.internal(i8* [[TMP39]])
-; CHECK-NEXT:    [[I18:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[ROW]], i16 [[COL]], i8* [[I7]], i64 64)
-; CHECK-NEXT:    tail call void @llvm.x86.tilestored64.internal(i16 [[ROW]], i16 [[COL]], i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32, x86_amx [[I18]])
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[ROW]], i16 [[COL]], ptr [[I6]], i64 64, x86_amx [[I17]])
+; CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP32:%.*]] = getelementptr i8, ptr [[TMP0]], i64 0
+; CHECK-NEXT:    store i8 1, ptr [[TMP32]], align 1
+; CHECK-NEXT:    [[AMX_TMM_0_SHAPE_ROW7:%.*]] = getelementptr i8, ptr [[TMP0]], i64 48
+; CHECK-NEXT:    [[TMP33:%.*]] = getelementptr i8, ptr [[TMP0]], i64 16
+; CHECK-NEXT:    [[TMP34:%.*]] = trunc i16 [[ROW]] to i8
+; CHECK-NEXT:    store i8 [[TMP34]], ptr [[AMX_TMM_0_SHAPE_ROW7]], align 1
+; CHECK-NEXT:    store i16 [[COL]], ptr [[TMP33]], align 2
+; CHECK-NEXT:    call void @llvm.x86.ldtilecfg.internal(ptr [[TMP0]])
+; CHECK-NEXT:    [[I18:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[ROW]], i16 [[COL]], ptr [[I6]], i64 64)
+; CHECK-NEXT:    tail call void @llvm.x86.tilestored64.internal(i16 [[ROW]], i16 [[COL]], ptr @buf, i64 32, x86_amx [[I18]])
 ; CHECK-NEXT:    ret void
 ;
 entry:
   %i = alloca <256 x i32>, align 1024
-  %i1 = bitcast <256 x i32>* %i to i8*
   %i2 = alloca <256 x i32>, align 1024
-  %i3 = bitcast <256 x i32>* %i2 to i8*
   %i4 = alloca <256 x i32>, align 1024
-  %i5 = bitcast <256 x i32>* %i4 to i8*
   %i6 = alloca <256 x i32>, align 1024
-  %i7 = bitcast <256 x i32>* %i6 to i8*
   %tobool.not = icmp eq i32 %cond, 0
   br i1 %tobool.not, label %if.else, label %if.then
 
 if.then:                                          ; preds = %entry
-  %i8 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 8, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32)
-  call void @llvm.x86.tilestored64.internal(i16 %row, i16 8, i8* %i5, i64 64, x86_amx %i8)
-  %i9 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32)
-  call void @llvm.x86.tilestored64.internal(i16 8, i16 %col, i8* %i3, i64 64, x86_amx %i9)
-  %i10 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32)
-  call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col, i8* %i1, i64 64, x86_amx %i10)
+  %i8 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 8, ptr @buf, i64 32)
+  call void @llvm.x86.tilestored64.internal(i16 %row, i16 8, ptr %i4, i64 64, x86_amx %i8)
+  %i9 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 %col, ptr @buf, i64 32)
+  call void @llvm.x86.tilestored64.internal(i16 8, i16 %col, ptr %i2, i64 64, x86_amx %i9)
+  %i10 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col, ptr @buf, i64 32)
+  call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col, ptr %i, i64 64, x86_amx %i10)
   br label %if.end
 
 if.else:                                          ; preds = %entry
-  %i11 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 8, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf2, i64 0, i64 0), i64 32)
-  call void @llvm.x86.tilestored64.internal(i16 %row, i16 8, i8* %i5, i64 64, x86_amx %i11)
-  %i12 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf2, i64 0, i64 0), i64 32)
-  call void @llvm.x86.tilestored64.internal(i16 8, i16 %col, i8* %i3, i64 64, x86_amx %i12)
-  %i13 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf2, i64 0, i64 0), i64 32)
-  call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col, i8* %i1, i64 64, x86_amx %i13)
+  %i11 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 8, ptr @buf2, i64 32)
+  call void @llvm.x86.tilestored64.internal(i16 %row, i16 8, ptr %i4, i64 64, x86_amx %i11)
+  %i12 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 %col, ptr @buf2, i64 32)
+  call void @llvm.x86.tilestored64.internal(i16 8, i16 %col, ptr %i2, i64 64, x86_amx %i12)
+  %i13 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col, ptr @buf2, i64 32)
+  call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col, ptr %i, i64 64, x86_amx %i13)
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then
-  %i14 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 8, i8* %i5, i64 64)
-  %i15 = call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 %col, i8* %i3, i64 64)
-  %i16 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col, i8* %i1, i64 64)
+  %i14 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 8, ptr %i4, i64 64)
+  %i15 = call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 %col, ptr %i2, i64 64)
+  %i16 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col, ptr %i, i64 64)
   %i17 = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 %row, i16 %col, i16 8, x86_amx %i16, x86_amx %i14, x86_amx %i15)
-  call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col, i8* %i7, i64 64, x86_amx %i17)
-  %i18 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col, i8* %i7, i64 64)
-  tail call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32, x86_amx %i18)
+  call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col, ptr %i6, i64 64, x86_amx %i17)
+  %i18 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col, ptr %i6, i64 64)
+  tail call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col, ptr @buf, i64 32, x86_amx %i18)
   ret void
 }
 
 ; Function Attrs: nounwind
-declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, i8*, i64)
+declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, ptr, i64)
 
 ; Function Attrs: nounwind
 declare x86_amx @llvm.x86.tdpbssd.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
 
 ; Function Attrs: nounwind
-declare void @llvm.x86.tilestored64.internal(i16, i16, i8*, i64, x86_amx)
+declare void @llvm.x86.tilestored64.internal(i16, i16, ptr, i64, x86_amx)

diff  --git a/llvm/test/CodeGen/X86/AMX/amx-gemm.ll b/llvm/test/CodeGen/X86/AMX/amx-gemm.ll
index ad374394290aa..b8771d525a54b 100644
--- a/llvm/test/CodeGen/X86/AMX/amx-gemm.ll
+++ b/llvm/test/CodeGen/X86/AMX/amx-gemm.ll
@@ -25,7 +25,7 @@
 
 ; CHECK:  ldtilecfg
 
-define dso_local void @inner_product(i32* %A_mem, i32* %B_mem, i32* %C_mem, i32 %M, i32 %N, i32 %K) local_unnamed_addr {
+define dso_local void @inner_product(ptr %A_mem, ptr %B_mem, ptr %C_mem, i32 %M, i32 %N, i32 %K) local_unnamed_addr {
 entry:
   %mul = shl i32 %K, 4
   %conv = sext i32 %K to i64
@@ -57,10 +57,10 @@ for.cond3.preheader:                              ; preds = %for.cond.cleanup5,
   %i2 = trunc i64 %indvars.iv205 to i32
   %mul11 = mul i32 %mul, %i2
   %idx.ext = sext i32 %mul11 to i64
-  %add.ptr = getelementptr inbounds i32, i32* %A_mem, i64 %idx.ext
+  %add.ptr = getelementptr inbounds i32, ptr %A_mem, i64 %idx.ext
   %mul26 = mul i32 %mul25, %i2
   %idx.ext27 = sext i32 %mul26 to i64
-  %add.ptr28 = getelementptr inbounds i32, i32* %C_mem, i64 %idx.ext27
+  %add.ptr28 = getelementptr inbounds i32, ptr %C_mem, i64 %idx.ext27
   br i1 %cmp4173, label %for.body6, label %for.cond.cleanup5
 
 for.cond.cleanup:                                 ; preds = %for.cond.cleanup5, %entry
@@ -78,7 +78,7 @@ for.body6:                                        ; preds = %for.cond.cleanup9,
   br i1 %cmp8163, label %for.body10.preheader, label %for.cond.cleanup9
 
 for.body10.preheader:                             ; preds = %for.body6
-  %add.ptr19 = getelementptr inbounds i32, i32* %B_mem, i64 %i4
+  %add.ptr19 = getelementptr inbounds i32, ptr %B_mem, i64 %i4
   br i1 %i1, label %for.cond.cleanup9.loopexit.unr-lcssa, label %for.body10
 
 for.cond.cleanup9.loopexit.unr-lcssa:             ; preds = %for.body10, %for.body10.preheader
@@ -92,13 +92,11 @@ for.body10.epil:                                  ; preds = %for.body10.epil, %f
   %c.sroa.8127.2.in164.epil = phi x86_amx [ %i11, %for.body10.epil ], [ %c.sroa.8127.2.in164.unr, %for.cond.cleanup9.loopexit.unr-lcssa ]
   %epil.iter = phi i64 [ %epil.iter.sub, %for.body10.epil ], [ %xtraiter, %for.cond.cleanup9.loopexit.unr-lcssa ]
   %i5 = shl nsw i64 %indvars.iv.epil, 4
-  %add.ptr14.epil = getelementptr inbounds i32, i32* %add.ptr, i64 %i5
-  %i6 = bitcast i32* %add.ptr14.epil to i8*
-  %i7 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, i8* %i6, i64 %mul15)
+  %add.ptr14.epil = getelementptr inbounds i32, ptr %add.ptr, i64 %i5
+  %i7 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr %add.ptr14.epil, i64 %mul15)
   %i8 = mul nsw i64 %i5, %conv23
-  %add.ptr22.epil = getelementptr inbounds i32, i32* %add.ptr19, i64 %i8
-  %i9 = bitcast i32* %add.ptr22.epil to i8*
-  %i10 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, i8* %i9, i64 %mul24)
+  %add.ptr22.epil = getelementptr inbounds i32, ptr %add.ptr19, i64 %i8
+  %i10 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr %add.ptr22.epil, i64 %mul24)
   %i11 = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 16, i16 64, i16 64, x86_amx %c.sroa.8127.2.in164.epil, x86_amx %i7, x86_amx %i10)
   %indvars.iv.next.epil = add nuw nsw i64 %indvars.iv.epil, 1
   %epil.iter.sub = add i64 %epil.iter, -1
@@ -107,9 +105,8 @@ for.body10.epil:                                  ; preds = %for.body10.epil, %f
 
 for.cond.cleanup9:                                ; preds = %for.body10.epil, %for.cond.cleanup9.loopexit.unr-lcssa, %for.body6
   %c.sroa.8127.2.in.lcssa = phi x86_amx [ %i3, %for.body6 ], [ %.lcssa.ph, %for.cond.cleanup9.loopexit.unr-lcssa ], [ %i11, %for.body10.epil ]
-  %add.ptr31 = getelementptr inbounds i32, i32* %add.ptr28, i64 %i4
-  %i12 = bitcast i32* %add.ptr31 to i8*
-  tail call void @llvm.x86.tilestored64.internal(i16 16, i16 64, i8* %i12, i64 %mul24, x86_amx %c.sroa.8127.2.in.lcssa)
+  %add.ptr31 = getelementptr inbounds i32, ptr %add.ptr28, i64 %i4
+  tail call void @llvm.x86.tilestored64.internal(i16 16, i16 64, ptr %add.ptr31, i64 %mul24, x86_amx %c.sroa.8127.2.in.lcssa)
   %indvars.iv.next200 = add nuw nsw i64 %indvars.iv199, 1
   %exitcond204.not = icmp eq i64 %indvars.iv.next200, %wide.trip.count203
   br i1 %exitcond204.not, label %for.cond.cleanup5, label %for.body6
@@ -119,83 +116,67 @@ for.body10:                                       ; preds = %for.body10, %for.bo
   %c.sroa.8127.2.in164 = phi x86_amx [ %i68, %for.body10 ], [ %i3, %for.body10.preheader ]
   %niter = phi i64 [ %niter.nsub.7, %for.body10 ], [ %unroll_iter, %for.body10.preheader ]
   %i13 = shl nsw i64 %indvars.iv, 4
-  %add.ptr14 = getelementptr inbounds i32, i32* %add.ptr, i64 %i13
-  %i14 = bitcast i32* %add.ptr14 to i8*
-  %i15 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, i8* %i14, i64 %mul15)
+  %add.ptr14 = getelementptr inbounds i32, ptr %add.ptr, i64 %i13
+  %i15 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr %add.ptr14, i64 %mul15)
   %i16 = mul nsw i64 %i13, %conv23
-  %add.ptr22 = getelementptr inbounds i32, i32* %add.ptr19, i64 %i16
-  %i17 = bitcast i32* %add.ptr22 to i8*
-  %i18 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, i8* %i17, i64 %mul24)
+  %add.ptr22 = getelementptr inbounds i32, ptr %add.ptr19, i64 %i16
+  %i18 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr %add.ptr22, i64 %mul24)
   %i19 = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 16, i16 64, i16 64, x86_amx %c.sroa.8127.2.in164, x86_amx %i15, x86_amx %i18)
   %indvars.iv.next = shl i64 %indvars.iv, 4
   %i20 = or i64 %indvars.iv.next, 16
-  %add.ptr14.1 = getelementptr inbounds i32, i32* %add.ptr, i64 %i20
-  %i21 = bitcast i32* %add.ptr14.1 to i8*
-  %i22 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, i8* nonnull %i21, i64 %mul15)
+  %add.ptr14.1 = getelementptr inbounds i32, ptr %add.ptr, i64 %i20
+  %i22 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr nonnull %add.ptr14.1, i64 %mul15)
   %i23 = mul nsw i64 %i20, %conv23
-  %add.ptr22.1 = getelementptr inbounds i32, i32* %add.ptr19, i64 %i23
-  %i24 = bitcast i32* %add.ptr22.1 to i8*
-  %i25 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, i8* nonnull %i24, i64 %mul24)
+  %add.ptr22.1 = getelementptr inbounds i32, ptr %add.ptr19, i64 %i23
+  %i25 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr nonnull %add.ptr22.1, i64 %mul24)
   %i26 = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 16, i16 64, i16 64, x86_amx %i19, x86_amx %i22, x86_amx %i25)
   %indvars.iv.next.1 = shl i64 %indvars.iv, 4
   %i27 = or i64 %indvars.iv.next.1, 32
-  %add.ptr14.2 = getelementptr inbounds i32, i32* %add.ptr, i64 %i27
-  %i28 = bitcast i32* %add.ptr14.2 to i8*
-  %i29 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, i8* nonnull %i28, i64 %mul15)
+  %add.ptr14.2 = getelementptr inbounds i32, ptr %add.ptr, i64 %i27
+  %i29 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr nonnull %add.ptr14.2, i64 %mul15)
   %i30 = mul nsw i64 %i27, %conv23
-  %add.ptr22.2 = getelementptr inbounds i32, i32* %add.ptr19, i64 %i30
-  %i31 = bitcast i32* %add.ptr22.2 to i8*
-  %i32 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, i8* nonnull %i31, i64 %mul24)
+  %add.ptr22.2 = getelementptr inbounds i32, ptr %add.ptr19, i64 %i30
+  %i32 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr nonnull %add.ptr22.2, i64 %mul24)
   %i33 = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 16, i16 64, i16 64, x86_amx %i26, x86_amx %i29, x86_amx %i32)
   %indvars.iv.next.2 = shl i64 %indvars.iv, 4
   %i34 = or i64 %indvars.iv.next.2, 48
-  %add.ptr14.3 = getelementptr inbounds i32, i32* %add.ptr, i64 %i34
-  %i35 = bitcast i32* %add.ptr14.3 to i8*
-  %i36 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, i8* nonnull %i35, i64 %mul15)
+  %add.ptr14.3 = getelementptr inbounds i32, ptr %add.ptr, i64 %i34
+  %i36 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr nonnull %add.ptr14.3, i64 %mul15)
   %i37 = mul nsw i64 %i34, %conv23
-  %add.ptr22.3 = getelementptr inbounds i32, i32* %add.ptr19, i64 %i37
-  %i38 = bitcast i32* %add.ptr22.3 to i8*
-  %i39 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, i8* nonnull %i38, i64 %mul24)
+  %add.ptr22.3 = getelementptr inbounds i32, ptr %add.ptr19, i64 %i37
+  %i39 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr nonnull %add.ptr22.3, i64 %mul24)
   %i40 = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 16, i16 64, i16 64, x86_amx %i33, x86_amx %i36, x86_amx %i39)
   %indvars.iv.next.3 = shl i64 %indvars.iv, 4
   %i41 = or i64 %indvars.iv.next.3, 64
-  %add.ptr14.4 = getelementptr inbounds i32, i32* %add.ptr, i64 %i41
-  %i42 = bitcast i32* %add.ptr14.4 to i8*
-  %i43 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, i8* nonnull %i42, i64 %mul15)
+  %add.ptr14.4 = getelementptr inbounds i32, ptr %add.ptr, i64 %i41
+  %i43 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr nonnull %add.ptr14.4, i64 %mul15)
   %i44 = mul nsw i64 %i41, %conv23
-  %add.ptr22.4 = getelementptr inbounds i32, i32* %add.ptr19, i64 %i44
-  %i45 = bitcast i32* %add.ptr22.4 to i8*
-  %i46 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, i8* nonnull %i45, i64 %mul24)
+  %add.ptr22.4 = getelementptr inbounds i32, ptr %add.ptr19, i64 %i44
+  %i46 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr nonnull %add.ptr22.4, i64 %mul24)
   %i47 = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 16, i16 64, i16 64, x86_amx %i40, x86_amx %i43, x86_amx %i46)
   %indvars.iv.next.4 = shl i64 %indvars.iv, 4
   %i48 = or i64 %indvars.iv.next.4, 80
-  %add.ptr14.5 = getelementptr inbounds i32, i32* %add.ptr, i64 %i48
-  %i49 = bitcast i32* %add.ptr14.5 to i8*
-  %i50 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, i8* nonnull %i49, i64 %mul15)
+  %add.ptr14.5 = getelementptr inbounds i32, ptr %add.ptr, i64 %i48
+  %i50 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr nonnull %add.ptr14.5, i64 %mul15)
   %i51 = mul nsw i64 %i48, %conv23
-  %add.ptr22.5 = getelementptr inbounds i32, i32* %add.ptr19, i64 %i51
-  %i52 = bitcast i32* %add.ptr22.5 to i8*
-  %i53 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, i8* nonnull %i52, i64 %mul24)
+  %add.ptr22.5 = getelementptr inbounds i32, ptr %add.ptr19, i64 %i51
+  %i53 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr nonnull %add.ptr22.5, i64 %mul24)
   %i54 = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 16, i16 64, i16 64, x86_amx %i47, x86_amx %i50, x86_amx %i53)
   %indvars.iv.next.5 = shl i64 %indvars.iv, 4
   %i55 = or i64 %indvars.iv.next.5, 96
-  %add.ptr14.6 = getelementptr inbounds i32, i32* %add.ptr, i64 %i55
-  %i56 = bitcast i32* %add.ptr14.6 to i8*
-  %i57 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, i8* nonnull %i56, i64 %mul15)
+  %add.ptr14.6 = getelementptr inbounds i32, ptr %add.ptr, i64 %i55
+  %i57 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr nonnull %add.ptr14.6, i64 %mul15)
   %i58 = mul nsw i64 %i55, %conv23
-  %add.ptr22.6 = getelementptr inbounds i32, i32* %add.ptr19, i64 %i58
-  %i59 = bitcast i32* %add.ptr22.6 to i8*
-  %i60 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, i8* nonnull %i59, i64 %mul24)
+  %add.ptr22.6 = getelementptr inbounds i32, ptr %add.ptr19, i64 %i58
+  %i60 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr nonnull %add.ptr22.6, i64 %mul24)
   %i61 = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 16, i16 64, i16 64, x86_amx %i54, x86_amx %i57, x86_amx %i60)
   %indvars.iv.next.6 = shl i64 %indvars.iv, 4
   %i62 = or i64 %indvars.iv.next.6, 112
-  %add.ptr14.7 = getelementptr inbounds i32, i32* %add.ptr, i64 %i62
-  %i63 = bitcast i32* %add.ptr14.7 to i8*
-  %i64 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, i8* nonnull %i63, i64 %mul15)
+  %add.ptr14.7 = getelementptr inbounds i32, ptr %add.ptr, i64 %i62
+  %i64 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr nonnull %add.ptr14.7, i64 %mul15)
   %i65 = mul nsw i64 %i62, %conv23
-  %add.ptr22.7 = getelementptr inbounds i32, i32* %add.ptr19, i64 %i65
-  %i66 = bitcast i32* %add.ptr22.7 to i8*
-  %i67 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, i8* nonnull %i66, i64 %mul24)
+  %add.ptr22.7 = getelementptr inbounds i32, ptr %add.ptr19, i64 %i65
+  %i67 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr nonnull %add.ptr22.7, i64 %mul24)
   %i68 = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 16, i16 64, i16 64, x86_amx %i61, x86_amx %i64, x86_amx %i67)
   %indvars.iv.next.7 = add nuw nsw i64 %indvars.iv, 8
   %niter.nsub.7 = add i64 %niter, -8
@@ -204,6 +185,6 @@ for.body10:                                       ; preds = %for.body10, %for.bo
 }
 
 declare x86_amx @llvm.x86.tilezero.internal(i16, i16)
-declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, i8*, i64)
+declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, ptr, i64)
 declare x86_amx @llvm.x86.tdpbssd.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
-declare void @llvm.x86.tilestored64.internal(i16, i16, i8*, i64, x86_amx)
+declare void @llvm.x86.tilestored64.internal(i16, i16, ptr, i64, x86_amx)

diff  --git a/llvm/test/CodeGen/X86/AMX/amx-type.ll b/llvm/test/CodeGen/X86/AMX/amx-type.ll
index ddf650525baaa..1d9af2b13cdfd 100644
--- a/llvm/test/CodeGen/X86/AMX/amx-type.ll
+++ b/llvm/test/CodeGen/X86/AMX/amx-type.ll
@@ -7,14 +7,14 @@
 @buf2 = dso_local global [1024 x i8] zeroinitializer, align 64
 
 ; test bitcast x86_amx to <256 x i32>
-define dso_local void @test_user_empty(i16 %m, i16 %n, i8 *%buf, i64 %s) {
+define dso_local void @test_user_empty(i16 %m, i16 %n, ptr%buf, i64 %s) {
 ; CHECK-LABEL: @test_user_empty(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[T1:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[N:%.*]], i8* [[BUF:%.*]], i64 [[S:%.*]])
+; CHECK-NEXT:    [[T1:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[N:%.*]], ptr [[BUF:%.*]], i64 [[S:%.*]])
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %t1 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %m, i16 %n, i8* %buf, i64 %s)
+  %t1 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %m, i16 %n, ptr %buf, i64 %s)
   %t2 = bitcast x86_amx %t1 to <256 x i32>
   ret void
 }
@@ -30,310 +30,274 @@ entry:
   ret void
 }
 
-define dso_local <256 x i32> @test_amx_load_bitcast(<256 x i32>* %in, i16 %m, i16 %n, i8 *%buf, i64 %s) {
+define dso_local <256 x i32> @test_amx_load_bitcast(ptr %in, i16 %m, i16 %n, ptr%buf, i64 %s) {
 ; CHECK-LABEL: @test_amx_load_bitcast(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[T1:%.*]] = load <256 x i32>, <256 x i32>* [[IN:%.*]], align 64
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast <256 x i32>* [[IN]] to i8*
-; CHECK-NEXT:    [[TMP1:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[N:%.*]], i8* [[TMP0]], i64 64)
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[N]], i8* [[BUF:%.*]], i64 [[S:%.*]], x86_amx [[TMP1]])
+; CHECK-NEXT:    [[T1:%.*]] = load <256 x i32>, ptr [[IN:%.*]], align 64
+; CHECK-NEXT:    [[TMP0:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[N:%.*]], ptr [[IN]], i64 64)
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[N]], ptr [[BUF:%.*]], i64 [[S:%.*]], x86_amx [[TMP0]])
 ; CHECK-NEXT:    ret <256 x i32> [[T1]]
 ;
 entry:
-  %t1 = load <256 x i32>, <256 x i32>* %in, align 64
+  %t1 = load <256 x i32>, ptr %in, align 64
   %t2 = bitcast <256 x i32> %t1 to x86_amx
-  call void @llvm.x86.tilestored64.internal(i16 %m, i16 %n, i8* %buf, i64 %s, x86_amx %t2)
+  call void @llvm.x86.tilestored64.internal(i16 %m, i16 %n, ptr %buf, i64 %s, x86_amx %t2)
   ret <256 x i32> %t1
 }
 
-define dso_local <256 x i32> @test_amx_bitcast_store(<256 x i32>* %out, i16 %m, i16 %n, i8 *%buf, i64 %s) {
+define dso_local <256 x i32> @test_amx_bitcast_store(ptr %out, i16 %m, i16 %n, ptr%buf, i64 %s) {
 ; CHECK-LABEL: @test_amx_bitcast_store(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[T1:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[M]], i8* [[BUF:%.*]], i64 [[S:%.*]])
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast <256 x i32>* [[OUT:%.*]] to i8*
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[M]], i8* [[TMP0]], i64 64, x86_amx [[T1]])
-; CHECK-NEXT:    [[TMP1:%.*]] = load <256 x i32>, <256 x i32>* [[OUT]], align 1024
-; CHECK-NEXT:    ret <256 x i32> [[TMP1]]
+; CHECK-NEXT:    [[T1:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[M]], ptr [[BUF:%.*]], i64 [[S:%.*]])
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[M]], ptr [[OUT:%.*]], i64 64, x86_amx [[T1]])
+; CHECK-NEXT:    [[TMP0:%.*]] = load <256 x i32>, ptr [[OUT]], align 1024
+; CHECK-NEXT:    ret <256 x i32> [[TMP0]]
 ;
 entry:
-  %t1 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %m, i16 %m, i8* %buf, i64 %s)
+  %t1 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %m, i16 %m, ptr %buf, i64 %s)
   %t2 = bitcast x86_amx %t1 to <256 x i32>
-  store <256 x i32> %t2, <256 x i32>* %out
+  store <256 x i32> %t2, ptr %out
   ret <256 x i32> %t2
 }
 
-define dso_local void @test_src_add(<256 x i32> %x, <256 x i32> %y, i16 %r, i16 %c, i8* %buf, i64 %s) {
+define dso_local void @test_src_add(<256 x i32> %x, <256 x i32> %y, i16 %r, i16 %c, ptr %buf, i64 %s) {
 ; CHECK-LABEL: @test_src_add(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = alloca <256 x i32>, align 64
 ; CHECK-NEXT:    [[ADD:%.*]] = add <256 x i32> [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <256 x i32>* [[TMP0]] to i8*
-; CHECK-NEXT:    store <256 x i32> [[ADD]], <256 x i32>* [[TMP0]], align 1024
-; CHECK-NEXT:    [[TMP2:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[R:%.*]], i16 [[C:%.*]], i8* [[TMP1]], i64 64)
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[R]], i16 [[C]], i8* [[BUF:%.*]], i64 [[S:%.*]], x86_amx [[TMP2]])
+; CHECK-NEXT:    store <256 x i32> [[ADD]], ptr [[TMP0]], align 1024
+; CHECK-NEXT:    [[TMP1:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[R:%.*]], i16 [[C:%.*]], ptr [[TMP0]], i64 64)
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[R]], i16 [[C]], ptr [[BUF:%.*]], i64 [[S:%.*]], x86_amx [[TMP1]])
 ; CHECK-NEXT:    ret void
 ;
 entry:
   %add = add <256 x i32> %y, %x
   %t = bitcast <256 x i32> %add to x86_amx
-  call void @llvm.x86.tilestored64.internal(i16 %r, i16 %c, i8* %buf, i64 %s, x86_amx %t)
+  call void @llvm.x86.tilestored64.internal(i16 %r, i16 %c, ptr %buf, i64 %s, x86_amx %t)
   ret void
 }
 
-define dso_local void @test_src_add2(<256 x i32> %x, i16 %r, i16 %c, i8* %buf, i64 %s) {
+define dso_local void @test_src_add2(<256 x i32> %x, i16 %r, i16 %c, ptr %buf, i64 %s) {
 ; CHECK-LABEL: @test_src_add2(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = alloca <256 x i32>, align 64
-; CHECK-NEXT:    [[T1:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[R:%.*]], i16 [[C:%.*]], i8* [[BUF:%.*]], i64 [[S:%.*]])
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <256 x i32>* [[TMP0]] to i8*
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[R]], i16 [[C]], i8* [[TMP1]], i64 64, x86_amx [[T1]])
-; CHECK-NEXT:    [[TMP2:%.*]] = load <256 x i32>, <256 x i32>* [[TMP0]], align 1024
-; CHECK-NEXT:    [[ADD:%.*]] = add <256 x i32> [[TMP2]], [[X:%.*]]
+; CHECK-NEXT:    [[T1:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[R:%.*]], i16 [[C:%.*]], ptr [[BUF:%.*]], i64 [[S:%.*]])
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[R]], i16 [[C]], ptr [[TMP0]], i64 64, x86_amx [[T1]])
+; CHECK-NEXT:    [[TMP1:%.*]] = load <256 x i32>, ptr [[TMP0]], align 1024
+; CHECK-NEXT:    [[ADD:%.*]] = add <256 x i32> [[TMP1]], [[X:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %t1 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %r, i16 %c, i8* %buf, i64 %s)
+  %t1 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %r, i16 %c, ptr %buf, i64 %s)
   %t2 = bitcast x86_amx %t1 to <256 x i32>
   %add = add <256 x i32> %t2, %x
   ret void
 }
 
-define dso_local void @test_load(i8* %in, i8* %out) local_unnamed_addr {
+define dso_local void @test_load(ptr %in, ptr %out) local_unnamed_addr {
 ; CHECK-LABEL: @test_load(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8* [[IN:%.*]] to <256 x i32>*
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[OUT:%.*]] to <256 x i32>*
-; CHECK-NEXT:    [[TMP3:%.*]] = load <256 x i32>, <256 x i32>* [[TMP1]], align 64
-; CHECK-NEXT:    store <256 x i32> [[TMP3]], <256 x i32>* [[TMP2]], align 64
+; CHECK-NEXT:    [[TMP1:%.*]] = load <256 x i32>, ptr [[IN:%.*]], align 64
+; CHECK-NEXT:    store <256 x i32> [[TMP1]], ptr [[OUT:%.*]], align 64
 ; CHECK-NEXT:    ret void
 ;
-  %1 = bitcast i8* %in to <256 x i32>*
-  %2 = bitcast i8* %out to <256 x i32>*
-  %3 = load <256 x i32>, <256 x i32>* %1, align 64
-  store <256 x i32> %3, <256 x i32>* %2, align 64
+  %1 = load <256 x i32>, ptr %in, align 64
+  store <256 x i32> %1, ptr %out, align 64
   ret void
 }
 
-define dso_local <256 x i32> @foo(<256 x i32>* nocapture readonly byval(<256 x i32>) align 1024 %0, <256 x i32>* nocapture readonly byval(<256 x i32>) align 1024 %1) local_unnamed_addr {
+define dso_local <256 x i32> @foo(ptr nocapture readonly byval(<256 x i32>) align 1024 %0, ptr nocapture readonly byval(<256 x i32>) align 1024 %1) local_unnamed_addr {
 ; CHECK-LABEL: @foo(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[X:%.*]] = load <256 x i32>, <256 x i32>* [[TMP0:%.*]], align 1024
-; CHECK-NEXT:    [[Y:%.*]] = load <256 x i32>, <256 x i32>* [[TMP1:%.*]], align 1024
+; CHECK-NEXT:    [[X:%.*]] = load <256 x i32>, ptr [[TMP0:%.*]], align 1024
+; CHECK-NEXT:    [[Y:%.*]] = load <256 x i32>, ptr [[TMP1:%.*]], align 1024
 ; CHECK-NEXT:    [[ADD:%.*]] = add <256 x i32> [[Y]], [[X]]
 ; CHECK-NEXT:    ret <256 x i32> [[ADD]]
 ;
 entry:
-  %x = load <256 x i32>, <256 x i32>* %0, align 1024
-  %y = load <256 x i32>, <256 x i32>* %1, align 1024
+  %x = load <256 x i32>, ptr %0, align 1024
+  %y = load <256 x i32>, ptr %1, align 1024
   %add = add <256 x i32> %y, %x
   ret <256 x i32> %add
 }
 
-define dso_local void @__tile_loadd(%struct.__tile_str* nocapture %0, i8* %1, i64 %2) local_unnamed_addr {
+define dso_local void @__tile_loadd(ptr nocapture %0, ptr %1, i64 %2) local_unnamed_addr {
 ; CHECK-LABEL: @__tile_loadd(
-; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR:%.*]], %struct.__tile_str* [[TMP0:%.*]], i64 0, i32 0
-; CHECK-NEXT:    [[TMP5:%.*]] = load i16, i16* [[TMP4]], align 64
-; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], %struct.__tile_str* [[TMP0]], i64 0, i32 1
-; CHECK-NEXT:    [[TMP7:%.*]] = load i16, i16* [[TMP6]], align 2
-; CHECK-NEXT:    [[TMP8:%.*]] = shl i64 [[TMP2:%.*]], 32
-; CHECK-NEXT:    [[TMP9:%.*]] = ashr exact i64 [[TMP8]], 32
-; CHECK-NEXT:    [[TMP10:%.*]] = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP5]], i16 [[TMP7]], i8* [[TMP1:%.*]], i64 [[TMP9]])
-; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], %struct.__tile_str* [[TMP0]], i64 0, i32 2
-; CHECK-NEXT:    [[TMP12:%.*]] = bitcast <256 x i32>* [[TMP11]] to i8*
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[TMP5]], i16 [[TMP7]], i8* [[TMP12]], i64 64, x86_amx [[TMP10]])
+; CHECK-NEXT:    [[TMP4:%.*]] = load i16, ptr [[TMP0:%.*]], align 64
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR:%.*]], ptr [[TMP0]], i64 0, i32 1
+; CHECK-NEXT:    [[TMP6:%.*]] = load i16, ptr [[TMP5]], align 2
+; CHECK-NEXT:    [[TMP7:%.*]] = shl i64 [[TMP2:%.*]], 32
+; CHECK-NEXT:    [[TMP8:%.*]] = ashr exact i64 [[TMP7]], 32
+; CHECK-NEXT:    [[TMP9:%.*]] = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP4]], i16 [[TMP6]], ptr [[TMP1:%.*]], i64 [[TMP8]])
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], ptr [[TMP0]], i64 0, i32 2
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[TMP4]], i16 [[TMP6]], ptr [[TMP10]], i64 64, x86_amx [[TMP9]])
 ; CHECK-NEXT:    ret void
 ;
-  %4 = getelementptr inbounds %struct.__tile_str, %struct.__tile_str* %0, i64 0, i32 0
-  %5 = load i16, i16* %4, align 64
-  %6 = getelementptr inbounds %struct.__tile_str, %struct.__tile_str* %0, i64 0, i32 1
-  %7 = load i16, i16* %6, align 2
-  %8 = shl i64 %2, 32
-  %9 = ashr exact i64 %8, 32
-  %10 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %5, i16 %7, i8* %1, i64 %9)
-  %11 = bitcast x86_amx %10 to <256 x i32>
-  %12 = getelementptr inbounds %struct.__tile_str, %struct.__tile_str* %0, i64 0, i32 2
-  store <256 x i32> %11, <256 x i32>* %12, align 64
+  %4 = load i16, ptr %0, align 64
+  %5 = getelementptr inbounds %struct.__tile_str, ptr %0, i64 0, i32 1
+  %6 = load i16, ptr %5, align 2
+  %7 = shl i64 %2, 32
+  %8 = ashr exact i64 %7, 32
+  %9 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %4, i16 %6, ptr %1, i64 %8)
+  %10 = bitcast x86_amx %9 to <256 x i32>
+  %11 = getelementptr inbounds %struct.__tile_str, ptr %0, i64 0, i32 2
+  store <256 x i32> %10, ptr %11, align 64
   ret void
 }
 
-define dso_local void @__tile_dpbssd(%struct.__tile_str* nocapture %0, %struct.__tile_str* nocapture readonly byval(%struct.__tile_str) align 64 %1, %struct.__tile_str* nocapture readonly byval(%struct.__tile_str) align 64 %2) local_unnamed_addr {
+define dso_local void @__tile_dpbssd(ptr nocapture %0, ptr nocapture readonly byval(%struct.__tile_str) align 64 %1, ptr nocapture readonly byval(%struct.__tile_str) align 64 %2) local_unnamed_addr {
 ; CHECK-LABEL: @__tile_dpbssd(
-; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR:%.*]], %struct.__tile_str* [[TMP1:%.*]], i64 0, i32 0
-; CHECK-NEXT:    [[TMP5:%.*]] = load i16, i16* [[TMP4]], align 64
-; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], %struct.__tile_str* [[TMP2:%.*]], i64 0, i32 1
-; CHECK-NEXT:    [[TMP7:%.*]] = load i16, i16* [[TMP6]], align 2
-; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], %struct.__tile_str* [[TMP1]], i64 0, i32 1
-; CHECK-NEXT:    [[TMP9:%.*]] = load i16, i16* [[TMP8]], align 2
-; CHECK-NEXT:    [[TMP10:%.*]] = udiv i16 [[TMP9]], 4
-; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], %struct.__tile_str* [[TMP0:%.*]], i64 0, i32 2
-; CHECK-NEXT:    [[TMP12:%.*]] = bitcast <256 x i32>* [[TMP11]] to i8*
-; CHECK-NEXT:    [[TMP13:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP5]], i16 [[TMP7]], i8* [[TMP12]], i64 64)
-; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], %struct.__tile_str* [[TMP1]], i64 0, i32 2
-; CHECK-NEXT:    [[TMP15:%.*]] = bitcast <256 x i32>* [[TMP14]] to i8*
-; CHECK-NEXT:    [[TMP16:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP5]], i16 [[TMP9]], i8* [[TMP15]], i64 64)
-; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], %struct.__tile_str* [[TMP2]], i64 0, i32 2
-; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <256 x i32>* [[TMP17]] to i8*
-; CHECK-NEXT:    [[TMP19:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP10]], i16 [[TMP7]], i8* [[TMP18]], i64 64)
-; CHECK-NEXT:    [[TMP20:%.*]] = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 [[TMP5]], i16 [[TMP7]], i16 [[TMP9]], x86_amx [[TMP13]], x86_amx [[TMP16]], x86_amx [[TMP19]])
-; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <256 x i32>* [[TMP11]] to i8*
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[TMP5]], i16 [[TMP7]], i8* [[TMP21]], i64 64, x86_amx [[TMP20]])
+; CHECK-NEXT:    [[TMP4:%.*]] = load i16, ptr [[TMP1:%.*]], align 64
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR:%.*]], ptr [[TMP2:%.*]], i64 0, i32 1
+; CHECK-NEXT:    [[TMP6:%.*]] = load i16, ptr [[TMP5]], align 2
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], ptr [[TMP1]], i64 0, i32 1
+; CHECK-NEXT:    [[TMP8:%.*]] = load i16, ptr [[TMP7]], align 2
+; CHECK-NEXT:    [[TMP9:%.*]] = udiv i16 [[TMP8]], 4
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], ptr [[TMP0:%.*]], i64 0, i32 2
+; CHECK-NEXT:    [[TMP11:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP4]], i16 [[TMP6]], ptr [[TMP10]], i64 64)
+; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], ptr [[TMP1]], i64 0, i32 2
+; CHECK-NEXT:    [[TMP13:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP4]], i16 [[TMP8]], ptr [[TMP12]], i64 64)
+; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], ptr [[TMP2]], i64 0, i32 2
+; CHECK-NEXT:    [[TMP15:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP9]], i16 [[TMP6]], ptr [[TMP14]], i64 64)
+; CHECK-NEXT:    [[TMP16:%.*]] = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 [[TMP4]], i16 [[TMP6]], i16 [[TMP8]], x86_amx [[TMP11]], x86_amx [[TMP13]], x86_amx [[TMP15]])
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[TMP4]], i16 [[TMP6]], ptr [[TMP10]], i64 64, x86_amx [[TMP16]])
 ; CHECK-NEXT:    ret void
 ;
-  %4 = getelementptr inbounds %struct.__tile_str, %struct.__tile_str* %1, i64 0, i32 0
-  %5 = load i16, i16* %4, align 64
-  %6 = getelementptr inbounds %struct.__tile_str, %struct.__tile_str* %2, i64 0, i32 1
-  %7 = load i16, i16* %6, align 2
-  %8 = getelementptr inbounds %struct.__tile_str, %struct.__tile_str* %1, i64 0, i32 1
-  %9 = load i16, i16* %8, align 2
-  %10 = getelementptr inbounds %struct.__tile_str, %struct.__tile_str* %0, i64 0, i32 2
-  %11 = load <256 x i32>, <256 x i32>* %10, align 64
-  %12 = bitcast <256 x i32> %11 to x86_amx
-  %13 = getelementptr inbounds %struct.__tile_str, %struct.__tile_str* %1, i64 0, i32 2
-  %14 = load <256 x i32>, <256 x i32>* %13, align 64
-  %15 = bitcast <256 x i32> %14 to x86_amx
-  %16 = getelementptr inbounds %struct.__tile_str, %struct.__tile_str* %2, i64 0, i32 2
-  %17 = load <256 x i32>, <256 x i32>* %16, align 64
-  %18 = bitcast <256 x i32> %17 to x86_amx
-  %19 = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 %5, i16 %7, i16 %9, x86_amx %12, x86_amx %15, x86_amx %18)
-  %20 = bitcast x86_amx %19 to <256 x i32>
-  store <256 x i32> %20, <256 x i32>* %10, align 64
+  %4 = load i16, ptr %1, align 64
+  %5 = getelementptr inbounds %struct.__tile_str, ptr %2, i64 0, i32 1
+  %6 = load i16, ptr %5, align 2
+  %7 = getelementptr inbounds %struct.__tile_str, ptr %1, i64 0, i32 1
+  %8 = load i16, ptr %7, align 2
+  %9 = getelementptr inbounds %struct.__tile_str, ptr %0, i64 0, i32 2
+  %10 = load <256 x i32>, ptr %9, align 64
+  %11 = bitcast <256 x i32> %10 to x86_amx
+  %12 = getelementptr inbounds %struct.__tile_str, ptr %1, i64 0, i32 2
+  %13 = load <256 x i32>, ptr %12, align 64
+  %14 = bitcast <256 x i32> %13 to x86_amx
+  %15 = getelementptr inbounds %struct.__tile_str, ptr %2, i64 0, i32 2
+  %16 = load <256 x i32>, ptr %15, align 64
+  %17 = bitcast <256 x i32> %16 to x86_amx
+  %18 = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 %4, i16 %6, i16 %8, x86_amx %11, x86_amx %14, x86_amx %17)
+  %19 = bitcast x86_amx %18 to <256 x i32>
+  store <256 x i32> %19, ptr %9, align 64
   ret void
 }
 
-define dso_local void @__tile_dpbsud(i16 %m, i16 %n, i16 %k, <256 x i32>* %pc, <256 x i32>* %pa, <256 x i32>* %pb) {
+define dso_local void @__tile_dpbsud(i16 %m, i16 %n, i16 %k, ptr %pc, ptr %pa, ptr %pb) {
 ; CHECK-LABEL: @__tile_dpbsud(
 ; CHECK-NEXT:    [[TMP1:%.*]] = udiv i16 [[K:%.*]], 4
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <256 x i32>* [[PA:%.*]] to i8*
-; CHECK-NEXT:    [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[K]], i8* [[TMP2]], i64 64)
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <256 x i32>* [[PB:%.*]] to i8*
-; CHECK-NEXT:    [[TMP5:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP1]], i16 [[N:%.*]], i8* [[TMP4]], i64 64)
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <256 x i32>* [[PC:%.*]] to i8*
-; CHECK-NEXT:    [[TMP7:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M]], i16 [[N]], i8* [[TMP6]], i64 64)
-; CHECK-NEXT:    [[T6:%.*]] = tail call x86_amx @llvm.x86.tdpbsud.internal(i16 [[M]], i16 [[N]], i16 [[K]], x86_amx [[TMP7]], x86_amx [[TMP3]], x86_amx [[TMP5]])
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <256 x i32>* [[PC]] to i8*
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[N]], i8* [[TMP8]], i64 64, x86_amx [[T6]])
+; CHECK-NEXT:    [[TMP2:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[K]], ptr [[PA:%.*]], i64 64)
+; CHECK-NEXT:    [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP1]], i16 [[N:%.*]], ptr [[PB:%.*]], i64 64)
+; CHECK-NEXT:    [[TMP4:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M]], i16 [[N]], ptr [[PC:%.*]], i64 64)
+; CHECK-NEXT:    [[T6:%.*]] = tail call x86_amx @llvm.x86.tdpbsud.internal(i16 [[M]], i16 [[N]], i16 [[K]], x86_amx [[TMP4]], x86_amx [[TMP2]], x86_amx [[TMP3]])
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[N]], ptr [[PC]], i64 64, x86_amx [[T6]])
 ; CHECK-NEXT:    ret void
 ;
-  %t0 = load <256 x i32>, <256 x i32>* %pa, align 64
+  %t0 = load <256 x i32>, ptr %pa, align 64
   %t1 = bitcast <256 x i32> %t0 to x86_amx
-  %t2 = load <256 x i32>, <256 x i32>* %pb, align 64
+  %t2 = load <256 x i32>, ptr %pb, align 64
   %t3 = bitcast <256 x i32> %t2 to x86_amx
-  %t4 = load <256 x i32>, <256 x i32>* %pc, align 64
+  %t4 = load <256 x i32>, ptr %pc, align 64
   %t5 = bitcast <256 x i32> %t4 to x86_amx
   %t6 = tail call x86_amx @llvm.x86.tdpbsud.internal(i16 %m, i16 %n, i16 %k, x86_amx %t5, x86_amx %t1, x86_amx %t3)
   %t7 = bitcast x86_amx %t6 to <256 x i32>
-  store <256 x i32> %t7, <256 x i32>* %pc, align 64
+  store <256 x i32> %t7, ptr %pc, align 64
   ret void
 }
 
-define dso_local void @__tile_dpbusd(i16 %m, i16 %n, i16 %k, <256 x i32>* %pc, <256 x i32>* %pa, <256 x i32>* %pb) {
+define dso_local void @__tile_dpbusd(i16 %m, i16 %n, i16 %k, ptr %pc, ptr %pa, ptr %pb) {
 ; CHECK-LABEL: @__tile_dpbusd(
 ; CHECK-NEXT:    [[TMP1:%.*]] = udiv i16 [[K:%.*]], 4
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <256 x i32>* [[PA:%.*]] to i8*
-; CHECK-NEXT:    [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[K]], i8* [[TMP2]], i64 64)
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <256 x i32>* [[PB:%.*]] to i8*
-; CHECK-NEXT:    [[TMP5:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP1]], i16 [[N:%.*]], i8* [[TMP4]], i64 64)
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <256 x i32>* [[PC:%.*]] to i8*
-; CHECK-NEXT:    [[TMP7:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M]], i16 [[N]], i8* [[TMP6]], i64 64)
-; CHECK-NEXT:    [[T6:%.*]] = tail call x86_amx @llvm.x86.tdpbusd.internal(i16 [[M]], i16 [[N]], i16 [[K]], x86_amx [[TMP7]], x86_amx [[TMP3]], x86_amx [[TMP5]])
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <256 x i32>* [[PC]] to i8*
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[N]], i8* [[TMP8]], i64 64, x86_amx [[T6]])
+; CHECK-NEXT:    [[TMP2:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[K]], ptr [[PA:%.*]], i64 64)
+; CHECK-NEXT:    [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP1]], i16 [[N:%.*]], ptr [[PB:%.*]], i64 64)
+; CHECK-NEXT:    [[TMP4:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M]], i16 [[N]], ptr [[PC:%.*]], i64 64)
+; CHECK-NEXT:    [[T6:%.*]] = tail call x86_amx @llvm.x86.tdpbusd.internal(i16 [[M]], i16 [[N]], i16 [[K]], x86_amx [[TMP4]], x86_amx [[TMP2]], x86_amx [[TMP3]])
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[N]], ptr [[PC]], i64 64, x86_amx [[T6]])
 ; CHECK-NEXT:    ret void
 ;
-  %t0 = load <256 x i32>, <256 x i32>* %pa, align 64
+  %t0 = load <256 x i32>, ptr %pa, align 64
   %t1 = bitcast <256 x i32> %t0 to x86_amx
-  %t2 = load <256 x i32>, <256 x i32>* %pb, align 64
+  %t2 = load <256 x i32>, ptr %pb, align 64
   %t3 = bitcast <256 x i32> %t2 to x86_amx
-  %t4 = load <256 x i32>, <256 x i32>* %pc, align 64
+  %t4 = load <256 x i32>, ptr %pc, align 64
   %t5 = bitcast <256 x i32> %t4 to x86_amx
   %t6 = tail call x86_amx @llvm.x86.tdpbusd.internal(i16 %m, i16 %n, i16 %k, x86_amx %t5, x86_amx %t1, x86_amx %t3)
   %t7 = bitcast x86_amx %t6 to <256 x i32>
-  store <256 x i32> %t7, <256 x i32>* %pc, align 64
+  store <256 x i32> %t7, ptr %pc, align 64
   ret void
 }
 
-define dso_local void @__tile_dpbuud(i16 %m, i16 %n, i16 %k, <256 x i32>* %pc, <256 x i32>* %pa, <256 x i32>* %pb) {
+define dso_local void @__tile_dpbuud(i16 %m, i16 %n, i16 %k, ptr %pc, ptr %pa, ptr %pb) {
 ; CHECK-LABEL: @__tile_dpbuud(
 ; CHECK-NEXT:    [[TMP1:%.*]] = udiv i16 [[K:%.*]], 4
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <256 x i32>* [[PA:%.*]] to i8*
-; CHECK-NEXT:    [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[K]], i8* [[TMP2]], i64 64)
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <256 x i32>* [[PB:%.*]] to i8*
-; CHECK-NEXT:    [[TMP5:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP1]], i16 [[N:%.*]], i8* [[TMP4]], i64 64)
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <256 x i32>* [[PC:%.*]] to i8*
-; CHECK-NEXT:    [[TMP7:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M]], i16 [[N]], i8* [[TMP6]], i64 64)
-; CHECK-NEXT:    [[T6:%.*]] = tail call x86_amx @llvm.x86.tdpbuud.internal(i16 [[M]], i16 [[N]], i16 [[K]], x86_amx [[TMP7]], x86_amx [[TMP3]], x86_amx [[TMP5]])
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <256 x i32>* [[PC]] to i8*
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[N]], i8* [[TMP8]], i64 64, x86_amx [[T6]])
+; CHECK-NEXT:    [[TMP2:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[K]], ptr [[PA:%.*]], i64 64)
+; CHECK-NEXT:    [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP1]], i16 [[N:%.*]], ptr [[PB:%.*]], i64 64)
+; CHECK-NEXT:    [[TMP4:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M]], i16 [[N]], ptr [[PC:%.*]], i64 64)
+; CHECK-NEXT:    [[T6:%.*]] = tail call x86_amx @llvm.x86.tdpbuud.internal(i16 [[M]], i16 [[N]], i16 [[K]], x86_amx [[TMP4]], x86_amx [[TMP2]], x86_amx [[TMP3]])
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[N]], ptr [[PC]], i64 64, x86_amx [[T6]])
 ; CHECK-NEXT:    ret void
 ;
-  %t0 = load <256 x i32>, <256 x i32>* %pa, align 64
+  %t0 = load <256 x i32>, ptr %pa, align 64
   %t1 = bitcast <256 x i32> %t0 to x86_amx
-  %t2 = load <256 x i32>, <256 x i32>* %pb, align 64
+  %t2 = load <256 x i32>, ptr %pb, align 64
   %t3 = bitcast <256 x i32> %t2 to x86_amx
-  %t4 = load <256 x i32>, <256 x i32>* %pc, align 64
+  %t4 = load <256 x i32>, ptr %pc, align 64
   %t5 = bitcast <256 x i32> %t4 to x86_amx
   %t6 = tail call x86_amx @llvm.x86.tdpbuud.internal(i16 %m, i16 %n, i16 %k, x86_amx %t5, x86_amx %t1, x86_amx %t3)
   %t7 = bitcast x86_amx %t6 to <256 x i32>
-  store <256 x i32> %t7, <256 x i32>* %pc, align 64
+  store <256 x i32> %t7, ptr %pc, align 64
   ret void
 }
 
-define dso_local void @__tile_dpbf16ps(i16 %m, i16 %n, i16 %k, <256 x i32>* %pc, <256 x i32>* %pa, <256 x i32>* %pb) {
+define dso_local void @__tile_dpbf16ps(i16 %m, i16 %n, i16 %k, ptr %pc, ptr %pa, ptr %pb) {
 ; CHECK-LABEL: @__tile_dpbf16ps(
 ; CHECK-NEXT:    [[TMP1:%.*]] = udiv i16 [[K:%.*]], 4
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <256 x i32>* [[PA:%.*]] to i8*
-; CHECK-NEXT:    [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[K]], i8* [[TMP2]], i64 64)
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <256 x i32>* [[PB:%.*]] to i8*
-; CHECK-NEXT:    [[TMP5:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP1]], i16 [[N:%.*]], i8* [[TMP4]], i64 64)
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <256 x i32>* [[PC:%.*]] to i8*
-; CHECK-NEXT:    [[TMP7:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M]], i16 [[N]], i8* [[TMP6]], i64 64)
-; CHECK-NEXT:    [[T6:%.*]] = tail call x86_amx @llvm.x86.tdpbf16ps.internal(i16 [[M]], i16 [[N]], i16 [[K]], x86_amx [[TMP7]], x86_amx [[TMP3]], x86_amx [[TMP5]])
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <256 x i32>* [[PC]] to i8*
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[N]], i8* [[TMP8]], i64 64, x86_amx [[T6]])
+; CHECK-NEXT:    [[TMP2:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[K]], ptr [[PA:%.*]], i64 64)
+; CHECK-NEXT:    [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP1]], i16 [[N:%.*]], ptr [[PB:%.*]], i64 64)
+; CHECK-NEXT:    [[TMP4:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M]], i16 [[N]], ptr [[PC:%.*]], i64 64)
+; CHECK-NEXT:    [[T6:%.*]] = tail call x86_amx @llvm.x86.tdpbf16ps.internal(i16 [[M]], i16 [[N]], i16 [[K]], x86_amx [[TMP4]], x86_amx [[TMP2]], x86_amx [[TMP3]])
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[N]], ptr [[PC]], i64 64, x86_amx [[T6]])
 ; CHECK-NEXT:    ret void
 ;
-  %t0 = load <256 x i32>, <256 x i32>* %pa, align 64
+  %t0 = load <256 x i32>, ptr %pa, align 64
   %t1 = bitcast <256 x i32> %t0 to x86_amx
-  %t2 = load <256 x i32>, <256 x i32>* %pb, align 64
+  %t2 = load <256 x i32>, ptr %pb, align 64
   %t3 = bitcast <256 x i32> %t2 to x86_amx
-  %t4 = load <256 x i32>, <256 x i32>* %pc, align 64
+  %t4 = load <256 x i32>, ptr %pc, align 64
   %t5 = bitcast <256 x i32> %t4 to x86_amx
   %t6 = tail call x86_amx @llvm.x86.tdpbf16ps.internal(i16 %m, i16 %n, i16 %k, x86_amx %t5, x86_amx %t1, x86_amx %t3)
   %t7 = bitcast x86_amx %t6 to <256 x i32>
-  store <256 x i32> %t7, <256 x i32>* %pc, align 64
+  store <256 x i32> %t7, ptr %pc, align 64
   ret void
 }
 
-define dso_local void @__tile_stored(i8* %0, i64 %1, %struct.__tile_str* nocapture readonly byval(%struct.__tile_str) align 64 %2) local_unnamed_addr {
+define dso_local void @__tile_stored(ptr %0, i64 %1, ptr nocapture readonly byval(%struct.__tile_str) align 64 %2) local_unnamed_addr {
 ; CHECK-LABEL: @__tile_stored(
-; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR:%.*]], %struct.__tile_str* [[TMP2:%.*]], i64 0, i32 0
-; CHECK-NEXT:    [[TMP5:%.*]] = load i16, i16* [[TMP4]], align 64
-; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], %struct.__tile_str* [[TMP2]], i64 0, i32 1
-; CHECK-NEXT:    [[TMP7:%.*]] = load i16, i16* [[TMP6]], align 2
-; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], %struct.__tile_str* [[TMP2]], i64 0, i32 2
-; CHECK-NEXT:    [[TMP9:%.*]] = bitcast <256 x i32>* [[TMP8]] to i8*
-; CHECK-NEXT:    [[TMP10:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP5]], i16 [[TMP7]], i8* [[TMP9]], i64 64)
-; CHECK-NEXT:    [[TMP11:%.*]] = shl i64 [[TMP1:%.*]], 32
-; CHECK-NEXT:    [[TMP12:%.*]] = ashr exact i64 [[TMP11]], 32
-; CHECK-NEXT:    tail call void @llvm.x86.tilestored64.internal(i16 [[TMP5]], i16 [[TMP7]], i8* [[TMP0:%.*]], i64 [[TMP12]], x86_amx [[TMP10]])
+; CHECK-NEXT:    [[TMP4:%.*]] = load i16, ptr [[TMP2:%.*]], align 64
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR:%.*]], ptr [[TMP2]], i64 0, i32 1
+; CHECK-NEXT:    [[TMP6:%.*]] = load i16, ptr [[TMP5]], align 2
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], ptr [[TMP2]], i64 0, i32 2
+; CHECK-NEXT:    [[TMP8:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP4]], i16 [[TMP6]], ptr [[TMP7]], i64 64)
+; CHECK-NEXT:    [[TMP9:%.*]] = shl i64 [[TMP1:%.*]], 32
+; CHECK-NEXT:    [[TMP10:%.*]] = ashr exact i64 [[TMP9]], 32
+; CHECK-NEXT:    tail call void @llvm.x86.tilestored64.internal(i16 [[TMP4]], i16 [[TMP6]], ptr [[TMP0:%.*]], i64 [[TMP10]], x86_amx [[TMP8]])
 ; CHECK-NEXT:    ret void
 ;
-  %4 = getelementptr inbounds %struct.__tile_str, %struct.__tile_str* %2, i64 0, i32 0
-  %5 = load i16, i16* %4, align 64
-  %6 = getelementptr inbounds %struct.__tile_str, %struct.__tile_str* %2, i64 0, i32 1
-  %7 = load i16, i16* %6, align 2
-  %8 = getelementptr inbounds %struct.__tile_str, %struct.__tile_str* %2, i64 0, i32 2
-  %9 = load <256 x i32>, <256 x i32>* %8, align 64
-  %10 = bitcast <256 x i32> %9 to x86_amx
-  %11 = shl i64 %1, 32
-  %12 = ashr exact i64 %11, 32
-  tail call void @llvm.x86.tilestored64.internal(i16 %5, i16 %7, i8* %0, i64 %12, x86_amx %10)
+  %4 = load i16, ptr %2, align 64
+  %5 = getelementptr inbounds %struct.__tile_str, ptr %2, i64 0, i32 1
+  %6 = load i16, ptr %5, align 2
+  %7 = getelementptr inbounds %struct.__tile_str, ptr %2, i64 0, i32 2
+  %8 = load <256 x i32>, ptr %7, align 64
+  %9 = bitcast <256 x i32> %8 to x86_amx
+  %10 = shl i64 %1, 32
+  %11 = ashr exact i64 %10, 32
+  tail call void @llvm.x86.tilestored64.internal(i16 %4, i16 %6, ptr %0, i64 %11, x86_amx %9)
   ret void
 }
 
-declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, i8*, i64)
+declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, ptr, i64)
 declare x86_amx @llvm.x86.tdpbssd.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
 declare x86_amx @llvm.x86.tdpbsud.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
 declare x86_amx @llvm.x86.tdpbusd.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
 declare x86_amx @llvm.x86.tdpbuud.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
 declare x86_amx @llvm.x86.tdpbf16ps.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
-declare void @llvm.x86.tilestored64.internal(i16, i16, i8*, i64, x86_amx)
+declare void @llvm.x86.tilestored64.internal(i16, i16, ptr, i64, x86_amx)

diff  --git a/llvm/test/CodeGen/X86/AMX/lat-transform-amx-bitcast.ll b/llvm/test/CodeGen/X86/AMX/lat-transform-amx-bitcast.ll
index 129d515ff078d..dc3b15e7c5503 100644
--- a/llvm/test/CodeGen/X86/AMX/lat-transform-amx-bitcast.ll
+++ b/llvm/test/CodeGen/X86/AMX/lat-transform-amx-bitcast.ll
@@ -7,14 +7,14 @@
 @buf2 = dso_local global [1024 x i8] zeroinitializer, align 64
 
 ; test bitcast x86_amx to <256 x i32>
-define dso_local void @test_user_empty(i16 %m, i16 %n, i8 *%buf, i64 %s) {
+define dso_local void @test_user_empty(i16 %m, i16 %n, ptr%buf, i64 %s) {
 ; CHECK-LABEL: @test_user_empty(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[T1:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[N:%.*]], i8* [[BUF:%.*]], i64 [[S:%.*]])
+; CHECK-NEXT:    [[T1:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[N:%.*]], ptr [[BUF:%.*]], i64 [[S:%.*]])
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %t1 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %m, i16 %n, i8* %buf, i64 %s)
+  %t1 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %m, i16 %n, ptr %buf, i64 %s)
   %t2 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t1)
   ret void
 }
@@ -30,319 +30,283 @@ entry:
   ret void
 }
 
-define dso_local <256 x i32> @test_amx_load_bitcast_v256i32(<256 x i32>* %in, i16 %m, i16 %n, i8 *%buf, i64 %s) {
+define dso_local <256 x i32> @test_amx_load_bitcast_v256i32(ptr %in, i16 %m, i16 %n, ptr%buf, i64 %s) {
 ; CHECK-LABEL: @test_amx_load_bitcast_v256i32(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = alloca <256 x i32>, align 64
-; CHECK-NEXT:    [[T1:%.*]] = load <256 x i32>, <256 x i32>* [[IN:%.*]], align 64
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <256 x i32>* [[TMP0]] to i8*
-; CHECK-NEXT:    store <256 x i32> [[T1]], <256 x i32>* [[TMP0]], align 1024
-; CHECK-NEXT:    [[TMP2:%.*]] = sext i16 [[N:%.*]] to i64
-; CHECK-NEXT:    [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[N]], i8* [[TMP1]], i64 [[TMP2]])
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[N]], i8* [[BUF:%.*]], i64 [[S:%.*]], x86_amx [[TMP3]])
+; CHECK-NEXT:    [[T1:%.*]] = load <256 x i32>, ptr [[IN:%.*]], align 64
+; CHECK-NEXT:    store <256 x i32> [[T1]], ptr [[TMP0]], align 1024
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i16 [[N:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[N]], ptr [[TMP0]], i64 [[TMP1]])
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[N]], ptr [[BUF:%.*]], i64 [[S:%.*]], x86_amx [[TMP2]])
 ; CHECK-NEXT:    ret <256 x i32> [[T1]]
 ;
 entry:
-  %t1 = load <256 x i32>, <256 x i32>* %in, align 64
+  %t1 = load <256 x i32>, ptr %in, align 64
   %t2 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t1)
-  call void @llvm.x86.tilestored64.internal(i16 %m, i16 %n, i8* %buf, i64 %s, x86_amx %t2)
+  call void @llvm.x86.tilestored64.internal(i16 %m, i16 %n, ptr %buf, i64 %s, x86_amx %t2)
   ret <256 x i32> %t1
 }
 
-define dso_local <225 x i32> @test_amx_load_bitcast_v225i32(<225 x i32>* %in, i16 %m, i16 %n, i8 *%buf, i64 %s) {
+define dso_local <225 x i32> @test_amx_load_bitcast_v225i32(ptr %in, i16 %m, i16 %n, ptr%buf, i64 %s) {
 ; CHECK-LABEL: @test_amx_load_bitcast_v225i32(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = alloca <225 x i32>, align 64
-; CHECK-NEXT:    [[T1:%.*]] = load <225 x i32>, <225 x i32>* [[IN:%.*]], align 64
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <225 x i32>* [[TMP0]] to i8*
-; CHECK-NEXT:    store <225 x i32> [[T1]], <225 x i32>* [[TMP0]], align 1024
-; CHECK-NEXT:    [[TMP2:%.*]] = sext i16 [[N:%.*]] to i64
-; CHECK-NEXT:    [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[N]], i8* [[TMP1]], i64 [[TMP2]])
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[N]], i8* [[BUF:%.*]], i64 [[S:%.*]], x86_amx [[TMP3]])
+; CHECK-NEXT:    [[T1:%.*]] = load <225 x i32>, ptr [[IN:%.*]], align 64
+; CHECK-NEXT:    store <225 x i32> [[T1]], ptr [[TMP0]], align 1024
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i16 [[N:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[N]], ptr [[TMP0]], i64 [[TMP1]])
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[N]], ptr [[BUF:%.*]], i64 [[S:%.*]], x86_amx [[TMP2]])
 ; CHECK-NEXT:    ret <225 x i32> [[T1]]
 ;
 entry:
-  %t1 = load <225 x i32>, <225 x i32>* %in, align 64
+  %t1 = load <225 x i32>, ptr %in, align 64
   %t2 = call x86_amx @llvm.x86.cast.vector.to.tile.v225i32(<225 x i32> %t1)
-  call void @llvm.x86.tilestored64.internal(i16 %m, i16 %n, i8* %buf, i64 %s, x86_amx %t2)
+  call void @llvm.x86.tilestored64.internal(i16 %m, i16 %n, ptr %buf, i64 %s, x86_amx %t2)
   ret <225 x i32> %t1
 }
 
-define dso_local <256 x i32> @test_amx_bitcast_store(<256 x i32>* %out, i16 %m, i16 %n, i8 *%buf, i64 %s) {
+define dso_local <256 x i32> @test_amx_bitcast_store(ptr %out, i16 %m, i16 %n, ptr%buf, i64 %s) {
 ; CHECK-LABEL: @test_amx_bitcast_store(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = alloca <256 x i32>, align 64
-; CHECK-NEXT:    [[T1:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[M]], i8* [[BUF:%.*]], i64 [[S:%.*]])
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <256 x i32>* [[TMP0]] to i8*
-; CHECK-NEXT:    [[TMP2:%.*]] = sext i16 [[M]] to i64
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[M]], i8* [[TMP1]], i64 [[TMP2]], x86_amx [[T1]])
-; CHECK-NEXT:    [[TMP3:%.*]] = load <256 x i32>, <256 x i32>* [[TMP0]], align 1024
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <256 x i32>* [[OUT:%.*]] to i8*
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[M]], i8* [[TMP4]], i64 64, x86_amx [[T1]])
-; CHECK-NEXT:    ret <256 x i32> [[TMP3]]
+; CHECK-NEXT:    [[T1:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[M]], ptr [[BUF:%.*]], i64 [[S:%.*]])
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i16 [[M]] to i64
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[M]], ptr [[TMP0]], i64 [[TMP1]], x86_amx [[T1]])
+; CHECK-NEXT:    [[TMP2:%.*]] = load <256 x i32>, ptr [[TMP0]], align 1024
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[M]], ptr [[OUT:%.*]], i64 64, x86_amx [[T1]])
+; CHECK-NEXT:    ret <256 x i32> [[TMP2]]
 ;
 entry:
-  %t1 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %m, i16 %m, i8* %buf, i64 %s)
+  %t1 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %m, i16 %m, ptr %buf, i64 %s)
   %t2 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t1)
-  store <256 x i32> %t2, <256 x i32>* %out
+  store <256 x i32> %t2, ptr %out
   ret <256 x i32> %t2
 }
 
-define dso_local void @test_src_add(<256 x i32> %x, <256 x i32> %y, i16 %r, i16 %c, i8* %buf, i64 %s) {
+define dso_local void @test_src_add(<256 x i32> %x, <256 x i32> %y, i16 %r, i16 %c, ptr %buf, i64 %s) {
 ; CHECK-LABEL: @test_src_add(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = alloca <256 x i32>, align 64
 ; CHECK-NEXT:    [[ADD:%.*]] = add <256 x i32> [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <256 x i32>* [[TMP0]] to i8*
-; CHECK-NEXT:    store <256 x i32> [[ADD]], <256 x i32>* [[TMP0]], align 1024
-; CHECK-NEXT:    [[TMP2:%.*]] = sext i16 [[C:%.*]] to i64
-; CHECK-NEXT:    [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[R:%.*]], i16 [[C]], i8* [[TMP1]], i64 [[TMP2]])
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[R]], i16 [[C]], i8* [[BUF:%.*]], i64 [[S:%.*]], x86_amx [[TMP3]])
+; CHECK-NEXT:    store <256 x i32> [[ADD]], ptr [[TMP0]], align 1024
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i16 [[C:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[R:%.*]], i16 [[C]], ptr [[TMP0]], i64 [[TMP1]])
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[R]], i16 [[C]], ptr [[BUF:%.*]], i64 [[S:%.*]], x86_amx [[TMP2]])
 ; CHECK-NEXT:    ret void
 ;
 entry:
   %add = add <256 x i32> %y, %x
   %t = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %add)
-  call void @llvm.x86.tilestored64.internal(i16 %r, i16 %c, i8* %buf, i64 %s, x86_amx %t)
+  call void @llvm.x86.tilestored64.internal(i16 %r, i16 %c, ptr %buf, i64 %s, x86_amx %t)
   ret void
 }
 
-define dso_local void @test_src_add2(<256 x i32> %x, i16 %r, i16 %c, i8* %buf, i64 %s) {
+define dso_local void @test_src_add2(<256 x i32> %x, i16 %r, i16 %c, ptr %buf, i64 %s) {
 ; CHECK-LABEL: @test_src_add2(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = alloca <256 x i32>, align 64
-; CHECK-NEXT:    [[T1:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[R:%.*]], i16 [[C:%.*]], i8* [[BUF:%.*]], i64 [[S:%.*]])
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <256 x i32>* [[TMP0]] to i8*
-; CHECK-NEXT:    [[TMP2:%.*]] = sext i16 [[C]] to i64
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[R]], i16 [[C]], i8* [[TMP1]], i64 [[TMP2]], x86_amx [[T1]])
-; CHECK-NEXT:    [[TMP3:%.*]] = load <256 x i32>, <256 x i32>* [[TMP0]], align 1024
-; CHECK-NEXT:    [[ADD:%.*]] = add <256 x i32> [[TMP3]], [[X:%.*]]
+; CHECK-NEXT:    [[T1:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[R:%.*]], i16 [[C:%.*]], ptr [[BUF:%.*]], i64 [[S:%.*]])
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i16 [[C]] to i64
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[R]], i16 [[C]], ptr [[TMP0]], i64 [[TMP1]], x86_amx [[T1]])
+; CHECK-NEXT:    [[TMP2:%.*]] = load <256 x i32>, ptr [[TMP0]], align 1024
+; CHECK-NEXT:    [[ADD:%.*]] = add <256 x i32> [[TMP2]], [[X:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %t1 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %r, i16 %c, i8* %buf, i64 %s)
+  %t1 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %r, i16 %c, ptr %buf, i64 %s)
   %t2 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t1)
   %add = add <256 x i32> %t2, %x
   ret void
 }
 
-define dso_local void @__tile_loadd(%struct.__tile_str* nocapture %0, i8* %1, i64 %2) local_unnamed_addr {
+define dso_local void @__tile_loadd(ptr nocapture %0, ptr %1, i64 %2) local_unnamed_addr {
 ; CHECK-LABEL: @__tile_loadd(
-; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR:%.*]], %struct.__tile_str* [[TMP0:%.*]], i64 0, i32 0
-; CHECK-NEXT:    [[TMP5:%.*]] = load i16, i16* [[TMP4]], align 64
-; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], %struct.__tile_str* [[TMP0]], i64 0, i32 1
-; CHECK-NEXT:    [[TMP7:%.*]] = load i16, i16* [[TMP6]], align 2
-; CHECK-NEXT:    [[TMP8:%.*]] = shl i64 [[TMP2:%.*]], 32
-; CHECK-NEXT:    [[TMP9:%.*]] = ashr exact i64 [[TMP8]], 32
-; CHECK-NEXT:    [[TMP10:%.*]] = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP5]], i16 [[TMP7]], i8* [[TMP1:%.*]], i64 [[TMP9]])
-; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], %struct.__tile_str* [[TMP0]], i64 0, i32 2
-; CHECK-NEXT:    [[TMP12:%.*]] = bitcast <256 x i32>* [[TMP11]] to i8*
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[TMP5]], i16 [[TMP7]], i8* [[TMP12]], i64 64, x86_amx [[TMP10]])
+; CHECK-NEXT:    [[TMP4:%.*]] = load i16, ptr [[TMP0:%.*]], align 64
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR:%.*]], ptr [[TMP0]], i64 0, i32 1
+; CHECK-NEXT:    [[TMP6:%.*]] = load i16, ptr [[TMP5]], align 2
+; CHECK-NEXT:    [[TMP7:%.*]] = shl i64 [[TMP2:%.*]], 32
+; CHECK-NEXT:    [[TMP8:%.*]] = ashr exact i64 [[TMP7]], 32
+; CHECK-NEXT:    [[TMP9:%.*]] = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP4]], i16 [[TMP6]], ptr [[TMP1:%.*]], i64 [[TMP8]])
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], ptr [[TMP0]], i64 0, i32 2
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[TMP4]], i16 [[TMP6]], ptr [[TMP10]], i64 64, x86_amx [[TMP9]])
 ; CHECK-NEXT:    ret void
 ;
-  %4 = getelementptr inbounds %struct.__tile_str, %struct.__tile_str* %0, i64 0, i32 0
-  %5 = load i16, i16* %4, align 64
-  %6 = getelementptr inbounds %struct.__tile_str, %struct.__tile_str* %0, i64 0, i32 1
-  %7 = load i16, i16* %6, align 2
-  %8 = shl i64 %2, 32
-  %9 = ashr exact i64 %8, 32
-  %10 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %5, i16 %7, i8* %1, i64 %9)
-  %11 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %10)
-  %12 = getelementptr inbounds %struct.__tile_str, %struct.__tile_str* %0, i64 0, i32 2
-  store <256 x i32> %11, <256 x i32>* %12, align 64
+  %4 = load i16, ptr %0, align 64
+  %5 = getelementptr inbounds %struct.__tile_str, ptr %0, i64 0, i32 1
+  %6 = load i16, ptr %5, align 2
+  %7 = shl i64 %2, 32
+  %8 = ashr exact i64 %7, 32
+  %9 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %4, i16 %6, ptr %1, i64 %8)
+  %10 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %9)
+  %11 = getelementptr inbounds %struct.__tile_str, ptr %0, i64 0, i32 2
+  store <256 x i32> %10, ptr %11, align 64
   ret void
 }
 
-define dso_local void @__tile_dpbssd(%struct.__tile_str* nocapture %0, %struct.__tile_str* nocapture readonly byval(%struct.__tile_str) align 64 %1, %struct.__tile_str* nocapture readonly byval(%struct.__tile_str) align 64 %2) local_unnamed_addr {
+define dso_local void @__tile_dpbssd(ptr nocapture %0, ptr nocapture readonly byval(%struct.__tile_str) align 64 %1, ptr nocapture readonly byval(%struct.__tile_str) align 64 %2) local_unnamed_addr {
 ; CHECK-LABEL: @__tile_dpbssd(
-; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR:%.*]], %struct.__tile_str* [[TMP1:%.*]], i64 0, i32 0
-; CHECK-NEXT:    [[TMP5:%.*]] = load i16, i16* [[TMP4]], align 64
-; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], %struct.__tile_str* [[TMP2:%.*]], i64 0, i32 1
-; CHECK-NEXT:    [[TMP7:%.*]] = load i16, i16* [[TMP6]], align 2
-; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], %struct.__tile_str* [[TMP1]], i64 0, i32 1
-; CHECK-NEXT:    [[TMP9:%.*]] = load i16, i16* [[TMP8]], align 2
-; CHECK-NEXT:    [[TMP10:%.*]] = udiv i16 [[TMP9]], 4
-; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], %struct.__tile_str* [[TMP0:%.*]], i64 0, i32 2
-; CHECK-NEXT:    [[TMP12:%.*]] = bitcast <256 x i32>* [[TMP11]] to i8*
-; CHECK-NEXT:    [[TMP13:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP5]], i16 [[TMP7]], i8* [[TMP12]], i64 64)
-; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], %struct.__tile_str* [[TMP1]], i64 0, i32 2
-; CHECK-NEXT:    [[TMP15:%.*]] = bitcast <256 x i32>* [[TMP14]] to i8*
-; CHECK-NEXT:    [[TMP16:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP5]], i16 [[TMP9]], i8* [[TMP15]], i64 64)
-; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], %struct.__tile_str* [[TMP2]], i64 0, i32 2
-; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <256 x i32>* [[TMP17]] to i8*
-; CHECK-NEXT:    [[TMP19:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP10]], i16 [[TMP7]], i8* [[TMP18]], i64 64)
-; CHECK-NEXT:    [[TMP20:%.*]] = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 [[TMP5]], i16 [[TMP7]], i16 [[TMP9]], x86_amx [[TMP13]], x86_amx [[TMP16]], x86_amx [[TMP19]])
-; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <256 x i32>* [[TMP11]] to i8*
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[TMP5]], i16 [[TMP7]], i8* [[TMP21]], i64 64, x86_amx [[TMP20]])
+; CHECK-NEXT:    [[TMP4:%.*]] = load i16, ptr [[TMP1:%.*]], align 64
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR:%.*]], ptr [[TMP2:%.*]], i64 0, i32 1
+; CHECK-NEXT:    [[TMP6:%.*]] = load i16, ptr [[TMP5]], align 2
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], ptr [[TMP1]], i64 0, i32 1
+; CHECK-NEXT:    [[TMP8:%.*]] = load i16, ptr [[TMP7]], align 2
+; CHECK-NEXT:    [[TMP9:%.*]] = udiv i16 [[TMP8]], 4
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], ptr [[TMP0:%.*]], i64 0, i32 2
+; CHECK-NEXT:    [[TMP11:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP4]], i16 [[TMP6]], ptr [[TMP10]], i64 64)
+; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], ptr [[TMP1]], i64 0, i32 2
+; CHECK-NEXT:    [[TMP13:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP4]], i16 [[TMP8]], ptr [[TMP12]], i64 64)
+; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], ptr [[TMP2]], i64 0, i32 2
+; CHECK-NEXT:    [[TMP15:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP9]], i16 [[TMP6]], ptr [[TMP14]], i64 64)
+; CHECK-NEXT:    [[TMP16:%.*]] = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 [[TMP4]], i16 [[TMP6]], i16 [[TMP8]], x86_amx [[TMP11]], x86_amx [[TMP13]], x86_amx [[TMP15]])
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[TMP4]], i16 [[TMP6]], ptr [[TMP10]], i64 64, x86_amx [[TMP16]])
 ; CHECK-NEXT:    ret void
 ;
-  %4 = getelementptr inbounds %struct.__tile_str, %struct.__tile_str* %1, i64 0, i32 0
-  %5 = load i16, i16* %4, align 64
-  %6 = getelementptr inbounds %struct.__tile_str, %struct.__tile_str* %2, i64 0, i32 1
-  %7 = load i16, i16* %6, align 2
-  %8 = getelementptr inbounds %struct.__tile_str, %struct.__tile_str* %1, i64 0, i32 1
-  %9 = load i16, i16* %8, align 2
-  %10 = getelementptr inbounds %struct.__tile_str, %struct.__tile_str* %0, i64 0, i32 2
-  %11 = load <256 x i32>, <256 x i32>* %10, align 64
-  %12 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %11)
-  %13 = getelementptr inbounds %struct.__tile_str, %struct.__tile_str* %1, i64 0, i32 2
-  %14 = load <256 x i32>, <256 x i32>* %13, align 64
-  %15 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %14)
-  %16 = getelementptr inbounds %struct.__tile_str, %struct.__tile_str* %2, i64 0, i32 2
-  %17 = load <256 x i32>, <256 x i32>* %16, align 64
-  %18 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %17)
-  %19 = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 %5, i16 %7, i16 %9, x86_amx %12, x86_amx %15, x86_amx %18)
-  %20 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %19)
-  store <256 x i32> %20, <256 x i32>* %10, align 64
+  %4 = load i16, ptr %1, align 64
+  %5 = getelementptr inbounds %struct.__tile_str, ptr %2, i64 0, i32 1
+  %6 = load i16, ptr %5, align 2
+  %7 = getelementptr inbounds %struct.__tile_str, ptr %1, i64 0, i32 1
+  %8 = load i16, ptr %7, align 2
+  %9 = getelementptr inbounds %struct.__tile_str, ptr %0, i64 0, i32 2
+  %10 = load <256 x i32>, ptr %9, align 64
+  %11 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %10)
+  %12 = getelementptr inbounds %struct.__tile_str, ptr %1, i64 0, i32 2
+  %13 = load <256 x i32>, ptr %12, align 64
+  %14 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %13)
+  %15 = getelementptr inbounds %struct.__tile_str, ptr %2, i64 0, i32 2
+  %16 = load <256 x i32>, ptr %15, align 64
+  %17 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %16)
+  %18 = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 %4, i16 %6, i16 %8, x86_amx %11, x86_amx %14, x86_amx %17)
+  %19 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %18)
+  store <256 x i32> %19, ptr %9, align 64
   ret void
 }
 
-define dso_local void @__tile_dpbsud(i16 %m, i16 %n, i16 %k, <256 x i32>* %pc, <256 x i32>* %pa, <256 x i32>* %pb) {
+define dso_local void @__tile_dpbsud(i16 %m, i16 %n, i16 %k, ptr %pc, ptr %pa, ptr %pb) {
 ; CHECK-LABEL: @__tile_dpbsud(
 ; CHECK-NEXT:    [[TMP1:%.*]] = udiv i16 [[K:%.*]], 4
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <256 x i32>* [[PA:%.*]] to i8*
-; CHECK-NEXT:    [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[K]], i8* [[TMP2]], i64 64)
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <256 x i32>* [[PB:%.*]] to i8*
-; CHECK-NEXT:    [[TMP5:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP1]], i16 [[N:%.*]], i8* [[TMP4]], i64 64)
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <256 x i32>* [[PC:%.*]] to i8*
-; CHECK-NEXT:    [[TMP7:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M]], i16 [[N]], i8* [[TMP6]], i64 64)
-; CHECK-NEXT:    [[T6:%.*]] = tail call x86_amx @llvm.x86.tdpbsud.internal(i16 [[M]], i16 [[N]], i16 [[K]], x86_amx [[TMP7]], x86_amx [[TMP3]], x86_amx [[TMP5]])
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <256 x i32>* [[PC]] to i8*
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[N]], i8* [[TMP8]], i64 64, x86_amx [[T6]])
+; CHECK-NEXT:    [[TMP2:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[K]], ptr [[PA:%.*]], i64 64)
+; CHECK-NEXT:    [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP1]], i16 [[N:%.*]], ptr [[PB:%.*]], i64 64)
+; CHECK-NEXT:    [[TMP4:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M]], i16 [[N]], ptr [[PC:%.*]], i64 64)
+; CHECK-NEXT:    [[T6:%.*]] = tail call x86_amx @llvm.x86.tdpbsud.internal(i16 [[M]], i16 [[N]], i16 [[K]], x86_amx [[TMP4]], x86_amx [[TMP2]], x86_amx [[TMP3]])
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[N]], ptr [[PC]], i64 64, x86_amx [[T6]])
 ; CHECK-NEXT:    ret void
 ;
-  %t0 = load <256 x i32>, <256 x i32>* %pa, align 64
+  %t0 = load <256 x i32>, ptr %pa, align 64
   %t1 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t0)
-  %t2 = load <256 x i32>, <256 x i32>* %pb, align 64
+  %t2 = load <256 x i32>, ptr %pb, align 64
   %t3 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t2)
-  %t4 = load <256 x i32>, <256 x i32>* %pc, align 64
+  %t4 = load <256 x i32>, ptr %pc, align 64
   %t5 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t4)
   %t6 = tail call x86_amx @llvm.x86.tdpbsud.internal(i16 %m, i16 %n, i16 %k, x86_amx %t5, x86_amx %t1, x86_amx %t3)
   %t7 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t6)
-  store <256 x i32> %t7, <256 x i32>* %pc, align 64
+  store <256 x i32> %t7, ptr %pc, align 64
   ret void
 }
 
-define dso_local void @__tile_dpbusd(i16 %m, i16 %n, i16 %k, <256 x i32>* %pc, <256 x i32>* %pa, <256 x i32>* %pb) {
+define dso_local void @__tile_dpbusd(i16 %m, i16 %n, i16 %k, ptr %pc, ptr %pa, ptr %pb) {
 ; CHECK-LABEL: @__tile_dpbusd(
 ; CHECK-NEXT:    [[TMP1:%.*]] = udiv i16 [[K:%.*]], 4
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <256 x i32>* [[PA:%.*]] to i8*
-; CHECK-NEXT:    [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[K]], i8* [[TMP2]], i64 64)
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <256 x i32>* [[PB:%.*]] to i8*
-; CHECK-NEXT:    [[TMP5:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP1]], i16 [[N:%.*]], i8* [[TMP4]], i64 64)
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <256 x i32>* [[PC:%.*]] to i8*
-; CHECK-NEXT:    [[TMP7:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M]], i16 [[N]], i8* [[TMP6]], i64 64)
-; CHECK-NEXT:    [[T6:%.*]] = tail call x86_amx @llvm.x86.tdpbusd.internal(i16 [[M]], i16 [[N]], i16 [[K]], x86_amx [[TMP7]], x86_amx [[TMP3]], x86_amx [[TMP5]])
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <256 x i32>* [[PC]] to i8*
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[N]], i8* [[TMP8]], i64 64, x86_amx [[T6]])
+; CHECK-NEXT:    [[TMP2:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[K]], ptr [[PA:%.*]], i64 64)
+; CHECK-NEXT:    [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP1]], i16 [[N:%.*]], ptr [[PB:%.*]], i64 64)
+; CHECK-NEXT:    [[TMP4:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M]], i16 [[N]], ptr [[PC:%.*]], i64 64)
+; CHECK-NEXT:    [[T6:%.*]] = tail call x86_amx @llvm.x86.tdpbusd.internal(i16 [[M]], i16 [[N]], i16 [[K]], x86_amx [[TMP4]], x86_amx [[TMP2]], x86_amx [[TMP3]])
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[N]], ptr [[PC]], i64 64, x86_amx [[T6]])
 ; CHECK-NEXT:    ret void
 ;
-  %t0 = load <256 x i32>, <256 x i32>* %pa, align 64
+  %t0 = load <256 x i32>, ptr %pa, align 64
   %t1 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t0)
-  %t2 = load <256 x i32>, <256 x i32>* %pb, align 64
+  %t2 = load <256 x i32>, ptr %pb, align 64
   %t3 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t2)
-  %t4 = load <256 x i32>, <256 x i32>* %pc, align 64
+  %t4 = load <256 x i32>, ptr %pc, align 64
   %t5 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t4)
   %t6 = tail call x86_amx @llvm.x86.tdpbusd.internal(i16 %m, i16 %n, i16 %k, x86_amx %t5, x86_amx %t1, x86_amx %t3)
   %t7 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t6)
-  store <256 x i32> %t7, <256 x i32>* %pc, align 64
+  store <256 x i32> %t7, ptr %pc, align 64
   ret void
 }
 
-define dso_local void @__tile_dpbuud(i16 %m, i16 %n, i16 %k, <256 x i32>* %pc, <256 x i32>* %pa, <256 x i32>* %pb) {
+define dso_local void @__tile_dpbuud(i16 %m, i16 %n, i16 %k, ptr %pc, ptr %pa, ptr %pb) {
 ; CHECK-LABEL: @__tile_dpbuud(
 ; CHECK-NEXT:    [[TMP1:%.*]] = udiv i16 [[K:%.*]], 4
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <256 x i32>* [[PA:%.*]] to i8*
-; CHECK-NEXT:    [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[K]], i8* [[TMP2]], i64 64)
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <256 x i32>* [[PB:%.*]] to i8*
-; CHECK-NEXT:    [[TMP5:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP1]], i16 [[N:%.*]], i8* [[TMP4]], i64 64)
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <256 x i32>* [[PC:%.*]] to i8*
-; CHECK-NEXT:    [[TMP7:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M]], i16 [[N]], i8* [[TMP6]], i64 64)
-; CHECK-NEXT:    [[T6:%.*]] = tail call x86_amx @llvm.x86.tdpbuud.internal(i16 [[M]], i16 [[N]], i16 [[K]], x86_amx [[TMP7]], x86_amx [[TMP3]], x86_amx [[TMP5]])
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <256 x i32>* [[PC]] to i8*
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[N]], i8* [[TMP8]], i64 64, x86_amx [[T6]])
+; CHECK-NEXT:    [[TMP2:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[K]], ptr [[PA:%.*]], i64 64)
+; CHECK-NEXT:    [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP1]], i16 [[N:%.*]], ptr [[PB:%.*]], i64 64)
+; CHECK-NEXT:    [[TMP4:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M]], i16 [[N]], ptr [[PC:%.*]], i64 64)
+; CHECK-NEXT:    [[T6:%.*]] = tail call x86_amx @llvm.x86.tdpbuud.internal(i16 [[M]], i16 [[N]], i16 [[K]], x86_amx [[TMP4]], x86_amx [[TMP2]], x86_amx [[TMP3]])
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[N]], ptr [[PC]], i64 64, x86_amx [[T6]])
 ; CHECK-NEXT:    ret void
 ;
-  %t0 = load <256 x i32>, <256 x i32>* %pa, align 64
+  %t0 = load <256 x i32>, ptr %pa, align 64
   %t1 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t0)
-  %t2 = load <256 x i32>, <256 x i32>* %pb, align 64
+  %t2 = load <256 x i32>, ptr %pb, align 64
   %t3 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t2)
-  %t4 = load <256 x i32>, <256 x i32>* %pc, align 64
+  %t4 = load <256 x i32>, ptr %pc, align 64
   %t5 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t4)
   %t6 = tail call x86_amx @llvm.x86.tdpbuud.internal(i16 %m, i16 %n, i16 %k, x86_amx %t5, x86_amx %t1, x86_amx %t3)
   %t7 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t6)
-  store <256 x i32> %t7, <256 x i32>* %pc, align 64
+  store <256 x i32> %t7, ptr %pc, align 64
   ret void
 }
 
-define dso_local void @__tile_dpbf16ps(i16 %m, i16 %n, i16 %k, <256 x i32>* %pc, <256 x i32>* %pa, <256 x i32>* %pb) {
+define dso_local void @__tile_dpbf16ps(i16 %m, i16 %n, i16 %k, ptr %pc, ptr %pa, ptr %pb) {
 ; CHECK-LABEL: @__tile_dpbf16ps(
 ; CHECK-NEXT:    [[TMP1:%.*]] = udiv i16 [[K:%.*]], 4
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <256 x i32>* [[PA:%.*]] to i8*
-; CHECK-NEXT:    [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[K]], i8* [[TMP2]], i64 64)
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <256 x i32>* [[PB:%.*]] to i8*
-; CHECK-NEXT:    [[TMP5:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP1]], i16 [[N:%.*]], i8* [[TMP4]], i64 64)
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <256 x i32>* [[PC:%.*]] to i8*
-; CHECK-NEXT:    [[TMP7:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M]], i16 [[N]], i8* [[TMP6]], i64 64)
-; CHECK-NEXT:    [[T6:%.*]] = tail call x86_amx @llvm.x86.tdpbf16ps.internal(i16 [[M]], i16 [[N]], i16 [[K]], x86_amx [[TMP7]], x86_amx [[TMP3]], x86_amx [[TMP5]])
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <256 x i32>* [[PC]] to i8*
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[N]], i8* [[TMP8]], i64 64, x86_amx [[T6]])
+; CHECK-NEXT:    [[TMP2:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M:%.*]], i16 [[K]], ptr [[PA:%.*]], i64 64)
+; CHECK-NEXT:    [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP1]], i16 [[N:%.*]], ptr [[PB:%.*]], i64 64)
+; CHECK-NEXT:    [[TMP4:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[M]], i16 [[N]], ptr [[PC:%.*]], i64 64)
+; CHECK-NEXT:    [[T6:%.*]] = tail call x86_amx @llvm.x86.tdpbf16ps.internal(i16 [[M]], i16 [[N]], i16 [[K]], x86_amx [[TMP4]], x86_amx [[TMP2]], x86_amx [[TMP3]])
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[M]], i16 [[N]], ptr [[PC]], i64 64, x86_amx [[T6]])
 ; CHECK-NEXT:    ret void
 ;
-  %t0 = load <256 x i32>, <256 x i32>* %pa, align 64
+  %t0 = load <256 x i32>, ptr %pa, align 64
   %t1 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t0)
-  %t2 = load <256 x i32>, <256 x i32>* %pb, align 64
+  %t2 = load <256 x i32>, ptr %pb, align 64
   %t3 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t2)
-  %t4 = load <256 x i32>, <256 x i32>* %pc, align 64
+  %t4 = load <256 x i32>, ptr %pc, align 64
   %t5 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t4)
   %t6 = tail call x86_amx @llvm.x86.tdpbf16ps.internal(i16 %m, i16 %n, i16 %k, x86_amx %t5, x86_amx %t1, x86_amx %t3)
   %t7 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t6)
-  store <256 x i32> %t7, <256 x i32>* %pc, align 64
+  store <256 x i32> %t7, ptr %pc, align 64
   ret void
 }
 
-define dso_local void @__tile_stored(i8* %0, i64 %1, %struct.__tile_str* nocapture readonly byval(%struct.__tile_str) align 64 %2) local_unnamed_addr {
+define dso_local void @__tile_stored(ptr %0, i64 %1, ptr nocapture readonly byval(%struct.__tile_str) align 64 %2) local_unnamed_addr {
 ; CHECK-LABEL: @__tile_stored(
-; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR:%.*]], %struct.__tile_str* [[TMP2:%.*]], i64 0, i32 0
-; CHECK-NEXT:    [[TMP5:%.*]] = load i16, i16* [[TMP4]], align 64
-; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], %struct.__tile_str* [[TMP2]], i64 0, i32 1
-; CHECK-NEXT:    [[TMP7:%.*]] = load i16, i16* [[TMP6]], align 2
-; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], %struct.__tile_str* [[TMP2]], i64 0, i32 2
-; CHECK-NEXT:    [[TMP9:%.*]] = bitcast <256 x i32>* [[TMP8]] to i8*
-; CHECK-NEXT:    [[TMP10:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP5]], i16 [[TMP7]], i8* [[TMP9]], i64 64)
-; CHECK-NEXT:    [[TMP11:%.*]] = shl i64 [[TMP1:%.*]], 32
-; CHECK-NEXT:    [[TMP12:%.*]] = ashr exact i64 [[TMP11]], 32
-; CHECK-NEXT:    tail call void @llvm.x86.tilestored64.internal(i16 [[TMP5]], i16 [[TMP7]], i8* [[TMP0:%.*]], i64 [[TMP12]], x86_amx [[TMP10]])
+; CHECK-NEXT:    [[TMP4:%.*]] = load i16, ptr [[TMP2:%.*]], align 64
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR:%.*]], ptr [[TMP2]], i64 0, i32 1
+; CHECK-NEXT:    [[TMP6:%.*]] = load i16, ptr [[TMP5]], align 2
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TILE_STR]], ptr [[TMP2]], i64 0, i32 2
+; CHECK-NEXT:    [[TMP8:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP4]], i16 [[TMP6]], ptr [[TMP7]], i64 64)
+; CHECK-NEXT:    [[TMP9:%.*]] = shl i64 [[TMP1:%.*]], 32
+; CHECK-NEXT:    [[TMP10:%.*]] = ashr exact i64 [[TMP9]], 32
+; CHECK-NEXT:    tail call void @llvm.x86.tilestored64.internal(i16 [[TMP4]], i16 [[TMP6]], ptr [[TMP0:%.*]], i64 [[TMP10]], x86_amx [[TMP8]])
 ; CHECK-NEXT:    ret void
 ;
-  %4 = getelementptr inbounds %struct.__tile_str, %struct.__tile_str* %2, i64 0, i32 0
-  %5 = load i16, i16* %4, align 64
-  %6 = getelementptr inbounds %struct.__tile_str, %struct.__tile_str* %2, i64 0, i32 1
-  %7 = load i16, i16* %6, align 2
-  %8 = getelementptr inbounds %struct.__tile_str, %struct.__tile_str* %2, i64 0, i32 2
-  %9 = load <256 x i32>, <256 x i32>* %8, align 64
-  %10 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %9)
-  %11 = shl i64 %1, 32
-  %12 = ashr exact i64 %11, 32
-  tail call void @llvm.x86.tilestored64.internal(i16 %5, i16 %7, i8* %0, i64 %12, x86_amx %10)
+  %4 = load i16, ptr %2, align 64
+  %5 = getelementptr inbounds %struct.__tile_str, ptr %2, i64 0, i32 1
+  %6 = load i16, ptr %5, align 2
+  %7 = getelementptr inbounds %struct.__tile_str, ptr %2, i64 0, i32 2
+  %8 = load <256 x i32>, ptr %7, align 64
+  %9 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %8)
+  %10 = shl i64 %1, 32
+  %11 = ashr exact i64 %10, 32
+  tail call void @llvm.x86.tilestored64.internal(i16 %4, i16 %6, ptr %0, i64 %11, x86_amx %9)
   ret void
 }
 
-define void @dead_code(i8 *%buf) {
+define void @dead_code(ptr%buf) {
 ; CHECK-LABEL: @dead_code(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = alloca <256 x i32>, align 64
 ; CHECK-NEXT:    br i1 undef, label [[L1:%.*]], label [[L2:%.*]]
 ; CHECK:       l1:
 ; CHECK-NEXT:    [[T1:%.*]] = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 32)
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <256 x i32>* [[TMP0]] to i8*
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 8, i16 32, i8* [[TMP1]], i64 32, x86_amx [[T1]])
-; CHECK-NEXT:    [[TMP2:%.*]] = load <256 x i32>, <256 x i32>* [[TMP0]], align 1024
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 8, i16 32, ptr [[TMP0]], i64 32, x86_amx [[T1]])
+; CHECK-NEXT:    [[TMP1:%.*]] = load <256 x i32>, ptr [[TMP0]], align 1024
 ; CHECK-NEXT:    br i1 undef, label [[L2]], label [[EXIT:%.*]]
 ; CHECK:       l2:
-; CHECK-NEXT:    [[T3:%.*]] = phi <256 x i32> [ undef, [[ENTRY:%.*]] ], [ [[TMP2]], [[L1]] ]
-; CHECK-NEXT:    [[P:%.*]] = bitcast i8* [[BUF:%.*]] to <256 x i32>*
-; CHECK-NEXT:    store <256 x i32> [[T3]], <256 x i32>* [[P]], align 1024
+; CHECK-NEXT:    [[T3:%.*]] = phi <256 x i32> [ undef, [[ENTRY:%.*]] ], [ [[TMP1]], [[L1]] ]
+; CHECK-NEXT:    store <256 x i32> [[T3]], ptr [[BUF:%.*]], align 1024
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    ret void
@@ -359,8 +323,7 @@ l2:
   %t3 = phi <256 x i32> [ undef, %entry ], [ %t2, %l1 ]
   %t4 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t3)
   %t5 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t4)
-  %p = bitcast i8* %buf to <256 x i32>*
-  store <256 x i32> %t5, <256 x i32>* %p
+  store <256 x i32> %t5, ptr %buf
   br label %exit
 
 exit:
@@ -368,13 +331,13 @@ exit:
 }
 
 declare x86_amx @llvm.x86.tilezero.internal(i16, i16)
-declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, i8*, i64)
+declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, ptr, i64)
 declare x86_amx @llvm.x86.tdpbssd.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
 declare x86_amx @llvm.x86.tdpbsud.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
 declare x86_amx @llvm.x86.tdpbusd.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
 declare x86_amx @llvm.x86.tdpbuud.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
 declare x86_amx @llvm.x86.tdpbf16ps.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
-declare void @llvm.x86.tilestored64.internal(i16, i16, i8*, i64, x86_amx)
+declare void @llvm.x86.tilestored64.internal(i16, i16, ptr, i64, x86_amx)
 
 declare x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32>)
 declare x86_amx @llvm.x86.cast.vector.to.tile.v225i32(<225 x i32>)


        

