[llvm] c712bf3 - [X86][AMX] Add test case for D124378.

via llvm-commits <llvm-commits at lists.llvm.org>
Mon Apr 25 05:03:43 PDT 2022


Author: Luo, Yuanke
Date: 2022-04-25T20:03:27+08:00
New Revision: c712bf3ce41f4bd553e14d36f29a87c3898ef5b0

URL: https://github.com/llvm/llvm-project/commit/c712bf3ce41f4bd553e14d36f29a87c3898ef5b0
DIFF: https://github.com/llvm/llvm-project/commit/c712bf3ce41f4bd553e14d36f29a87c3898ef5b0.diff

LOG: [X86][AMX] Add test case for D124378.
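
The test pins down the current -lower-amx-type output for tile/vector
casts that feed plain loads and stores: as the CHECK lines below show,
the pass currently round-trips the data through a stack temporary
(alloca). D124378 appears to propose combining the cast with the
adjacent load/store. A minimal sketch of the store case follows; the
combined form after the second comment is an assumption about the
expected result of D124378, not part of this commit:

  ; Input pattern: cast the tile to a vector, then store the vector.
  %t1 = call x86_amx @llvm.x86.tilezero.internal(i16 16, i16 64)
  %t2 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t1)
  store <256 x i32> %t2, <256 x i32>* %p, align 64

  ; Hypothetical combined form: store the tile directly through %p,
  ; skipping the alloca round-trip (signature matches the declaration
  ; of @llvm.x86.tilestored64.internal in the test below).
  %buf = bitcast <256 x i32>* %p to i8*
  call void @llvm.x86.tilestored64.internal(i16 16, i16 64, i8* %buf, i64 64, x86_amx %t1)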

Added: 
    llvm/test/CodeGen/X86/AMX/amx-combine.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/AMX/amx-combine.ll b/llvm/test/CodeGen/X86/AMX/amx-combine.ll
new file mode 100644
index 0000000000000..2fd095f73cb92
--- /dev/null
+++ b/llvm/test/CodeGen/X86/AMX/amx-combine.ll
@@ -0,0 +1,93 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt --codegen-opt-level=2 -mtriple=x86_64 -lower-amx-type %s -S | FileCheck %s
+
+define void @combine_store(<256 x i32> *%p) {
+; CHECK-LABEL: @combine_store(
+; CHECK-NEXT:    [[TMP1:%.*]] = alloca <256 x i32>, align 64
+; CHECK-NEXT:    [[T1:%.*]] = call x86_amx @llvm.x86.tilezero.internal(i16 16, i16 64)
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <256 x i32>* [[TMP1]] to i8*
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 16, i16 64, i8* [[TMP2]], i64 64, x86_amx [[T1]])
+; CHECK-NEXT:    [[TMP3:%.*]] = load <256 x i32>, <256 x i32>* [[TMP1]], align 1024
+; CHECK-NEXT:    store <256 x i32> [[TMP3]], <256 x i32>* [[P:%.*]], align 64
+; CHECK-NEXT:    ret void
+;
+  %t1 = call x86_amx @llvm.x86.tilezero.internal(i16 16, i16 64)
+  %t2 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t1)
+  store <256 x i32> %t2, <256 x i32>* %p, align 64
+  ret void
+}
+
+define <256 x i32> @combine_store_2user(<256 x i32> *%p) {
+; CHECK-LABEL: @combine_store_2user(
+; CHECK-NEXT:    [[TMP1:%.*]] = alloca <256 x i32>, align 64
+; CHECK-NEXT:    [[T1:%.*]] = call x86_amx @llvm.x86.tilezero.internal(i16 16, i16 64)
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <256 x i32>* [[TMP1]] to i8*
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 16, i16 64, i8* [[TMP2]], i64 64, x86_amx [[T1]])
+; CHECK-NEXT:    [[TMP3:%.*]] = load <256 x i32>, <256 x i32>* [[TMP1]], align 1024
+; CHECK-NEXT:    store <256 x i32> [[TMP3]], <256 x i32>* [[P:%.*]], align 64
+; CHECK-NEXT:    ret <256 x i32> [[TMP3]]
+;
+  %t1 = call x86_amx @llvm.x86.tilezero.internal(i16 16, i16 64)
+  %t2 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t1)
+  store <256 x i32> %t2, <256 x i32>* %p, align 64
+  ret <256 x i32> %t2
+}
+
+define void @combine_load(<256 x i32> *%p, i8 *%p2) {
+; CHECK-LABEL: @combine_load(
+; CHECK-NEXT:    [[TMP1:%.*]] = alloca <256 x i32>, align 64
+; CHECK-NEXT:    [[T1:%.*]] = load <256 x i32>, <256 x i32>* [[P:%.*]], align 64
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <256 x i32>* [[TMP1]] to i8*
+; CHECK-NEXT:    store <256 x i32> [[T1]], <256 x i32>* [[TMP1]], align 1024
+; CHECK-NEXT:    [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, i8* [[TMP2]], i64 64)
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 16, i16 64, i8* [[P2:%.*]], i64 64, x86_amx [[TMP3]])
+; CHECK-NEXT:    ret void
+;
+  %t1 = load <256 x i32>, <256 x i32>* %p, align 64
+  %t2 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t1)
+  call void @llvm.x86.tilestored64.internal(i16 16, i16 64, i8* %p2, i64 64, x86_amx %t2)
+  ret void
+}
+
+define <256 x i32> @combine_load_2user(<256 x i32> *%p, i8 *%p2) {
+; CHECK-LABEL: @combine_load_2user(
+; CHECK-NEXT:    [[TMP1:%.*]] = alloca <256 x i32>, align 64
+; CHECK-NEXT:    [[T1:%.*]] = load <256 x i32>, <256 x i32>* [[P:%.*]], align 64
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <256 x i32>* [[TMP1]] to i8*
+; CHECK-NEXT:    store <256 x i32> [[T1]], <256 x i32>* [[TMP1]], align 1024
+; CHECK-NEXT:    [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, i8* [[TMP2]], i64 64)
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 16, i16 64, i8* [[P2:%.*]], i64 64, x86_amx [[TMP3]])
+; CHECK-NEXT:    ret <256 x i32> [[T1]]
+;
+  %t1 = load <256 x i32>, <256 x i32>* %p, align 64
+  %t2 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t1)
+  call void @llvm.x86.tilestored64.internal(i16 16, i16 64, i8* %p2, i64 64, x86_amx %t2)
+  %t3 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t2)
+  ret <256 x i32> %t3
+}
+
+define <256 x i32> @combine_load_3user(<256 x i32> *%p, i8 *%p2) {
+; CHECK-LABEL: @combine_load_3user(
+; CHECK-NEXT:    [[TMP1:%.*]] = alloca <256 x i32>, align 64
+; CHECK-NEXT:    [[T1:%.*]] = load <256 x i32>, <256 x i32>* [[P:%.*]], align 64
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <256 x i32>* [[TMP1]] to i8*
+; CHECK-NEXT:    store <256 x i32> [[T1]], <256 x i32>* [[TMP1]], align 1024
+; CHECK-NEXT:    [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 16, i8* [[TMP2]], i64 16)
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 16, i16 64, i8* [[P2:%.*]], i64 64, x86_amx [[TMP3]])
+; CHECK-NEXT:    [[TMP4:%.*]] = call x86_amx @llvm.x86.tdpbssd.internal(i16 16, i16 16, i16 64, x86_amx [[TMP3]], x86_amx [[TMP3]], x86_amx [[TMP3]])
+; CHECK-NEXT:    ret <256 x i32> [[T1]]
+;
+  %t1 = load <256 x i32>, <256 x i32>* %p, align 64
+  %t2 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t1)
+  call void @llvm.x86.tilestored64.internal(i16 16, i16 64, i8* %p2, i64 64, x86_amx %t2)
+  %t3 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t2)
+  call x86_amx @llvm.x86.tdpbssd.internal(i16 16, i16 16, i16 64, x86_amx %t2, x86_amx %t2, x86_amx %t2)
+  ret <256 x i32> %t3
+}
+
+declare x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32>)
+declare <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx)
+declare x86_amx @llvm.x86.tilezero.internal(i16, i16)
+declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, i8*, i64)
+declare void @llvm.x86.tilestored64.internal(i16, i16, i8*, i64, x86_amx)
+declare x86_amx @llvm.x86.tdpbssd.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
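
As the NOTE at the top of the file says, the CHECK lines were
autogenerated with utils/update_test_checks.py. Once D124378 lands and
the lowering changes, they can be refreshed the same way (the opt
binary path below is illustrative):

  llvm/utils/update_test_checks.py --opt-binary=build/bin/opt \
      llvm/test/CodeGen/X86/AMX/amx-combine.ll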
