[llvm] c3f95e9 - [X86] Refine AMX fast register allocation

Xiang1 Zhang via llvm-commits llvm-commits at lists.llvm.org
Sat Apr 24 23:22:04 PDT 2021


Author: Xiang1 Zhang
Date: 2021-04-25T14:20:53+08:00
New Revision: c3f95e9197643b699b891ca416ce7d72cf89f5fc

URL: https://github.com/llvm/llvm-project/commit/c3f95e9197643b699b891ca416ce7d72cf89f5fc
DIFF: https://github.com/llvm/llvm-project/commit/c3f95e9197643b699b891ca416ce7d72cf89f5fc.diff

LOG: [X86] Refine AMX fast register allocation

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86PreAMXConfig.cpp
    llvm/test/CodeGen/X86/AMX/amx-configO2toO0-precfg.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86PreAMXConfig.cpp b/llvm/lib/Target/X86/X86PreAMXConfig.cpp
index c608af5e7493..243f2edd6ef1 100644
--- a/llvm/lib/Target/X86/X86PreAMXConfig.cpp
+++ b/llvm/lib/Target/X86/X86PreAMXConfig.cpp
@@ -161,7 +161,7 @@ bool X86PreAMXConfig::preWriteTileCfg(Value *I8Ptr, Instruction *Pos,
   Value *PaletteValue = ConstantInt::get(Type::getInt8Ty(Ctx), 1);
   Value *PalettePos =
       GetElementPtrInst::Create(I8Ty, I8Ptr, PaletteOffset, "", Pos);
-  new StoreInst(PaletteValue, PalettePos, "", Pos);
+  new StoreInst(PaletteValue, PalettePos, Pos);
 
   for (int I = 0, E = Shapes.size() / 2; I < E; I++) {
     Value *RowOffset = ConstantInt::get(Type::getInt64Ty(Ctx), 48 + I);
@@ -175,8 +175,8 @@ bool X86PreAMXConfig::preWriteTileCfg(Value *I8Ptr, Instruction *Pos,
     Value *Row = Shapes[I * 2];
     Value *Col = Shapes[I * 2 + 1];
     Row = new TruncInst(Row, I8Ty, "", Pos);
-    new StoreInst(Row, RowPos, "", Pos);
-    new StoreInst(Col, ColPos, "", Pos);
+    new StoreInst(Row, RowPos, Pos);
+    new StoreInst(Col, ColPos, Pos);
     Write = true;
   }
   return Write;

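The change above drops the stray "" argument from the StoreInst constructor
calls, and the test update below drops "volatile" from the expected stores
to match. A likely reading (inferred from the diff, not stated in the log)
is that the string literal was silently converting to the bool isVolatile
parameter, so the tile-config stores were created as volatile by accident.
The stand-alone C++ sketch below uses hypothetical stand-in types, not
LLVM's real headers, to illustrate that overload-resolution pitfall:

  #include <iostream>

  // Stand-ins for the two constructor shapes involved (assumed to mirror
  // StoreInst(Value*, Value*, Instruction*) and
  // StoreInst(Value*, Value*, bool isVolatile, Instruction*)).
  struct Inst {};

  struct Store {
    bool Volatile;
    Store(void *Val, void *Ptr, Inst *InsertBefore) : Volatile(false) {}
    Store(void *Val, void *Ptr, bool IsVolatile, Inst *InsertBefore)
        : Volatile(IsVolatile) {}
  };

  int main() {
    Inst Pos;
    int Val = 0, Loc = 0;
    // "" decays to a non-null const char*, which converts to bool 'true',
    // so the four-argument (isVolatile) constructor is chosen.
    Store Before(&Val, &Loc, "", &Pos);
    // Dropping the "" selects the three-argument form: a plain store.
    Store After(&Val, &Loc, &Pos);
    std::cout << Before.Volatile << ' ' << After.Volatile << '\n'; // 1 0
  }

With the three-argument form the non-volatile constructor is selected, and
the emitted IR uses plain stores, matching the updated CHECK lines below.
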
diff --git a/llvm/test/CodeGen/X86/AMX/amx-configO2toO0-precfg.ll b/llvm/test/CodeGen/X86/AMX/amx-configO2toO0-precfg.ll
index ec2a86ce9b60..ef308790b431 100644
--- a/llvm/test/CodeGen/X86/AMX/amx-configO2toO0-precfg.ll
+++ b/llvm/test/CodeGen/X86/AMX/amx-configO2toO0-precfg.ll
@@ -25,117 +25,119 @@ define dso_local void @test_api(i32 %cond, i16 signext %row, i16 signext %col) l
 ; CHECK-NEXT:   %{{[0-9]+}} = bitcast <256 x i32>* %{{[0-9]+}} to i8*
 ; CHECK-NEXT:   %tobool.not = icmp eq i32 %cond, 0
 ; CHECK-NEXT:   br i1 %tobool.not, label %if.else, label %if.then
-; CHECK:     if.then:                                          ; preds = %entry
+
+; CHECK:     if.then:
 ; CHECK-NEXT:   %{{[0-9]+}} = bitcast <16 x i32>* %{{[0-9]+}} to i8*
 ; CHECK-NEXT:   store <16 x i32> zeroinitializer, <16 x i32>* %{{[0-9]+}}, align 4
 ; CHECK-NEXT:   %{{[0-9]+}} = getelementptr i8, i8* %{{[0-9]+}}, i64 0
-; CHECK-NEXT:   store volatile i8 1, i8* %{{[0-9]+}}, align 1
+; CHECK-NEXT:   store i8 1, i8* %{{[0-9]+}}, align 1
 ; CHECK-NEXT:   %amx.tmm.0.shape.row{{.*}} = getelementptr i8, i8* %{{[0-9]+}}, i64 48
 ; CHECK-NEXT:   %{{[0-9]+}} = getelementptr i8, i8* %{{[0-9]+}}, i64 16
 ; CHECK-NEXT:   %amx.tmm.0.shape.col{{.*}} = bitcast i8* %{{[0-9]+}} to i16*
 ; CHECK-NEXT:   %{{[0-9]+}} = trunc i16 %row to i8
-; CHECK-NEXT:   store volatile i8 %{{[0-9]+}}, i8* %amx.tmm.0.shape.row{{.*}}, align 1
-; CHECK-NEXT:   store volatile i16 8, i16* %amx.tmm.0.shape.col{{.*}}, align 2
+; CHECK-NEXT:   store i8 %{{[0-9]+}}, i8* %amx.tmm.0.shape.row{{.*}}, align 1
+; CHECK-NEXT:   store i16 8, i16* %amx.tmm.0.shape.col{{.*}}, align 2
 ; CHECK-NEXT:   call void @llvm.x86.ldtilecfg(i8* %{{[0-9]+}})
 ; CHECK-NEXT:   %{{[0-9]+}} = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 8, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32)
 ; CHECK-NEXT:   call void @llvm.x86.tilestored64.internal(i16 %row, i16 8, i8* %{{[0-9]+}}, i64 64, x86_amx %{{[0-9]+}})
 ; CHECK-NEXT:   %{{[0-9]+}} = bitcast <16 x i32>* %{{[0-9]+}} to i8*
 ; CHECK-NEXT:   store <16 x i32> zeroinitializer, <16 x i32>* %{{[0-9]+}}, align 4
 ; CHECK-NEXT:   %{{[0-9]+}} = getelementptr i8, i8* %{{[0-9]+}}, i64 0
-; CHECK-NEXT:   store volatile i8 1, i8* %{{[0-9]+}}, align 1
+; CHECK-NEXT:   store i8 1, i8* %{{[0-9]+}}, align 1
 ; CHECK-NEXT:   %amx.tmm.0.shape.row{{.*}} = getelementptr i8, i8* %{{[0-9]+}}, i64 48
 ; CHECK-NEXT:   %{{[0-9]+}} = getelementptr i8, i8* %{{[0-9]+}}, i64 16
 ; CHECK-NEXT:   %amx.tmm.0.shape.col{{.*}} = bitcast i8* %{{[0-9]+}} to i16*
 ; CHECK-NEXT:   %{{[0-9]+}} = trunc i16 8 to i8
-; CHECK-NEXT:   store volatile i8 %{{[0-9]+}}, i8* %amx.tmm.0.shape.row{{.*}}, align 1
-; CHECK-NEXT:   store volatile i16 %col, i16* %amx.tmm.0.shape.col{{.*}}, align 2
+; CHECK-NEXT:   store i8 %{{[0-9]+}}, i8* %amx.tmm.0.shape.row{{.*}}, align 1
+; CHECK-NEXT:   store i16 %col, i16* %amx.tmm.0.shape.col{{.*}}, align 2
 ; CHECK-NEXT:   call void @llvm.x86.ldtilecfg(i8* %{{[0-9]+}})
 ; CHECK-NEXT:   %{{[0-9]+}} = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32)
 ; CHECK-NEXT:   call void @llvm.x86.tilestored64.internal(i16 8, i16 %col, i8* %{{[0-9]+}}, i64 64, x86_amx %{{[0-9]+}})
 ; CHECK-NEXT:   %{{[0-9]+}} = bitcast <16 x i32>* %{{[0-9]+}} to i8*
 ; CHECK-NEXT:   store <16 x i32> zeroinitializer, <16 x i32>* %{{[0-9]+}}, align 4
 ; CHECK-NEXT:   %{{[0-9]+}} = getelementptr i8, i8* %{{[0-9]+}}, i64 0
-; CHECK-NEXT:   store volatile i8 1, i8* %{{[0-9]+}}, align 1
+; CHECK-NEXT:   store i8 1, i8* %{{[0-9]+}}, align 1
 ; CHECK-NEXT:   %amx.tmm.0.shape.row{{.*}} = getelementptr i8, i8* %{{[0-9]+}}, i64 48
 ; CHECK-NEXT:   %{{[0-9]+}} = getelementptr i8, i8* %{{[0-9]+}}, i64 16
 ; CHECK-NEXT:   %amx.tmm.0.shape.col{{.*}} = bitcast i8* %{{[0-9]+}} to i16*
 ; CHECK-NEXT:   %{{[0-9]+}} = trunc i16 %row to i8
-; CHECK-NEXT:   store volatile i8 %{{[0-9]+}}, i8* %amx.tmm.0.shape.row{{.*}}, align 1
-; CHECK-NEXT:   store volatile i16 %col, i16* %amx.tmm.0.shape.col{{.*}}, align 2
+; CHECK-NEXT:   store i8 %{{[0-9]+}}, i8* %amx.tmm.0.shape.row{{.*}}, align 1
+; CHECK-NEXT:   store i16 %col, i16* %amx.tmm.0.shape.col{{.*}}, align 2
 ; CHECK-NEXT:   call void @llvm.x86.ldtilecfg(i8* %{{[0-9]+}})
 ; CHECK-NEXT:   %{{[0-9]+}} = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32)
 ; CHECK-NEXT:   call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col, i8* %{{[0-9]+}}, i64 64, x86_amx %{{[0-9]+}})
 ; CHECK-NEXT:   br label %if.end
-; CHECK:   if.else:
+
+; CHECK:     if.else:
 ; CHECK-NEXT:   %{{[0-9]+}} = bitcast <16 x i32>* %{{[0-9]+}} to i8*
 ; CHECK-NEXT:   store <16 x i32> zeroinitializer, <16 x i32>* %{{[0-9]+}}, align 4
 ; CHECK-NEXT:   %{{[0-9]+}} = getelementptr i8, i8* %{{[0-9]+}}, i64 0
-; CHECK-NEXT:   store volatile i8 1, i8* %{{[0-9]+}}, align 1
+; CHECK-NEXT:   store i8 1, i8* %{{[0-9]+}}, align 1
 ; CHECK-NEXT:   %amx.tmm.0.shape.row{{.*}} = getelementptr i8, i8* %{{[0-9]+}}, i64 48
 ; CHECK-NEXT:   %{{[0-9]+}} = getelementptr i8, i8* %{{[0-9]+}}, i64 16
 ; CHECK-NEXT:   %amx.tmm.0.shape.col{{.*}} = bitcast i8* %{{[0-9]+}} to i16*
 ; CHECK-NEXT:   %{{[0-9]+}} = trunc i16 %row to i8
-; CHECK-NEXT:   store volatile i8 %{{[0-9]+}}, i8* %amx.tmm.0.shape.row{{.*}}, align 1
-; CHECK-NEXT:   store volatile i16 8, i16* %amx.tmm.0.shape.col{{.*}}, align 2
+; CHECK-NEXT:   store i8 %{{[0-9]+}}, i8* %amx.tmm.0.shape.row{{.*}}, align 1
+; CHECK-NEXT:   store i16 8, i16* %amx.tmm.0.shape.col{{.*}}, align 2
 ; CHECK-NEXT:   call void @llvm.x86.ldtilecfg(i8* %{{[0-9]+}})
 ; CHECK-NEXT:   %{{[0-9]+}} = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 8, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf2, i64 0, i64 0), i64 32)
 ; CHECK-NEXT:   call void @llvm.x86.tilestored64.internal(i16 %row, i16 8, i8* %{{[0-9]+}}, i64 64, x86_amx %{{[0-9]+}})
 ; CHECK-NEXT:   %{{[0-9]+}} = bitcast <16 x i32>* %{{[0-9]+}} to i8*
 ; CHECK-NEXT:   store <16 x i32> zeroinitializer, <16 x i32>* %{{[0-9]+}}, align 4
 ; CHECK-NEXT:   %{{[0-9]+}} = getelementptr i8, i8* %{{[0-9]+}}, i64 0
-; CHECK-NEXT:   store volatile i8 1, i8* %{{[0-9]+}}, align 1
+; CHECK-NEXT:   store i8 1, i8* %{{[0-9]+}}, align 1
 ; CHECK-NEXT:   %amx.tmm.0.shape.row{{.*}} = getelementptr i8, i8* %{{[0-9]+}}, i64 48
 ; CHECK-NEXT:   %{{[0-9]+}} = getelementptr i8, i8* %{{[0-9]+}}, i64 16
 ; CHECK-NEXT:   %amx.tmm.0.shape.col{{.*}} = bitcast i8* %{{[0-9]+}} to i16*
 ; CHECK-NEXT:   %{{[0-9]+}} = trunc i16 8 to i8
-; CHECK-NEXT:   store volatile i8 %{{[0-9]+}}, i8* %amx.tmm.0.shape.row{{.*}}, align 1
-; CHECK-NEXT:   store volatile i16 %col, i16* %amx.tmm.0.shape.col{{.*}}, align 2
+; CHECK-NEXT:   store i8 %{{[0-9]+}}, i8* %amx.tmm.0.shape.row{{.*}}, align 1
+; CHECK-NEXT:   store i16 %col, i16* %amx.tmm.0.shape.col{{.*}}, align 2
 ; CHECK-NEXT:   call void @llvm.x86.ldtilecfg(i8* %{{[0-9]+}})
 ; CHECK-NEXT:   %{{[0-9]+}} = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf2, i64 0, i64 0), i64 32)
 ; CHECK-NEXT:   call void @llvm.x86.tilestored64.internal(i16 8, i16 %col, i8* %{{[0-9]+}}, i64 64, x86_amx %{{[0-9]+}})
 ; CHECK-NEXT:   %{{[0-9]+}} = bitcast <16 x i32>* %{{[0-9]+}} to i8*
 ; CHECK-NEXT:   store <16 x i32> zeroinitializer, <16 x i32>* %{{[0-9]+}}, align 4
 ; CHECK-NEXT:   %{{[0-9]+}} = getelementptr i8, i8* %{{[0-9]+}}, i64 0
-; CHECK-NEXT:   store volatile i8 1, i8* %{{[0-9]+}}, align 1
+; CHECK-NEXT:   store i8 1, i8* %{{[0-9]+}}, align 1
 ; CHECK-NEXT:   %amx.tmm.0.shape.row{{.*}} = getelementptr i8, i8* %{{[0-9]+}}, i64 48
 ; CHECK-NEXT:   %{{[0-9]+}} = getelementptr i8, i8* %{{[0-9]+}}, i64 16
 ; CHECK-NEXT:   %amx.tmm.0.shape.col{{.*}} = bitcast i8* %{{[0-9]+}} to i16*
 ; CHECK-NEXT:   %{{[0-9]+}} = trunc i16 %row to i8
-; CHECK-NEXT:   store volatile i8 %{{[0-9]+}}, i8* %amx.tmm.0.shape.row{{.*}}, align 1
-; CHECK-NEXT:   store volatile i16 %col, i16* %amx.tmm.0.shape.col{{.*}}, align 2
+; CHECK-NEXT:   store i8 %{{[0-9]+}}, i8* %amx.tmm.0.shape.row{{.*}}, align 1
+; CHECK-NEXT:   store i16 %col, i16* %amx.tmm.0.shape.col{{.*}}, align 2
 ; CHECK-NEXT:   call void @llvm.x86.ldtilecfg(i8* %{{[0-9]+}})
 ; CHECK-NEXT:   %{{[0-9]+}} = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf2, i64 0, i64 0), i64 32)
 ; CHECK-NEXT:   call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col, i8* %{{[0-9]+}}, i64 64, x86_amx %{{[0-9]+}})
 ; CHECK-NEXT:   br label %if.end
-; CHECK:   if.end:                                           ; preds = %if.else, %if.then
+; CHECK:     if.end:                                           ; preds = %if.else, %if.then
 ; CHECK-NEXT:   %{{[0-9]+}} = bitcast <16 x i32>* %{{[0-9]+}} to i8*
 ; CHECK-NEXT:   store <16 x i32> zeroinitializer, <16 x i32>* %{{[0-9]+}}, align 4
 ; CHECK-NEXT:   %{{[0-9]+}} = getelementptr i8, i8* %{{[0-9]+}}, i64 0
-; CHECK-NEXT:   store volatile i8 1, i8* %{{[0-9]+}}, align 1
+; CHECK-NEXT:   store i8 1, i8* %{{[0-9]+}}, align 1
 ; CHECK-NEXT:   %amx.tmm.0.shape.row{{.*}} = getelementptr i8, i8* %{{[0-9]+}}, i64 48
 ; CHECK-NEXT:   %{{[0-9]+}} = getelementptr i8, i8* %{{[0-9]+}}, i64 16
 ; CHECK-NEXT:   %amx.tmm.0.shape.col{{.*}} = bitcast i8* %{{[0-9]+}} to i16*
 ; CHECK-NEXT:   %{{[0-9]+}} = trunc i16 %row to i8
-; CHECK-NEXT:   store volatile i8 %{{[0-9]+}}, i8* %amx.tmm.0.shape.row{{.*}}, align 1
-; CHECK-NEXT:   store volatile i16 %col, i16* %amx.tmm.0.shape.col{{.*}}, align 2
+; CHECK-NEXT:   store i8 %{{[0-9]+}}, i8* %amx.tmm.0.shape.row{{.*}}, align 1
+; CHECK-NEXT:   store i16 %col, i16* %amx.tmm.0.shape.col{{.*}}, align 2
 ; CHECK-NEXT:   %amx.tmm.1.shape.row{{.*}} = getelementptr i8, i8* %{{[0-9]+}}, i64 49
 ; CHECK-NEXT:   %{{[0-9]+}} = getelementptr i8, i8* %{{[0-9]+}}, i64 18
 ; CHECK-NEXT:   %amx.tmm.1.shape.col{{.*}} = bitcast i8* %{{[0-9]+}} to i16*
 ; CHECK-NEXT:   %{{[0-9]+}} = trunc i16 %row to i8
-; CHECK-NEXT:   store volatile i8 %{{[0-9]+}}, i8* %amx.tmm.1.shape.row{{.*}}, align 1
-; CHECK-NEXT:   store volatile i16 8, i16* %amx.tmm.1.shape.col{{.*}}, align 2
+; CHECK-NEXT:   store i8 %{{[0-9]+}}, i8* %amx.tmm.1.shape.row{{.*}}, align 1
+; CHECK-NEXT:   store i16 8, i16* %amx.tmm.1.shape.col{{.*}}, align 2
 ; CHECK-NEXT:   %amx.tmm.2.shape.row{{.*}} = getelementptr i8, i8* %{{[0-9]+}}, i64 50
 ; CHECK-NEXT:   %{{[0-9]+}} = getelementptr i8, i8* %{{[0-9]+}}, i64 20
 ; CHECK-NEXT:   %amx.tmm.2.shape.col{{.*}} = bitcast i8* %{{[0-9]+}} to i16*
 ; CHECK-NEXT:   %{{[0-9]+}} = trunc i16 8 to i8
-; CHECK-NEXT:   store volatile i8 %{{[0-9]+}}, i8* %amx.tmm.2.shape.row{{.*}}, align 1
-; CHECK-NEXT:   store volatile i16 %col, i16* %amx.tmm.2.shape.col{{.*}}, align 2
+; CHECK-NEXT:   store i8 %{{[0-9]+}}, i8* %amx.tmm.2.shape.row{{.*}}, align 1
+; CHECK-NEXT:   store i16 %col, i16* %amx.tmm.2.shape.col{{.*}}, align 2
 ; CHECK-NEXT:   %amx.tmm.3.shape.row{{.*}} = getelementptr i8, i8* %{{[0-9]+}}, i64 51
 ; CHECK-NEXT:   %{{[0-9]+}} = getelementptr i8, i8* %{{[0-9]+}}, i64 22
 ; CHECK-NEXT:   %amx.tmm.3.shape.col{{.*}} = bitcast i8* %{{[0-9]+}} to i16*
 ; CHECK-NEXT:   %{{[0-9]+}} = trunc i16 %row to i8
-; CHECK-NEXT:   store volatile i8 %{{[0-9]+}}, i8* %amx.tmm.3.shape.row{{.*}}, align 1
-; CHECK-NEXT:   store volatile i16 %col, i16* %amx.tmm.3.shape.col{{.*}}, align 2
+; CHECK-NEXT:   store i8 %{{[0-9]+}}, i8* %amx.tmm.3.shape.row{{.*}}, align 1
+; CHECK-NEXT:   store i16 %col, i16* %amx.tmm.3.shape.col{{.*}}, align 2
 ; CHECK-NEXT:   call void @llvm.x86.ldtilecfg(i8* %{{[0-9]+}})
 ; CHECK-NEXT:   %{{[0-9]+}} = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 8, i8* %{{[0-9]+}}, i64 64)
 ; CHECK-NEXT:   %{{[0-9]+}} = call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 %col, i8* %{{[0-9]+}}, i64 64)
@@ -145,13 +147,13 @@ define dso_local void @test_api(i32 %cond, i16 signext %row, i16 signext %col) l
 ; CHECK-NEXT:   %{{[0-9]+}} = bitcast <16 x i32>* %{{[0-9]+}} to i8*
 ; CHECK-NEXT:   store <16 x i32> zeroinitializer, <16 x i32>* %{{[0-9]+}}, align 4
 ; CHECK-NEXT:   %{{[0-9]+}} = getelementptr i8, i8* %{{[0-9]+}}, i64 0
-; CHECK-NEXT:   store volatile i8 1, i8* %{{[0-9]+}}, align 1
+; CHECK-NEXT:   store i8 1, i8* %{{[0-9]+}}, align 1
 ; CHECK-NEXT:   %amx.tmm.0.shape.row{{.*}} = getelementptr i8, i8* %{{[0-9]+}}, i64 48
 ; CHECK-NEXT:   %{{[0-9]+}} = getelementptr i8, i8* %{{[0-9]+}}, i64 16
 ; CHECK-NEXT:   %amx.tmm.0.shape.col{{.*}} = bitcast i8* %{{[0-9]+}} to i16*
 ; CHECK-NEXT:   %{{[0-9]+}} = trunc i16 %row to i8
-; CHECK-NEXT:   store volatile i8 %{{[0-9]+}}, i8* %amx.tmm.0.shape.row{{.*}}, align 1
-; CHECK-NEXT:   store volatile i16 %col, i16* %amx.tmm.0.shape.col{{.*}}, align 2
+; CHECK-NEXT:   store i8 %{{[0-9]+}}, i8* %amx.tmm.0.shape.row{{.*}}, align 1
+; CHECK-NEXT:   store i16 %col, i16* %amx.tmm.0.shape.col{{.*}}, align 2
 ; CHECK-NEXT:   call void @llvm.x86.ldtilecfg(i8* %{{[0-9]+}})
 ; CHECK-NEXT:   %{{[0-9]+}} = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col, i8* %{{[0-9]+}}, i64 64)
 ; CHECK-NEXT:   tail call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32, x86_amx %{{[0-9]+}})
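
For readers tracking the byte offsets in the CHECK lines above (palette at
offset 0, shape.col slots at 16, 18, 20, 22, shape.row slots at 48, 49, 50,
51), they correspond to the 64-byte configuration block consumed by
llvm.x86.ldtilecfg. A rough C++ sketch of that layout, given here only for
illustration and assuming the standard AMX palette-1 format, is:

  #include <cstddef>
  #include <cstdint>

  // Approximate layout of the 64-byte ldtilecfg memory operand, assumed
  // from the offsets used by the pass (0, 16 + 2*I, 48 + I).
  struct TileConfig {
    uint8_t  Palette;       // byte 0: palette id (set to 1 by the pass)
    uint8_t  StartRow;      // byte 1
    uint8_t  Reserved[14];  // bytes 2..15
    uint16_t ColsB[16];     // bytes 16..47: bytes per row, per tile
    uint8_t  Rows[16];      // bytes 48..63: rows per tile
  };

  static_assert(sizeof(TileConfig) == 64, "one 64-byte config block");
  static_assert(offsetof(TileConfig, ColsB) == 16, "colsb starts at 16");
  static_assert(offsetof(TileConfig, Rows) == 48, "rows start at 48");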