[llvm-branch-commits] [llvm] c535a7f - [X86] Fix tile spill merge issue.

via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Mon Jan 18 18:57:16 PST 2021


Author: Luo, Yuanke
Date: 2021-01-19T10:51:42+08:00
New Revision: c535a7fdadb4679327ebb1b3b82c73c9ff6a164a

URL: https://github.com/llvm/llvm-project/commit/c535a7fdadb4679327ebb1b3b82c73c9ff6a164a
DIFF: https://github.com/llvm/llvm-project/commit/c535a7fdadb4679327ebb1b3b82c73c9ff6a164a.diff

LOG: [X86] Fix tile spill merge issue.

This is an additional bug fix for c5be0e0cc0. The distance computed for
the spill instructions was wrong in the previous patch.

Differential Revision: https://reviews.llvm.org/D94772
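
For context: MIS spans the machine instructions emitted for a single
spill, and MII points at the store instruction, i.e. the last one
emitted. A minimal standalone sketch (plain C++, not the LLVM data
structures; the list type and instruction strings are illustrative
assumptions) of why the old "<= 1" test also accepted the
two-instruction AMX store sequence:

    // MIS is modeled as a list of emitted instructions; MII points at the
    // store, which is the last instruction emitted for the spill.
    #include <cassert>
    #include <iterator>
    #include <list>
    #include <string>

    int main() {
      // Ordinary spill: one store instruction, so MII == begin().
      std::list<std::string> Single = {"MOV64mr <slot>, <reg>"};
      auto MII = std::prev(Single.end());
      assert(std::distance(Single.begin(), MII) <= 1); // old check: passes
      assert(Single.begin() == MII);                   // new check: passes

      // AMX tile spill: two instructions (materialize the stride, then
      // tilestored), so the distance from begin() to MII is 1, not 0.
      std::list<std::string> Tile = {"MOV64ri <stride>",
                                     "TILESTORED <slot>, <tmm>"};
      MII = std::prev(Tile.end());
      assert(std::distance(Tile.begin(), MII) <= 1); // old check: wrongly passes
      assert(Tile.begin() != MII);                   // new check: rejects the merge
      return 0;
    }

Comparing the iterators directly admits only the single-instruction
case, so two-instruction AMX spills are no longer registered as
mergeable.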

Added: 
    

Modified: 
    llvm/lib/CodeGen/InlineSpiller.cpp
    llvm/test/CodeGen/X86/AMX/amx-spill-merge.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/InlineSpiller.cpp b/llvm/lib/CodeGen/InlineSpiller.cpp
index 1892ca646788..876e1d3f932a 100644
--- a/llvm/lib/CodeGen/InlineSpiller.cpp
+++ b/llvm/lib/CodeGen/InlineSpiller.cpp
@@ -431,7 +431,7 @@ bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
   // If only one store instruction is required for the spill, add it to the
   // mergeable list. In X86 AMX, two instructions are required to store a
   // tile register, so we disable the merge for that case.
-  if (std::distance(MIS.begin(), MII) <= 1)
+  if (MIS.begin() == MII)
     HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
   ++NumSpills;
   return true;

diff --git a/llvm/test/CodeGen/X86/AMX/amx-spill-merge.ll b/llvm/test/CodeGen/X86/AMX/amx-spill-merge.ll
index 6f3303f0049b..6541e3e4ea6d 100644
--- a/llvm/test/CodeGen/X86/AMX/amx-spill-merge.ll
+++ b/llvm/test/CodeGen/X86/AMX/amx-spill-merge.ll
@@ -3,23 +3,14 @@
 
 @buf = dso_local global [3072 x i8] zeroinitializer, align 16
 
-define dso_local void @test_api(i16 signext %0, i16 signext %1) local_unnamed_addr {
+define dso_local void @test_api(i16 signext %0, i16 signext %1) nounwind {
 ; CHECK-LABEL: test_api:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pushq %rbp
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    pushq %r15
-; CHECK-NEXT:    .cfi_def_cfa_offset 24
 ; CHECK-NEXT:    pushq %r14
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    pushq %rbx
-; CHECK-NEXT:    .cfi_def_cfa_offset 40
 ; CHECK-NEXT:    subq $4056, %rsp # imm = 0xFD8
-; CHECK-NEXT:    .cfi_def_cfa_offset 4096
-; CHECK-NEXT:    .cfi_offset %rbx, -40
-; CHECK-NEXT:    .cfi_offset %r14, -32
-; CHECK-NEXT:    .cfi_offset %r15, -24
-; CHECK-NEXT:    .cfi_offset %rbp, -16
 ; CHECK-NEXT:    movl %esi, %ebx
 ; CHECK-NEXT:    movl %edi, %ebp
 ; CHECK-NEXT:    vpxord %zmm0, %zmm0, %zmm0
@@ -92,15 +83,10 @@ define dso_local void @test_api(i16 signext %0, i16 signext %1) local_unnamed_ad
 ; CHECK-NEXT:    movl $buf+2048, %eax
 ; CHECK-NEXT:    tilestored %tmm5, (%rax,%rcx)
 ; CHECK-NEXT:    addq $4056, %rsp # imm = 0xFD8
-; CHECK-NEXT:    .cfi_def_cfa_offset 40
 ; CHECK-NEXT:    popq %rbx
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    popq %r14
-; CHECK-NEXT:    .cfi_def_cfa_offset 24
 ; CHECK-NEXT:    popq %r15
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    popq %rbp
-; CHECK-NEXT:    .cfi_def_cfa_offset 8
 ; CHECK-NEXT:    tilerelease
 ; CHECK-NEXT:    retq
   %c = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %0, i16 %1, i8* getelementptr inbounds ([3072 x i8], [3072 x i8]* @buf, i64 0, i64 2048), i64 32)
@@ -126,8 +112,93 @@ exit:
   ret void
 }
 
-declare dso_local void @foo(...) local_unnamed_addr
+define dso_local void @test3(i8 *%buf) nounwind {
+; CHECK-LABEL: test3:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushq %rbp
+; CHECK-NEXT:    pushq %r15
+; CHECK-NEXT:    pushq %r14
+; CHECK-NEXT:    pushq %rbx
+; CHECK-NEXT:    subq $3032, %rsp # imm = 0xBD8
+; CHECK-NEXT:    vpxord %zmm0, %zmm0, %zmm0
+; CHECK-NEXT:    vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movb $1, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movb $8, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movw $8, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movb $8, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movw $8, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movb $8, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movw $8, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    ldtilecfg {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movw $8, %r15w
+; CHECK-NEXT:    tilezero %tmm0
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    testb %al, %al
+; CHECK-NEXT:    jne .LBB1_3
+; CHECK-NEXT:  # %bb.1: # %loop.header.preheader
+; CHECK-NEXT:    movq %rdi, %rbx
+; CHECK-NEXT:    movl $32, %r14d
+; CHECK-NEXT:    xorl %ebp, %ebp
+; CHECK-NEXT:    sttilecfg {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Folded Spill
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB1_2: # %loop.header
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movabsq $64, %rax
+; CHECK-NEXT:    tilestored %tmm0, 1024(%rsp,%rax) # 1024-byte Folded Spill
+; CHECK-NEXT:    tilestored %tmm0, (%rbx,%r14)
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    callq foo
+; CHECK-NEXT:    ldtilecfg {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Folded Reload
+; CHECK-NEXT:    tilezero %tmm0
+; CHECK-NEXT:    tileloadd (%rbx,%r14), %tmm1
+; CHECK-NEXT:    tileloadd (%rbx,%r14), %tmm2
+; CHECK-NEXT:    tdpbssd %tmm2, %tmm1, %tmm0
+; CHECK-NEXT:    tilestored %tmm0, (%rbx,%r14)
+; CHECK-NEXT:    movabsq $64, %rax
+; CHECK-NEXT:    tileloadd 1024(%rsp,%rax), %tmm0 # 1024-byte Folded Reload
+; CHECK-NEXT:    incl %ebp
+; CHECK-NEXT:    cmpw $100, %bp
+; CHECK-NEXT:    jl .LBB1_2
+; CHECK-NEXT:  .LBB1_3: # %exit
+; CHECK-NEXT:    addq $3032, %rsp # imm = 0xBD8
+; CHECK-NEXT:    popq %rbx
+; CHECK-NEXT:    popq %r14
+; CHECK-NEXT:    popq %r15
+; CHECK-NEXT:    popq %rbp
+; CHECK-NEXT:    tilerelease
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+entry:
+  %t5 = tail call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 8)
+  br i1 undef, label %loop.header, label %exit
+
+loop.header:
+  %ivphi = phi i16 [0, %entry], [%iv, %loop.latch]
+  call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %buf, i64 32, x86_amx %t5)
+  call void (...) @foo()
+  br label %loop.body
+
+loop.body:
+  %t1 = tail call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 8)
+  %t2 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 8, i8* %buf, i64 32)
+  %t3 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 8, i8* %buf, i64 32)
+  %t4 = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 8, i16 8, i16 8, x86_amx %t1, x86_amx %t2, x86_amx %t3)
+  tail call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %buf, i64 32, x86_amx %t4)
+  br label %loop.latch
+
+loop.latch:
+  %iv = add i16 %ivphi, 1
+  %c = icmp slt i16 %iv, 100
+  br i1 %c, label %loop.header, label %exit
+
+exit:
+  ret void
+}
+
+declare dso_local void @foo(...) nounwind
 
+declare x86_amx @llvm.x86.tilezero.internal(i16, i16)
 declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, i8*, i64)
 declare x86_amx @llvm.x86.tdpbssd.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
 declare void @llvm.x86.tilestored64.internal(i16, i16, i8*, i64, x86_amx)
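
Note: the CHECK lines above are verified by the test's RUN line, which
falls outside the hunks shown here. It presumably resembles the
following (the exact llc flags are an assumption, not part of this
diff):

    ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+amx-int8 -mattr=+avx512f | FileCheck %s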


        

