[llvm] b576a6b - [X86][AMX] Fix a bug after #83628 (#91207)

via llvm-commits <llvm-commits at lists.llvm.org>
Wed May 15 08:15:52 PDT 2024


Author: Phoebe Wang
Date: 2024-05-15T23:15:48+08:00
New Revision: b576a6b0452b9bfb634feaa215506d8a1afe857d

URL: https://github.com/llvm/llvm-project/commit/b576a6b0452b9bfb634feaa215506d8a1afe857d
DIFF: https://github.com/llvm/llvm-project/commit/b576a6b0452b9bfb634feaa215506d8a1afe857d.diff

LOG: [X86][AMX] Fix a bug after #83628 (#91207)

We need to check that `GR64Cand` is a valid register before using it.

A dedicated test is not needed since the case is covered in llvm-test-suite.

Fixes #90954
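
For reference, `llvm::Register` contextually converts to bool via its
`isValid()` check, which is what the `GR64Cand ? GR64Cand : X86::RAX`
expression in the first hunk relies on. Below is a minimal standalone
sketch of that fallback pattern; the `Register` struct and the `RAX`
value are simplified stand-ins for illustration, not the real LLVM
definitions:

    #include <iostream>

    // Simplified stand-in for llvm::Register: the value 0 means
    // "no register" and, like the real class, it converts to bool
    // via an isValid()-style check.
    struct Register {
      unsigned Reg = 0;
      explicit operator bool() const { return Reg != 0; }
      unsigned id() const { return Reg; }
    };

    // Hypothetical placeholder for the X86::RAX enum value; the real
    // constant comes from the TableGen-erated X86 register info.
    constexpr unsigned RAX = 49;

    int main() {
      Register GR64Cand{}; // e.g. register scavenging found no free GR64
      // The fix: fall back to RAX when the candidate is invalid,
      // instead of encoding a nonexistent register.
      unsigned StrideReg = GR64Cand ? GR64Cand.id() : RAX;
      std::cout << "stride register encoding: " << StrideReg << "\n";
      return 0;
    }

Judging from the second hunk (the "restore %rax" path that reloads
X86::RAX from StrideSS), %rax has already been saved to a stack slot
when no candidate is available, so substituting it as the stride
register is safe.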

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86LowerTileCopy.cpp
    llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86LowerTileCopy.cpp b/llvm/lib/Target/X86/X86LowerTileCopy.cpp
index fd05e16ac1cef..60c024556ff13 100644
--- a/llvm/lib/Target/X86/X86LowerTileCopy.cpp
+++ b/llvm/lib/Target/X86/X86LowerTileCopy.cpp
@@ -146,7 +146,7 @@ bool X86LowerTileCopy::runOnMachineFunction(MachineFunction &MF) {
           addFrameReference(BuildMI(MBB, MI, DL, TII->get(Opc)), TileSS)
               .addReg(SrcReg, getKillRegState(SrcMO.isKill()));
       MachineOperand &MO = NewMI->getOperand(2);
-      MO.setReg(GR64Cand);
+      MO.setReg(GR64Cand ? GR64Cand : X86::RAX);
       MO.setIsKill(true);
       // tileloadd (%sp, %idx), %tmm
       Opc = GET_EGPR_IF_ENABLED(X86::TILELOADD);
@@ -157,7 +157,7 @@ bool X86LowerTileCopy::runOnMachineFunction(MachineFunction &MF) {
         // restore %rax
         // mov (%sp) %rax
         addFrameReference(
-            BuildMI(MBB, MI, DL, TII->get(X86::MOV64rm), GR64Cand), StrideSS);
+            BuildMI(MBB, MI, DL, TII->get(X86::MOV64rm), X86::RAX), StrideSS);
       }
       MI.eraseFromParent();
       Changed = true;

diff --git a/llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll b/llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll
index 4a9f9d3bf77aa..7511e5953dac1 100644
--- a/llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll
+++ b/llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll
@@ -51,3 +51,156 @@ declare x86_amx @llvm.x86.tdpbusd.internal(i16, i16, i16, x86_amx, x86_amx, x86_
 declare x86_amx @llvm.x86.tdpbuud.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
 declare x86_amx @llvm.x86.tdpbf16ps.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
 declare void @llvm.x86.tilestored64.internal(i16, i16, ptr, i64, x86_amx)
+
+define void @PR90954(ptr %0, ptr %1, i32 %2) {
+; CHECK-LABEL: PR90954:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushq %rbp
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset %rbp, -16
+; CHECK-NEXT:    movq %rsp, %rbp
+; CHECK-NEXT:    .cfi_def_cfa_register %rbp
+; CHECK-NEXT:    pushq %r15
+; CHECK-NEXT:    pushq %r14
+; CHECK-NEXT:    pushq %r13
+; CHECK-NEXT:    pushq %r12
+; CHECK-NEXT:    pushq %rbx
+; CHECK-NEXT:    andq $-1024, %rsp # imm = 0xFC00
+; CHECK-NEXT:    subq $5120, %rsp # imm = 0x1400
+; CHECK-NEXT:    .cfi_offset %rbx, -56
+; CHECK-NEXT:    .cfi_offset %r12, -48
+; CHECK-NEXT:    .cfi_offset %r13, -40
+; CHECK-NEXT:    .cfi_offset %r14, -32
+; CHECK-NEXT:    .cfi_offset %r15, -24
+; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; CHECK-NEXT:    vmovups %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movb $1, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movb $16, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movw $64, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movb $16, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movw $64, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movb $16, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movw $64, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    ldtilecfg {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    shll $4, %edx
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    movw $64, %cx
+; CHECK-NEXT:    movw $16, %di
+; CHECK-NEXT:    movb $1, %r8b
+; CHECK-NEXT:    movl $64, %r9d
+; CHECK-NEXT:    leaq {{[0-9]+}}(%rsp), %r10
+; CHECK-NEXT:    leaq {{[0-9]+}}(%rsp), %r11
+; CHECK-NEXT:    xorl %ebx, %ebx
+; CHECK-NEXT:    xorl %r14d, %r14d
+; CHECK-NEXT:    jmp .LBB1_1
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB1_5: # in Loop: Header=BB1_1 Depth=1
+; CHECK-NEXT:    incq %r14
+; CHECK-NEXT:    addl %edx, %ebx
+; CHECK-NEXT:  .LBB1_1: # =>This Loop Header: Depth=1
+; CHECK-NEXT:    # Child Loop BB1_2 Depth 2
+; CHECK-NEXT:    movslq %ebx, %r15
+; CHECK-NEXT:    leaq (%rsi,%r15,4), %r15
+; CHECK-NEXT:    xorl %r12d, %r12d
+; CHECK-NEXT:    xorl %r13d, %r13d
+; CHECK-NEXT:    jmp .LBB1_2
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB1_4: # in Loop: Header=BB1_2 Depth=2
+; CHECK-NEXT:    tilestored %tmm1, (%r15,%rax)
+; CHECK-NEXT:    incq %r13
+; CHECK-NEXT:    addq $64, %r15
+; CHECK-NEXT:    decq %r12
+; CHECK-NEXT:    je .LBB1_5
+; CHECK-NEXT:  .LBB1_2: # Parent Loop BB1_1 Depth=1
+; CHECK-NEXT:    # => This Inner Loop Header: Depth=2
+; CHECK-NEXT:    tilezero %tmm0
+; CHECK-NEXT:    tilezero %tmm1
+; CHECK-NEXT:    testb %r8b, %r8b
+; CHECK-NEXT:    jne .LBB1_4
+; CHECK-NEXT:  # %bb.3: # in Loop: Header=BB1_2 Depth=2
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    tileloadd (%r10,%r9), %tmm1
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %zmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    tileloadd (%r11,%r9), %tmm2
+; CHECK-NEXT:    tdpbf16ps %tmm2, %tmm1, %tmm0
+; CHECK-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-NEXT:    movabsq $64, %rax
+; CHECK-NEXT:    tilestored %tmm0, 3072(%rsp,%rax) # 1024-byte Folded Spill
+; CHECK-NEXT:    tileloadd {{[-0-9]+}}(%r{{[sb]}}p), %tmm1 # 1024-byte Folded Reload
+; CHECK-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; CHECK-NEXT:    jmp .LBB1_4
+  %4 = shl i32 %2, 4
+  %5 = icmp eq i64 0, 0
+  br label %6
+
+6:                                                ; preds = %31, %3
+  %7 = phi i64 [ 0, %3 ], [ %32, %31 ]
+  %8 = trunc nuw nsw i64 %7 to i32
+  %9 = mul i32 %4, %8
+  %10 = mul i32 0, %8
+  %11 = sext i32 %9 to i64
+  %12 = getelementptr inbounds i32, ptr %1, i64 %11
+  br label %13
+
+13:                                               ; preds = %25, %6
+  %14 = phi i64 [ %29, %25 ], [ 0, %6 ]
+  %15 = tail call x86_amx @llvm.x86.tilezero.internal(i16 16, i16 64)
+  %16 = tail call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %15)
+  %17 = shl nsw i64 %14, 4
+  %18 = getelementptr i32, ptr %0, i64 %17
+  br i1 %5, label %25, label %19
+
+19:                                               ; preds = %13
+  %20 = tail call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %16)
+  %21 = tail call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> zeroinitializer)
+  %22 = tail call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> zeroinitializer)
+  %23 = tail call x86_amx @llvm.x86.tdpbf16ps.internal(i16 16, i16 64, i16 64, x86_amx %20, x86_amx %21, x86_amx %22)
+  %24 = tail call noundef <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %23)
+  br label %25
+
+25:                                               ; preds = %19, %13
+  %26 = phi <256 x i32> [ undef, %13 ], [ %24, %19 ]
+  %27 = getelementptr inbounds i32, ptr %12, i64 %17
+  %28 = tail call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %26)
+  tail call void @llvm.x86.tilestored64.internal(i16 16, i16 64, ptr %27, i64 0, x86_amx %28)
+  %29 = add nuw nsw i64 %14, 1
+  %30 = icmp eq i64 %29, 0
+  br i1 %30, label %31, label %13
+
+31:                                               ; preds = %25
+  %32 = add nuw nsw i64 %7, 1
+  br label %6
+}
+
+declare x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32>)
+declare <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx)
