[llvm] a8913f8 - [X86] Regenerate pr38539.ll

Hongtao Yu via llvm-commits llvm-commits at lists.llvm.org
Wed Oct 25 14:48:40 PDT 2023


Hi Simon,

The test now has hard-coded stack offsets, such as `subl $176, %esp`, that fail our downstream testing. They may also not be immune to future codegen changes. Would it make sense to make them symbolic?
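
For reference, the update script already scrubs frame offsets inside memory operands into patterns like `{{[0-9]+}}(%esp)`, so a symbolic form of the immediate seems plausible too. Something like the following (illustrative only, not something update_llc_test_checks.py emits today):

  ; X86-NEXT:    subl ${{[0-9]+}}, %esp

That would keep the check stable across minor frame-size changes while still verifying that the stack adjustment happens.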

Thanks,
Hongtao


From: llvm-commits <llvm-commits-bounces at lists.llvm.org> on behalf of Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Date: Wednesday, October 25, 2023 at 5:25 AM
To: llvm-commits at lists.llvm.org <llvm-commits at lists.llvm.org>
Subject: [llvm] a8913f8 - [X86] Regenerate pr38539.ll


Author: Simon Pilgrim
Date: 2023-10-25T13:25:15+01:00
New Revision: a8913f8e04bea5ab446d1ccce681657013fc0fa9

URL: https://github.com/llvm/llvm-project/commit/a8913f8e04bea5ab446d1ccce681657013fc0fa9
DIFF: https://github.com/llvm/llvm-project/commit/a8913f8e04bea5ab446d1ccce681657013fc0fa9.diff

LOG: [X86] Regenerate pr38539.ll

Even though we're only interested in the X64 codegen for the first test, it's much easier to maintain if we just let the update script generate the codegen checks for X86 as well.
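
For anyone wanting to refresh checks like these locally, the usual workflow is to run the update script against the test file; a typical invocation looks something like this (adjust the --llc-binary path to your own build):

  llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc llvm/test/CodeGen/X86/pr38539.ll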

Added:


Modified:
    llvm/test/CodeGen/X86/pr38539.ll

Removed:



################################################################################
diff  --git a/llvm/test/CodeGen/X86/pr38539.ll b/llvm/test/CodeGen/X86/pr38539.ll
index 97f5985cf9092b9..04aff9b7d2e5863 100644
--- a/llvm/test/CodeGen/X86/pr38539.ll
+++ b/llvm/test/CodeGen/X86/pr38539.ll
@@ -3,7 +3,7 @@
 ; RUN: llc < %s -mtriple=i686-unknown -verify-machineinstrs | FileCheck %s --check-prefix=X86

 ; This test is targeted at 64-bit mode. It used to crash due to the creation of an EXTRACT_SUBREG after the peephole pass had run.
-define void @f() {
+define void @f() nounwind {
 ; X64-LABEL: f:
 ; X64:       # %bb.0: # %BB
 ; X64-NEXT:    movzbl (%rax), %eax
@@ -13,6 +13,242 @@ define void @f() {
 ; X64-NEXT:    movq %rax, (%rax)
 ; X64-NEXT:    movb $0, (%rax)
 ; X64-NEXT:    retq
+;
+; X86-LABEL: f:
+; X86:       # %bb.0: # %BB_udiv-special-cases
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    andl $-16, %esp
+; X86-NEXT:    subl $176, %esp
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movzbl (%eax), %eax
+; X86-NEXT:    movzbl (%eax), %ecx
+; X86-NEXT:    movzbl %al, %eax
+; X86-NEXT:    movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    divb %cl
+; X86-NEXT:    movl %edx, %eax
+; X86-NEXT:    shll $30, %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    sarl $30, %ecx
+; X86-NEXT:    sarl $31, %eax
+; X86-NEXT:    shrdl $1, %eax, %ecx
+; X86-NEXT:    xorl %eax, %edx
+; X86-NEXT:    xorl %eax, %edi
+; X86-NEXT:    xorl %ecx, %esi
+; X86-NEXT:    subl %ecx, %esi
+; X86-NEXT:    sbbl %eax, %edi
+; X86-NEXT:    sbbl %eax, %edx
+; X86-NEXT:    andl $3, %edx
+; X86-NEXT:    testl %edi, %edi
+; X86-NEXT:    jne .LBB0_1
+; X86-NEXT:  # %bb.2: # %BB_udiv-special-cases
+; X86-NEXT:    bsrl %esi, %ecx
+; X86-NEXT:    xorl $31, %ecx
+; X86-NEXT:    addl $32, %ecx
+; X86-NEXT:    jmp .LBB0_3
+; X86-NEXT:  .LBB0_1:
+; X86-NEXT:    bsrl %edi, %ecx
+; X86-NEXT:    xorl $31, %ecx
+; X86-NEXT:  .LBB0_3: # %BB_udiv-special-cases
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    testl %edx, %edx
+; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    jne .LBB0_4
+; X86-NEXT:  # %bb.5: # %BB_udiv-special-cases
+; X86-NEXT:    addl $64, %ecx
+; X86-NEXT:    jmp .LBB0_6
+; X86-NEXT:  .LBB0_4:
+; X86-NEXT:    bsrl %edx, %ecx
+; X86-NEXT:    xorl $31, %ecx
+; X86-NEXT:    addl $32, %ecx
+; X86-NEXT:  .LBB0_6: # %BB_udiv-special-cases
+; X86-NEXT:    subl $62, %ecx
+; X86-NEXT:    movl $0, %ebx
+; X86-NEXT:    sbbl %ebx, %ebx
+; X86-NEXT:    sbbl %eax, %eax
+; X86-NEXT:    addl $-66, %ecx
+; X86-NEXT:    adcl $-1, %ebx
+; X86-NEXT:    adcl $3, %eax
+; X86-NEXT:    movl %eax, %edi
+; X86-NEXT:    movb $1, %al
+; X86-NEXT:    testb %al, %al
+; X86-NEXT:    jne .LBB0_11
+; X86-NEXT:  # %bb.7: # %BB_udiv-special-cases
+; X86-NEXT:    andl $3, %edi
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    xorl $65, %eax
+; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    orl %edi, %eax
+; X86-NEXT:    orl %ebx, %eax
+; X86-NEXT:    je .LBB0_11
+; X86-NEXT:  # %bb.8: # %udiv-bb1
+; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    addl $1, %ecx
+; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    adcl $0, %ebx
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NEXT:    adcl $0, %esi
+; X86-NEXT:    andl $3, %esi
+; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movb $65, %cl
+; X86-NEXT:    subb %al, %cl
+; X86-NEXT:    movb %cl, %ch
+; X86-NEXT:    andb $7, %ch
+; X86-NEXT:    shrb $3, %cl
+; X86-NEXT:    andb $15, %cl
+; X86-NEXT:    negb %cl
+; X86-NEXT:    movsbl %cl, %eax
+; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl 136(%esp,%eax), %edx
+; X86-NEXT:    movb %ch, %cl
+; X86-NEXT:    shll %cl, %edx
+; X86-NEXT:    notb %cl
+; X86-NEXT:    movl 128(%esp,%eax), %edi
+; X86-NEXT:    movl 132(%esp,%eax), %esi
+; X86-NEXT:    movl %esi, %eax
+; X86-NEXT:    shrl %eax
+; X86-NEXT:    shrl %cl, %eax
+; X86-NEXT:    movb %ch, %cl
+; X86-NEXT:    shldl %cl, %edi, %esi
+; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    shll %cl, %edi
+; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X86-NEXT:    orl %ebx, %ecx
+; X86-NEXT:    je .LBB0_11
+; X86-NEXT:  # %bb.9: # %udiv-preheader
+; X86-NEXT:    orl %eax, %edx
+; X86-NEXT:    andl $3, %edx
+; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT:    movb %al, %ch
+; X86-NEXT:    andb $7, %ch
+; X86-NEXT:    # kill: def $al killed $al killed $eax
+; X86-NEXT:    shrb $3, %al
+; X86-NEXT:    andb $15, %al
+; X86-NEXT:    movzbl %al, %esi
+; X86-NEXT:    movl 80(%esp,%esi), %edx
+; X86-NEXT:    movl 84(%esp,%esi), %eax
+; X86-NEXT:    movl %eax, %edi
+; X86-NEXT:    movb %ch, %cl
+; X86-NEXT:    shrl %cl, %edi
+; X86-NEXT:    notb %cl
+; X86-NEXT:    movl 88(%esp,%esi), %esi
+; X86-NEXT:    addl %esi, %esi
+; X86-NEXT:    shll %cl, %esi
+; X86-NEXT:    orl %edi, %esi
+; X86-NEXT:    movb %ch, %cl
+; X86-NEXT:    shrdl %cl, %eax, %edx
+; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT:    addl $-1, %eax
+; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT:    adcl $-1, %eax
+; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT:    adcl $3, %eax
+; X86-NEXT:    andl $3, %eax
+; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    xorl %ecx, %ecx
+; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:  .LBB0_10: # %udiv-do-while
+; X86-NEXT:    # =>This Inner Loop Header: Depth=1
+; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    shldl $1, %esi, %ecx
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT:    shldl $1, %edx, %esi
+; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT:    movl %ebx, %eax
+; X86-NEXT:    andl $2, %eax
+; X86-NEXT:    shrl %eax
+; X86-NEXT:    leal (%eax,%edx,2), %edx
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-NEXT:    shldl $1, %edi, %ebx
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NEXT:    orl %esi, %ebx
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT:    shldl $1, %eax, %edi
+; X86-NEXT:    orl %esi, %edi
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    addl %eax, %eax
+; X86-NEXT:    orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    andl $3, %ebx
+; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    cmpl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT:    sbbl %esi, %eax
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT:    sbbl %ecx, %ebx
+; X86-NEXT:    shll $30, %ebx
+; X86-NEXT:    movl %ebx, %eax
+; X86-NEXT:    sarl $30, %eax
+; X86-NEXT:    sarl $31, %ebx
+; X86-NEXT:    shrdl $1, %ebx, %eax
+; X86-NEXT:    movl %eax, %edi
+; X86-NEXT:    andl $1, %edi
+; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-NEXT:    movl %ebx, %edi
+; X86-NEXT:    andl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X86-NEXT:    andl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X86-NEXT:    subl %eax, %edx
+; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    sbbl %ebx, %esi
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT:    sbbl %edi, %ecx
+; X86-NEXT:    andl $3, %ecx
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT:    addl $-1, %eax
+; X86-NEXT:    adcl $-1, %ebx
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-NEXT:    adcl $3, %edi
+; X86-NEXT:    andl $3, %edi
+; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    orl %edi, %eax
+; X86-NEXT:    orl %ebx, %eax
+; X86-NEXT:    jne .LBB0_10
+; X86-NEXT:  .LBB0_11: # %udiv-end
+; X86-NEXT:    cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
+; X86-NEXT:    setne (%eax)
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl %eax, (%eax)
+; X86-NEXT:    movb $0, (%eax)
+; X86-NEXT:    leal -12(%ebp), %esp
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 BB:
   %A30 = alloca i66
   %L17 = load i66, ptr %A30
@@ -38,7 +274,7 @@ BB:
 }

 ; Similar to above, but bitwidth adjusted to target 32-bit mode. This also shows that we didn't constrain the register class when extracting a subreg.
-define void @g() {
+define void @g() nounwind {
 ; X64-LABEL: g:
 ; X64:       # %bb.0: # %BB
 ; X64-NEXT:    movzbl (%rax), %eax
@@ -52,10 +288,7 @@ define void @g() {
 ; X86-LABEL: g:
 ; X86:       # %bb.0: # %BB
 ; X86-NEXT:    pushl %ebp
-; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    .cfi_offset %ebp, -8
 ; X86-NEXT:    movl %esp, %ebp
-; X86-NEXT:    .cfi_def_cfa_register %ebp
 ; X86-NEXT:    andl $-8, %esp
 ; X86-NEXT:    subl $8, %esp
 ; X86-NEXT:    movzbl (%eax), %eax
@@ -66,7 +299,6 @@ define void @g() {
 ; X86-NEXT:    movb $0, (%eax)
 ; X86-NEXT:    movl %ebp, %esp
 ; X86-NEXT:    popl %ebp
-; X86-NEXT:    .cfi_def_cfa %esp, 4
 ; X86-NEXT:    retl
 BB:
   %A30 = alloca i34



_______________________________________________
llvm-commits mailing list
llvm-commits at lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-commits