[llvm] fcacc41 - [X86] Replace X32 test check prefix with X86

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Jun 5 06:41:57 PDT 2023


Author: Simon Pilgrim
Date: 2023-06-05T14:41:40+01:00
New Revision: fcacc41a22741735d470ca2ced33be2339cb39c2

URL: https://github.com/llvm/llvm-project/commit/fcacc41a22741735d470ca2ced33be2339cb39c2
DIFF: https://github.com/llvm/llvm-project/commit/fcacc41a22741735d470ca2ced33be2339cb39c2.diff

LOG: [X86] Replace X32 test check prefix with X86

We try to only use the X32 check prefix for gnux32 triple test cases.
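
For context, a minimal sketch of the prefix convention this follows (illustrative RUN lines, not part of this diff; the gnux32 line is an assumed example of where X32 would still be used):

; RUN: llc < %s -mtriple=i686-unknown-unknown   | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=CHECK,X64
; RUN: llc < %s -mtriple=x86_64-linux-gnux32    | FileCheck %s --check-prefixes=CHECK,X32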

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/known-bits.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/known-bits.ll b/llvm/test/CodeGen/X86/known-bits.ll
index 31ca3256e6077..9741f6f0a5e2d 100644
--- a/llvm/test/CodeGen/X86/known-bits.ll
+++ b/llvm/test/CodeGen/X86/known-bits.ll
@@ -1,32 +1,32 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X64
 
 define void @knownbits_zext_in_reg(ptr) nounwind {
-; X32-LABEL: knownbits_zext_in_reg:
-; X32:       # %bb.0: # %BB
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movzbl (%eax), %ecx
-; X32-NEXT:    imull $101, %ecx, %eax
-; X32-NEXT:    shrl $14, %eax
-; X32-NEXT:    imull $177, %ecx, %edx
-; X32-NEXT:    shrl $14, %edx
-; X32-NEXT:    movzbl %al, %ecx
-; X32-NEXT:    xorl %ebx, %ebx
-; X32-NEXT:    .p2align 4, 0x90
-; X32-NEXT:  .LBB0_1: # %CF
-; X32-NEXT:    # =>This Loop Header: Depth=1
-; X32-NEXT:    # Child Loop BB0_2 Depth 2
-; X32-NEXT:    movl %ecx, %eax
-; X32-NEXT:    divb %dl
-; X32-NEXT:    .p2align 4, 0x90
-; X32-NEXT:  .LBB0_2: # %CF237
-; X32-NEXT:    # Parent Loop BB0_1 Depth=1
-; X32-NEXT:    # => This Inner Loop Header: Depth=2
-; X32-NEXT:    testb %bl, %bl
-; X32-NEXT:    jne .LBB0_2
-; X32-NEXT:    jmp .LBB0_1
+; X86-LABEL: knownbits_zext_in_reg:
+; X86:       # %bb.0: # %BB
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movzbl (%eax), %ecx
+; X86-NEXT:    imull $101, %ecx, %eax
+; X86-NEXT:    shrl $14, %eax
+; X86-NEXT:    imull $177, %ecx, %edx
+; X86-NEXT:    shrl $14, %edx
+; X86-NEXT:    movzbl %al, %ecx
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:  .LBB0_1: # %CF
+; X86-NEXT:    # =>This Loop Header: Depth=1
+; X86-NEXT:    # Child Loop BB0_2 Depth 2
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    divb %dl
+; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:  .LBB0_2: # %CF237
+; X86-NEXT:    # Parent Loop BB0_1 Depth=1
+; X86-NEXT:    # => This Inner Loop Header: Depth=2
+; X86-NEXT:    testb %bl, %bl
+; X86-NEXT:    jne .LBB0_2
+; X86-NEXT:    jmp .LBB0_1
 ;
 ; X64-LABEL: knownbits_zext_in_reg:
 ; X64:       # %bb.0: # %BB
@@ -74,15 +74,10 @@ CF246:                                            ; preds = %CF237
 }
 
 define i32 @knownbits_mask_add_lshr(i32 %a0, i32 %a1) nounwind {
-; X32-LABEL: knownbits_mask_add_lshr:
-; X32:       # %bb.0:
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    retl
-;
-; X64-LABEL: knownbits_mask_add_lshr:
-; X64:       # %bb.0:
-; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    retq
+; CHECK-LABEL: knownbits_mask_add_lshr:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = and i32 %a0, 32767
   %2 = and i32 %a1, 32766
   %3 = add i32 %1, %2
@@ -91,29 +86,29 @@ define i32 @knownbits_mask_add_lshr(i32 %a0, i32 %a1) nounwind {
 }
 
 define i128 @knownbits_mask_addc_shl(i64 %a0, i64 %a1, i64 %a2) nounwind {
-; X32-LABEL: knownbits_mask_addc_shl:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %edi
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    movl $-1024, %esi # imm = 0xFC00
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT:    andl %esi, %edi
-; X32-NEXT:    andl {{[0-9]+}}(%esp), %esi
-; X32-NEXT:    addl %edi, %esi
-; X32-NEXT:    adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    adcl $0, %ecx
-; X32-NEXT:    shldl $22, %edx, %ecx
-; X32-NEXT:    shldl $22, %esi, %edx
-; X32-NEXT:    movl %edx, 8(%eax)
-; X32-NEXT:    movl %ecx, 12(%eax)
-; X32-NEXT:    movl $0, 4(%eax)
-; X32-NEXT:    movl $0, (%eax)
-; X32-NEXT:    popl %esi
-; X32-NEXT:    popl %edi
-; X32-NEXT:    retl $4
+; X86-LABEL: knownbits_mask_addc_shl:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl $-1024, %esi # imm = 0xFC00
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    andl %esi, %edi
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    addl %edi, %esi
+; X86-NEXT:    adcl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    adcl $0, %ecx
+; X86-NEXT:    shldl $22, %edx, %ecx
+; X86-NEXT:    shldl $22, %esi, %edx
+; X86-NEXT:    movl %edx, 8(%eax)
+; X86-NEXT:    movl %ecx, 12(%eax)
+; X86-NEXT:    movl $0, 4(%eax)
+; X86-NEXT:    movl $0, (%eax)
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    retl $4
 ;
 ; X64-LABEL: knownbits_mask_addc_shl:
 ; X64:       # %bb.0:
@@ -137,15 +132,15 @@ define i128 @knownbits_mask_addc_shl(i64 %a0, i64 %a1, i64 %a2) nounwind {
 }
 
 define {i32, i1} @knownbits_uaddo_saddo(i64 %a0, i64 %a1) nounwind {
-; X32-LABEL: knownbits_uaddo_saddo:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    setb %al
-; X32-NEXT:    seto %dl
-; X32-NEXT:    orb %al, %dl
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    retl
+; X86-LABEL: knownbits_uaddo_saddo:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    setb %al
+; X86-NEXT:    seto %dl
+; X86-NEXT:    orb %al, %dl
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_uaddo_saddo:
 ; X64:       # %bb.0:
@@ -174,15 +169,15 @@ define {i32, i1} @knownbits_uaddo_saddo(i64 %a0, i64 %a1) nounwind {
 }
 
 define {i32, i1} @knownbits_usubo_ssubo(i64 %a0, i64 %a1) nounwind {
-; X32-LABEL: knownbits_usubo_ssubo:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    setb %al
-; X32-NEXT:    seto %dl
-; X32-NEXT:    orb %al, %dl
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    retl
+; X86-LABEL: knownbits_usubo_ssubo:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    setb %al
+; X86-NEXT:    seto %dl
+; X86-NEXT:    orb %al, %dl
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_usubo_ssubo:
 ; X64:       # %bb.0:
@@ -216,30 +211,20 @@ declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
 declare {i64, i1} @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
 
 define i32 @knownbits_fshl(i32 %a0) nounwind {
-; X32-LABEL: knownbits_fshl:
-; X32:       # %bb.0:
-; X32-NEXT:    movl $3, %eax
-; X32-NEXT:    retl
-;
-; X64-LABEL: knownbits_fshl:
-; X64:       # %bb.0:
-; X64-NEXT:    movl $3, %eax
-; X64-NEXT:    retq
+; CHECK-LABEL: knownbits_fshl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl $3, %eax
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = tail call i32 @llvm.fshl.i32(i32 %a0, i32 -1, i32 5)
   %2 = and i32 %1, 3
   ret i32 %2
 }
 
 define i32 @knownbits_fshr(i32 %a0) nounwind {
-; X32-LABEL: knownbits_fshr:
-; X32:       # %bb.0:
-; X32-NEXT:    movl $3, %eax
-; X32-NEXT:    retl
-;
-; X64-LABEL: knownbits_fshr:
-; X64:       # %bb.0:
-; X64-NEXT:    movl $3, %eax
-; X64-NEXT:    retq
+; CHECK-LABEL: knownbits_fshr:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl $3, %eax
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = tail call i32 @llvm.fshr.i32(i32 %a0, i32 -1, i32 5)
   %2 = and i32 %1, 3
   ret i32 %2

More information about the llvm-commits mailing list