[llvm] dd04e18 - [X86] fold-pcmpeqd-2.ll - replace X32 check prefix with X86

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 5 04:40:36 PST 2023


Author: Simon Pilgrim
Date: 2023-12-05T12:34:55Z
New Revision: dd04e183045f9fb2fb2322df4d623b60c0942850

URL: https://github.com/llvm/llvm-project/commit/dd04e183045f9fb2fb2322df4d623b60c0942850
DIFF: https://github.com/llvm/llvm-project/commit/dd04e183045f9fb2fb2322df4d623b60c0942850.diff

LOG: [X86] fold-pcmpeqd-2.ll - replace X32 check prefix with X86

We use the X32 check prefix for gnux32 triples; X86 should be used for plain 32-bit (i386) triples.
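
For reference, a minimal sketch of the intended prefix convention (illustrative RUN lines only, not part of this commit; the triples shown are assumed examples):

    ; RUN: llc < %s -mtriple=i386-unknown-linux-gnu   | FileCheck %s --check-prefix=X86
    ; RUN: llc < %s -mtriple=x86_64-linux-gnux32      | FileCheck %s --check-prefix=X32
    ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=X64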

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/fold-pcmpeqd-2.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/fold-pcmpeqd-2.ll b/llvm/test/CodeGen/X86/fold-pcmpeqd-2.ll
index 88425ea87845d..5dcb1d63207d1 100644
--- a/llvm/test/CodeGen/X86/fold-pcmpeqd-2.ll
+++ b/llvm/test/CodeGen/X86/fold-pcmpeqd-2.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=yonah -regalloc=basic | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=yonah -regalloc=basic | FileCheck %s --check-prefix=X86
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -regalloc=basic | FileCheck %s --check-prefix=X64
 
 ; This testcase should need to spill the -1 value on both x86-32 and x86-64,
@@ -15,87 +15,87 @@
 	%struct._image2d_t = type <{ ptr, %struct._cl_image_format_t, i32, i32, i32, i32, i32, i32 }>
 
 define void @program_1(ptr %dest, ptr %t0, <4 x float> %p0, <4 x float> %p1, <4 x float> %p4, <4 x float> %p5, <4 x float> %p6) nounwind {
-; X32-LABEL: program_1:
-; X32:       ## %bb.0: ## %entry
-; X32-NEXT:    cmpl $0, 0
-; X32-NEXT:    jle LBB0_2
-; X32-NEXT:  ## %bb.1: ## %forcond
-; X32-NEXT:    cmpl $0, 0
-; X32-NEXT:    jg LBB0_3
-; X32-NEXT:  LBB0_2: ## %ifthen
-; X32-NEXT:    retl
-; X32-NEXT:  LBB0_3: ## %forbody
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    subl $88, %esp
-; X32-NEXT:    movaps {{.*#+}} xmm1 = [1.28E+2,1.28E+2,1.28E+2,1.28E+2]
-; X32-NEXT:    minps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X32-NEXT:    cvttps2dq %xmm1, %xmm0
-; X32-NEXT:    cvtdq2ps %xmm0, %xmm0
-; X32-NEXT:    subps %xmm0, %xmm1
-; X32-NEXT:    movaps %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
-; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
-; X32-NEXT:    mulps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
-; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
-; X32-NEXT:    addps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
-; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
-; X32-NEXT:    mulps %xmm1, %xmm0
-; X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
-; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
-; X32-NEXT:    addps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
-; X32-NEXT:    movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
-; X32-NEXT:    psubd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
-; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
-; X32-NEXT:    mulps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
-; X32-NEXT:    xorps %xmm0, %xmm0
-; X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
-; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
-; X32-NEXT:    mulps %xmm0, %xmm0
-; X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
-; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
-; X32-NEXT:    mulps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
-; X32-NEXT:    xorps %xmm0, %xmm0
-; X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
-; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
-; X32-NEXT:    cmpunordps %xmm0, %xmm0
-; X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
-; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
-; X32-NEXT:    minps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
-; X32-NEXT:    xorps %xmm0, %xmm0
-; X32-NEXT:    movaps %xmm0, {{[0-9]+}}(%esp)
-; X32-NEXT:    movl $0, (%esp)
-; X32-NEXT:    xorl %esi, %esi
-; X32-NEXT:    xorps %xmm3, %xmm3
-; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
-; X32-NEXT:    movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 ## 16-byte Reload
-; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 ## 16-byte Reload
-; X32-NEXT:    calll *%esi
-; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
-; X32-NEXT:    minps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
-; X32-NEXT:    pxor %xmm1, %xmm1
-; X32-NEXT:    psubd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 ## 16-byte Folded Reload
-; X32-NEXT:    movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
-; X32-NEXT:    psubd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
-; X32-NEXT:    movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
-; X32-NEXT:    por %xmm1, %xmm0
-; X32-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
-; X32-NEXT:    pxor %xmm0, %xmm0
-; X32-NEXT:    movdqa %xmm0, {{[0-9]+}}(%esp)
-; X32-NEXT:    movl $0, (%esp)
-; X32-NEXT:    xorps %xmm3, %xmm3
-; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
-; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 ## 16-byte Reload
-; X32-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 ## 16-byte Reload
-; X32-NEXT:    calll *%esi
-; X32-NEXT:    ud2
+; X86-LABEL: program_1:
+; X86:       ## %bb.0: ## %entry
+; X86-NEXT:    cmpl $0, 0
+; X86-NEXT:    jle LBB0_2
+; X86-NEXT:  ## %bb.1: ## %forcond
+; X86-NEXT:    cmpl $0, 0
+; X86-NEXT:    jg LBB0_3
+; X86-NEXT:  LBB0_2: ## %ifthen
+; X86-NEXT:    retl
+; X86-NEXT:  LBB0_3: ## %forbody
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    subl $88, %esp
+; X86-NEXT:    movaps {{.*#+}} xmm1 = [1.28E+2,1.28E+2,1.28E+2,1.28E+2]
+; X86-NEXT:    minps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-NEXT:    cvttps2dq %xmm1, %xmm0
+; X86-NEXT:    cvtdq2ps %xmm0, %xmm0
+; X86-NEXT:    subps %xmm0, %xmm1
+; X86-NEXT:    movaps %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X86-NEXT:    mulps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X86-NEXT:    addps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X86-NEXT:    mulps %xmm1, %xmm0
+; X86-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X86-NEXT:    addps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X86-NEXT:    movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X86-NEXT:    psubd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X86-NEXT:    mulps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X86-NEXT:    xorps %xmm0, %xmm0
+; X86-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X86-NEXT:    mulps %xmm0, %xmm0
+; X86-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X86-NEXT:    mulps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X86-NEXT:    xorps %xmm0, %xmm0
+; X86-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X86-NEXT:    cmpunordps %xmm0, %xmm0
+; X86-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X86-NEXT:    minps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X86-NEXT:    xorps %xmm0, %xmm0
+; X86-NEXT:    movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl $0, (%esp)
+; X86-NEXT:    xorl %esi, %esi
+; X86-NEXT:    xorps %xmm3, %xmm3
+; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X86-NEXT:    movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 ## 16-byte Reload
+; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 ## 16-byte Reload
+; X86-NEXT:    calll *%esi
+; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X86-NEXT:    minps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X86-NEXT:    pxor %xmm1, %xmm1
+; X86-NEXT:    psubd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 ## 16-byte Folded Reload
+; X86-NEXT:    movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X86-NEXT:    psubd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X86-NEXT:    movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X86-NEXT:    por %xmm1, %xmm0
+; X86-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X86-NEXT:    pxor %xmm0, %xmm0
+; X86-NEXT:    movdqa %xmm0, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl $0, (%esp)
+; X86-NEXT:    xorps %xmm3, %xmm3
+; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 ## 16-byte Reload
+; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 ## 16-byte Reload
+; X86-NEXT:    calll *%esi
+; X86-NEXT:    ud2
 ;
 ; X64-LABEL: program_1:
 ; X64:       ## %bb.0: ## %entry


        

