[llvm] 8d450b4 - [X86] mmx-arith.ll - replace X32 check prefixes with X86 + strip cfi noise

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 31 03:04:49 PST 2024


Author: Simon Pilgrim
Date: 2024-01-31T11:04:19Z
New Revision: 8d450b47ba28748edfe975b35ab603bd43688d9b

URL: https://github.com/llvm/llvm-project/commit/8d450b47ba28748edfe975b35ab603bd43688d9b
DIFF: https://github.com/llvm/llvm-project/commit/8d450b47ba28748edfe975b35ab603bd43688d9b.diff

LOG: [X86] mmx-arith.ll - replace X32 check prefixes with X86 + strip cfi noise

We try to reserve the X32 check prefix for tests using a gnux32 triple.
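
A minimal sketch of the two conventions this rename relies on (the triples, function name and check lines below are illustrative only, not taken from this test):

; X32 is reserved for the gnux32 ABI; plain 32-bit x86 uses X86:
; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck -check-prefix=X86 %s
; RUN: llc < %s -mtriple=x86_64-linux-gnux32 | FileCheck -check-prefix=X32 %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck -check-prefix=X64 %s

; Marking a test function nounwind keeps .cfi_* directives out of the
; generated assembly, so autogenerated checks stay free of unwind noise.
define i32 @example(i32 %x) nounwind {
; X86-LABEL: example:
; X32-LABEL: example:
; X64-LABEL: example:
entry:
  ret i32 %x
}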

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/mmx-arith.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/X86/mmx-arith.ll b/llvm/test/CodeGen/X86/mmx-arith.ll
index 27a9acf181ea2..68287a4feee47 100644
--- a/llvm/test/CodeGen/X86/mmx-arith.ll
+++ b/llvm/test/CodeGen/X86/mmx-arith.ll
@@ -1,52 +1,52 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+mmx,+sse2 | FileCheck -check-prefix=X32 %s
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+mmx,+sse2 | FileCheck -check-prefix=X86 %s
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+sse2 | FileCheck -check-prefix=X64 %s
 
 ;; A basic functional check to make sure that MMX arithmetic actually compiles.
 ;; First is a straight translation of the original with bitcasts as needed.
 
-define void @test0(ptr %A, ptr %B) {
-; X32-LABEL: test0:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT:    paddb %xmm0, %xmm1
-; X32-NEXT:    movdq2q %xmm1, %mm0
-; X32-NEXT:    movq %xmm1, (%eax)
-; X32-NEXT:    paddsb (%ecx), %mm0
-; X32-NEXT:    movq %mm0, (%eax)
-; X32-NEXT:    paddusb (%ecx), %mm0
-; X32-NEXT:    movq %mm0, (%eax)
-; X32-NEXT:    movq2dq %mm0, %xmm0
-; X32-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT:    psubb %xmm1, %xmm0
-; X32-NEXT:    movdq2q %xmm0, %mm0
-; X32-NEXT:    movq %xmm0, (%eax)
-; X32-NEXT:    psubsb (%ecx), %mm0
-; X32-NEXT:    movq %mm0, (%eax)
-; X32-NEXT:    psubusb (%ecx), %mm0
-; X32-NEXT:    movq %mm0, (%eax)
-; X32-NEXT:    movq2dq %mm0, %xmm0
-; X32-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-NEXT:    pmullw %xmm0, %xmm1
-; X32-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X32-NEXT:    packuswb %xmm1, %xmm1
-; X32-NEXT:    movq %xmm1, (%eax)
-; X32-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    pand %xmm1, %xmm0
-; X32-NEXT:    movq %xmm0, (%eax)
-; X32-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT:    por %xmm0, %xmm1
-; X32-NEXT:    movq %xmm1, (%eax)
-; X32-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    pxor %xmm1, %xmm0
-; X32-NEXT:    movq %xmm0, (%eax)
-; X32-NEXT:    emms
-; X32-NEXT:    retl
+define void @test0(ptr %A, ptr %B) nounwind {
+; X86-LABEL: test0:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT:    paddb %xmm0, %xmm1
+; X86-NEXT:    movdq2q %xmm1, %mm0
+; X86-NEXT:    movq %xmm1, (%eax)
+; X86-NEXT:    paddsb (%ecx), %mm0
+; X86-NEXT:    movq %mm0, (%eax)
+; X86-NEXT:    paddusb (%ecx), %mm0
+; X86-NEXT:    movq %mm0, (%eax)
+; X86-NEXT:    movq2dq %mm0, %xmm0
+; X86-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT:    psubb %xmm1, %xmm0
+; X86-NEXT:    movdq2q %xmm0, %mm0
+; X86-NEXT:    movq %xmm0, (%eax)
+; X86-NEXT:    psubsb (%ecx), %mm0
+; X86-NEXT:    movq %mm0, (%eax)
+; X86-NEXT:    psubusb (%ecx), %mm0
+; X86-NEXT:    movq %mm0, (%eax)
+; X86-NEXT:    movq2dq %mm0, %xmm0
+; X86-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X86-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X86-NEXT:    pmullw %xmm0, %xmm1
+; X86-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-NEXT:    packuswb %xmm1, %xmm1
+; X86-NEXT:    movq %xmm1, (%eax)
+; X86-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    pand %xmm1, %xmm0
+; X86-NEXT:    movq %xmm0, (%eax)
+; X86-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT:    por %xmm0, %xmm1
+; X86-NEXT:    movq %xmm1, (%eax)
+; X86-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    pxor %xmm1, %xmm0
+; X86-NEXT:    movq %xmm0, (%eax)
+; X86-NEXT:    emms
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test0:
 ; X64:       # %bb.0: # %entry
@@ -139,35 +139,35 @@ entry:
   ret void
 }
 
-define void @test1(ptr %A, ptr %B) {
-; X32-LABEL: test1:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    paddd %xmm1, %xmm0
-; X32-NEXT:    movq %xmm0, (%eax)
-; X32-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; X32-NEXT:    pmuludq %xmm1, %xmm0
-; X32-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
-; X32-NEXT:    pmuludq %xmm1, %xmm2
-; X32-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
-; X32-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X32-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X32-NEXT:    movq %xmm0, (%eax)
-; X32-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT:    pand %xmm0, %xmm1
-; X32-NEXT:    movq %xmm1, (%eax)
-; X32-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    por %xmm1, %xmm0
-; X32-NEXT:    movq %xmm0, (%eax)
-; X32-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT:    pxor %xmm0, %xmm1
-; X32-NEXT:    movq %xmm1, (%eax)
-; X32-NEXT:    emms
-; X32-NEXT:    retl
+define void @test1(ptr %A, ptr %B) nounwind {
+; X86-LABEL: test1:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    paddd %xmm1, %xmm0
+; X86-NEXT:    movq %xmm0, (%eax)
+; X86-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; X86-NEXT:    pmuludq %xmm1, %xmm0
+; X86-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; X86-NEXT:    pmuludq %xmm1, %xmm2
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X86-NEXT:    movq %xmm0, (%eax)
+; X86-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT:    pand %xmm0, %xmm1
+; X86-NEXT:    movq %xmm1, (%eax)
+; X86-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    por %xmm1, %xmm0
+; X86-NEXT:    movq %xmm0, (%eax)
+; X86-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT:    pxor %xmm0, %xmm1
+; X86-NEXT:    movq %xmm1, (%eax)
+; X86-NEXT:    emms
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test1:
 ; X64:       # %bb.0: # %entry
@@ -236,50 +236,50 @@ entry:
   ret void
 }
 
-define void @test2(ptr %A, ptr %B) {
-; X32-LABEL: test2:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT:    paddw %xmm0, %xmm1
-; X32-NEXT:    movdq2q %xmm1, %mm0
-; X32-NEXT:    movq %xmm1, (%eax)
-; X32-NEXT:    paddsw (%ecx), %mm0
-; X32-NEXT:    movq %mm0, (%eax)
-; X32-NEXT:    paddusw (%ecx), %mm0
-; X32-NEXT:    movq %mm0, (%eax)
-; X32-NEXT:    movq2dq %mm0, %xmm0
-; X32-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT:    psubw %xmm1, %xmm0
-; X32-NEXT:    movdq2q %xmm0, %mm0
-; X32-NEXT:    movq %xmm0, (%eax)
-; X32-NEXT:    psubsw (%ecx), %mm0
-; X32-NEXT:    movq %mm0, (%eax)
-; X32-NEXT:    psubusw (%ecx), %mm0
-; X32-NEXT:    movq %mm0, (%eax)
-; X32-NEXT:    movq2dq %mm0, %xmm0
-; X32-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT:    pmullw %xmm0, %xmm1
-; X32-NEXT:    movdq2q %xmm1, %mm0
-; X32-NEXT:    movq %xmm1, (%eax)
-; X32-NEXT:    pmulhw (%ecx), %mm0
-; X32-NEXT:    movq %mm0, (%eax)
-; X32-NEXT:    pmaddwd (%ecx), %mm0
-; X32-NEXT:    movq %mm0, (%eax)
-; X32-NEXT:    movq2dq %mm0, %xmm0
-; X32-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT:    andps %xmm0, %xmm1
-; X32-NEXT:    movlps %xmm1, (%eax)
-; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    orps %xmm1, %xmm0
-; X32-NEXT:    movlps %xmm0, (%eax)
-; X32-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT:    xorps %xmm0, %xmm1
-; X32-NEXT:    movlps %xmm1, (%eax)
-; X32-NEXT:    emms
-; X32-NEXT:    retl
+define void @test2(ptr %A, ptr %B) nounwind {
+; X86-LABEL: test2:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT:    paddw %xmm0, %xmm1
+; X86-NEXT:    movdq2q %xmm1, %mm0
+; X86-NEXT:    movq %xmm1, (%eax)
+; X86-NEXT:    paddsw (%ecx), %mm0
+; X86-NEXT:    movq %mm0, (%eax)
+; X86-NEXT:    paddusw (%ecx), %mm0
+; X86-NEXT:    movq %mm0, (%eax)
+; X86-NEXT:    movq2dq %mm0, %xmm0
+; X86-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT:    psubw %xmm1, %xmm0
+; X86-NEXT:    movdq2q %xmm0, %mm0
+; X86-NEXT:    movq %xmm0, (%eax)
+; X86-NEXT:    psubsw (%ecx), %mm0
+; X86-NEXT:    movq %mm0, (%eax)
+; X86-NEXT:    psubusw (%ecx), %mm0
+; X86-NEXT:    movq %mm0, (%eax)
+; X86-NEXT:    movq2dq %mm0, %xmm0
+; X86-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT:    pmullw %xmm0, %xmm1
+; X86-NEXT:    movdq2q %xmm1, %mm0
+; X86-NEXT:    movq %xmm1, (%eax)
+; X86-NEXT:    pmulhw (%ecx), %mm0
+; X86-NEXT:    movq %mm0, (%eax)
+; X86-NEXT:    pmaddwd (%ecx), %mm0
+; X86-NEXT:    movq %mm0, (%eax)
+; X86-NEXT:    movq2dq %mm0, %xmm0
+; X86-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT:    andps %xmm0, %xmm1
+; X86-NEXT:    movlps %xmm1, (%eax)
+; X86-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    orps %xmm1, %xmm0
+; X86-NEXT:    movlps %xmm0, (%eax)
+; X86-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT:    xorps %xmm0, %xmm1
+; X86-NEXT:    movlps %xmm1, (%eax)
+; X86-NEXT:    emms
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test2:
 ; X64:       # %bb.0: # %entry
@@ -384,45 +384,45 @@ entry:
 }
 
 define <1 x i64> @test3(ptr %a, ptr %b, i32 %count) nounwind {
-; X32-LABEL: test3:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    pushl %ebp
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    pushl %edi
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    testl %ecx, %ecx
-; X32-NEXT:    je .LBB3_1
-; X32-NEXT:  # %bb.2: # %bb26.preheader
-; X32-NEXT:    xorl %ebx, %ebx
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    .p2align 4, 0x90
-; X32-NEXT:  .LBB3_3: # %bb26
-; X32-NEXT:    # =>This Inner Loop Header: Depth=1
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT:    movl (%edi,%ebx,8), %ebp
-; X32-NEXT:    movl %ecx, %esi
-; X32-NEXT:    movl 4(%edi,%ebx,8), %ecx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT:    addl (%edi,%ebx,8), %ebp
-; X32-NEXT:    adcl 4(%edi,%ebx,8), %ecx
-; X32-NEXT:    addl %ebp, %eax
-; X32-NEXT:    adcl %ecx, %edx
-; X32-NEXT:    movl %esi, %ecx
-; X32-NEXT:    incl %ebx
-; X32-NEXT:    cmpl %esi, %ebx
-; X32-NEXT:    jb .LBB3_3
-; X32-NEXT:    jmp .LBB3_4
-; X32-NEXT:  .LBB3_1:
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:  .LBB3_4: # %bb31
-; X32-NEXT:    popl %esi
-; X32-NEXT:    popl %edi
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    popl %ebp
-; X32-NEXT:    retl
+; X86-LABEL: test3:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    testl %ecx, %ecx
+; X86-NEXT:    je .LBB3_1
+; X86-NEXT:  # %bb.2: # %bb26.preheader
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:  .LBB3_3: # %bb26
+; X86-NEXT:    # =>This Inner Loop Header: Depth=1
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl (%edi,%ebx,8), %ebp
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:    movl 4(%edi,%ebx,8), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    addl (%edi,%ebx,8), %ebp
+; X86-NEXT:    adcl 4(%edi,%ebx,8), %ecx
+; X86-NEXT:    addl %ebp, %eax
+; X86-NEXT:    adcl %ecx, %edx
+; X86-NEXT:    movl %esi, %ecx
+; X86-NEXT:    incl %ebx
+; X86-NEXT:    cmpl %esi, %ebx
+; X86-NEXT:    jb .LBB3_3
+; X86-NEXT:    jmp .LBB3_4
+; X86-NEXT:  .LBB3_1:
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:  .LBB3_4: # %bb31
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test3:
 ; X64:       # %bb.0: # %entry
@@ -466,13 +466,13 @@ bb31:
 
 ; There are no MMX operations here, so we use XMM or i64.
 define void @ti8(double %a, double %b) nounwind {
-; X32-LABEL: ti8:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT:    paddb %xmm0, %xmm1
-; X32-NEXT:    movq %xmm1, 0
-; X32-NEXT:    retl
+; X86-LABEL: ti8:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT:    paddb %xmm0, %xmm1
+; X86-NEXT:    movq %xmm1, 0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: ti8:
 ; X64:       # %bb.0: # %entry
@@ -488,13 +488,13 @@ entry:
 }
 
 define void @ti16(double %a, double %b) nounwind {
-; X32-LABEL: ti16:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT:    paddw %xmm0, %xmm1
-; X32-NEXT:    movq %xmm1, 0
-; X32-NEXT:    retl
+; X86-LABEL: ti16:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT:    paddw %xmm0, %xmm1
+; X86-NEXT:    movq %xmm1, 0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: ti16:
 ; X64:       # %bb.0: # %entry
@@ -510,13 +510,13 @@ entry:
 }
 
 define void @ti32(double %a, double %b) nounwind {
-; X32-LABEL: ti32:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT:    paddd %xmm0, %xmm1
-; X32-NEXT:    movq %xmm1, 0
-; X32-NEXT:    retl
+; X86-LABEL: ti32:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT:    paddd %xmm0, %xmm1
+; X86-NEXT:    movq %xmm1, 0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: ti32:
 ; X64:       # %bb.0: # %entry
@@ -532,15 +532,15 @@ entry:
 }
 
 define void @ti64(double %a, double %b) nounwind {
-; X32-LABEL: ti64:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movl %eax, 0
-; X32-NEXT:    movl %ecx, 4
-; X32-NEXT:    retl
+; X86-LABEL: ti64:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    adcl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %eax, 0
+; X86-NEXT:    movl %ecx, 4
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: ti64:
 ; X64:       # %bb.0: # %entry
@@ -559,12 +559,12 @@ entry:
 
 ; MMX intrinsics calls get us MMX instructions.
 define void @ti8a(double %a, double %b) nounwind {
-; X32-LABEL: ti8a:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movq {{[0-9]+}}(%esp), %mm0
-; X32-NEXT:    paddb {{[0-9]+}}(%esp), %mm0
-; X32-NEXT:    movq %mm0, 0
-; X32-NEXT:    retl
+; X86-LABEL: ti8a:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movq {{[0-9]+}}(%esp), %mm0
+; X86-NEXT:    paddb {{[0-9]+}}(%esp), %mm0
+; X86-NEXT:    movq %mm0, 0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: ti8a:
 ; X64:       # %bb.0: # %entry
@@ -582,12 +582,12 @@ entry:
 }
 
 define void @ti16a(double %a, double %b) nounwind {
-; X32-LABEL: ti16a:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movq {{[0-9]+}}(%esp), %mm0
-; X32-NEXT:    paddw {{[0-9]+}}(%esp), %mm0
-; X32-NEXT:    movq %mm0, 0
-; X32-NEXT:    retl
+; X86-LABEL: ti16a:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movq {{[0-9]+}}(%esp), %mm0
+; X86-NEXT:    paddw {{[0-9]+}}(%esp), %mm0
+; X86-NEXT:    movq %mm0, 0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: ti16a:
 ; X64:       # %bb.0: # %entry
@@ -605,12 +605,12 @@ entry:
 }
 
 define void @ti32a(double %a, double %b) nounwind {
-; X32-LABEL: ti32a:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movq {{[0-9]+}}(%esp), %mm0
-; X32-NEXT:    paddd {{[0-9]+}}(%esp), %mm0
-; X32-NEXT:    movq %mm0, 0
-; X32-NEXT:    retl
+; X86-LABEL: ti32a:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movq {{[0-9]+}}(%esp), %mm0
+; X86-NEXT:    paddd {{[0-9]+}}(%esp), %mm0
+; X86-NEXT:    movq %mm0, 0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: ti32a:
 ; X64:       # %bb.0: # %entry
@@ -628,12 +628,12 @@ entry:
 }
 
 define void @ti64a(double %a, double %b) nounwind {
-; X32-LABEL: ti64a:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movq {{[0-9]+}}(%esp), %mm0
-; X32-NEXT:    paddq {{[0-9]+}}(%esp), %mm0
-; X32-NEXT:    movq %mm0, 0
-; X32-NEXT:    retl
+; X86-LABEL: ti64a:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movq {{[0-9]+}}(%esp), %mm0
+; X86-NEXT:    paddq {{[0-9]+}}(%esp), %mm0
+; X86-NEXT:    movq %mm0, 0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: ti64a:
 ; X64:       # %bb.0: # %entry
@@ -651,25 +651,21 @@ entry:
 }
 
 ; Make sure we clamp large shift amounts to 255
-define i64 @pr43922() {
-; X32-LABEL: pr43922:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    pushl %ebp
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    .cfi_offset %ebp, -8
-; X32-NEXT:    movl %esp, %ebp
-; X32-NEXT:    .cfi_def_cfa_register %ebp
-; X32-NEXT:    andl $-8, %esp
-; X32-NEXT:    subl $8, %esp
-; X32-NEXT:    movq {{\.?LCPI[0-9]+_[0-9]+}}, %mm0 # mm0 = 0x7AAAAAAA7AAAAAAA
-; X32-NEXT:    psrad $255, %mm0
-; X32-NEXT:    movq %mm0, (%esp)
-; X32-NEXT:    movl (%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    movl %ebp, %esp
-; X32-NEXT:    popl %ebp
-; X32-NEXT:    .cfi_def_cfa %esp, 4
-; X32-NEXT:    retl
+define i64 @pr43922() nounwind {
+; X86-LABEL: pr43922:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-8, %esp
+; X86-NEXT:    subl $8, %esp
+; X86-NEXT:    movq {{\.?LCPI[0-9]+_[0-9]+}}, %mm0 # mm0 = 0x7AAAAAAA7AAAAAAA
+; X86-NEXT:    psrad $255, %mm0
+; X86-NEXT:    movq %mm0, (%esp)
+; X86-NEXT:    movl (%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: pr43922:
 ; X64:       # %bb.0: # %entry


        

