[llvm] 9c766b8 - [X86] Regenerate midpoint-int.ll tests

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 4 01:45:17 PST 2020


Author: Simon Pilgrim
Date: 2020-12-04T09:44:56Z
New Revision: 9c766b8418b9b0a4196014f5a2e983d98f33eeef

URL: https://github.com/llvm/llvm-project/commit/9c766b8418b9b0a4196014f5a2e983d98f33eeef
DIFF: https://github.com/llvm/llvm-project/commit/9c766b8418b9b0a4196014f5a2e983d98f33eeef.diff

LOG: [X86] Regenerate midpoint-int.ll tests

Replace X32 check prefixes with X86 - X32 is generally used for gnux32 triple tests

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/midpoint-int.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/X86/midpoint-int.ll b/llvm/test/CodeGen/X86/midpoint-int.ll
index 0e63227f74ef..6e7ca42025be 100644
--- a/llvm/test/CodeGen/X86/midpoint-int.ll
+++ b/llvm/test/CodeGen/X86/midpoint-int.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64
-; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
 
 ; These test cases are inspired by C++2a std::midpoint().
 ; See https://bugs.llvm.org/show_bug.cgi?id=40965
@@ -27,29 +27,29 @@ define i32 @scalar_i32_signed_reg_reg(i32 %a1, i32 %a2) nounwind {
 ; X64-NEXT:    addl %edi, %eax
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: scalar_i32_signed_reg_reg:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    cmpl %eax, %ecx
-; X32-NEXT:    setle %dl
-; X32-NEXT:    leal -1(%edx,%edx), %edx
-; X32-NEXT:    jg .LBB0_1
-; X32-NEXT:  # %bb.2:
-; X32-NEXT:    movl %ecx, %esi
-; X32-NEXT:    jmp .LBB0_3
-; X32-NEXT:  .LBB0_1:
-; X32-NEXT:    movl %eax, %esi
-; X32-NEXT:    movl %ecx, %eax
-; X32-NEXT:  .LBB0_3:
-; X32-NEXT:    subl %esi, %eax
-; X32-NEXT:    shrl %eax
-; X32-NEXT:    imull %edx, %eax
-; X32-NEXT:    addl %ecx, %eax
-; X32-NEXT:    popl %esi
-; X32-NEXT:    retl
+; X86-LABEL: scalar_i32_signed_reg_reg:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpl %eax, %ecx
+; X86-NEXT:    setle %dl
+; X86-NEXT:    leal -1(%edx,%edx), %edx
+; X86-NEXT:    jg .LBB0_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:    jmp .LBB0_3
+; X86-NEXT:  .LBB0_1:
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB0_3:
+; X86-NEXT:    subl %esi, %eax
+; X86-NEXT:    shrl %eax
+; X86-NEXT:    imull %edx, %eax
+; X86-NEXT:    addl %ecx, %eax
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
   %t3 = icmp sgt i32 %a1, %a2 ; signed
   %t4 = select i1 %t3, i32 -1, i32 1
   %t5 = select i1 %t3, i32 %a2, i32 %a1
@@ -77,29 +77,29 @@ define i32 @scalar_i32_unsigned_reg_reg(i32 %a1, i32 %a2) nounwind {
 ; X64-NEXT:    addl %edi, %eax
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: scalar_i32_unsigned_reg_reg:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    cmpl %eax, %ecx
-; X32-NEXT:    setbe %dl
-; X32-NEXT:    leal -1(%edx,%edx), %edx
-; X32-NEXT:    ja .LBB1_1
-; X32-NEXT:  # %bb.2:
-; X32-NEXT:    movl %ecx, %esi
-; X32-NEXT:    jmp .LBB1_3
-; X32-NEXT:  .LBB1_1:
-; X32-NEXT:    movl %eax, %esi
-; X32-NEXT:    movl %ecx, %eax
-; X32-NEXT:  .LBB1_3:
-; X32-NEXT:    subl %esi, %eax
-; X32-NEXT:    shrl %eax
-; X32-NEXT:    imull %edx, %eax
-; X32-NEXT:    addl %ecx, %eax
-; X32-NEXT:    popl %esi
-; X32-NEXT:    retl
+; X86-LABEL: scalar_i32_unsigned_reg_reg:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpl %eax, %ecx
+; X86-NEXT:    setbe %dl
+; X86-NEXT:    leal -1(%edx,%edx), %edx
+; X86-NEXT:    ja .LBB1_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:    jmp .LBB1_3
+; X86-NEXT:  .LBB1_1:
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB1_3:
+; X86-NEXT:    subl %esi, %eax
+; X86-NEXT:    shrl %eax
+; X86-NEXT:    imull %edx, %eax
+; X86-NEXT:    addl %ecx, %eax
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
   %t3 = icmp ugt i32 %a1, %a2
   %t4 = select i1 %t3, i32 -1, i32 1
   %t5 = select i1 %t3, i32 %a2, i32 %a1
@@ -130,30 +130,30 @@ define i32 @scalar_i32_signed_mem_reg(i32* %a1_addr, i32 %a2) nounwind {
 ; X64-NEXT:    addl %ecx, %eax
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: scalar_i32_signed_mem_reg:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movl (%ecx), %ecx
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    cmpl %eax, %ecx
-; X32-NEXT:    setle %dl
-; X32-NEXT:    leal -1(%edx,%edx), %edx
-; X32-NEXT:    jg .LBB2_1
-; X32-NEXT:  # %bb.2:
-; X32-NEXT:    movl %ecx, %esi
-; X32-NEXT:    jmp .LBB2_3
-; X32-NEXT:  .LBB2_1:
-; X32-NEXT:    movl %eax, %esi
-; X32-NEXT:    movl %ecx, %eax
-; X32-NEXT:  .LBB2_3:
-; X32-NEXT:    subl %esi, %eax
-; X32-NEXT:    shrl %eax
-; X32-NEXT:    imull %edx, %eax
-; X32-NEXT:    addl %ecx, %eax
-; X32-NEXT:    popl %esi
-; X32-NEXT:    retl
+; X86-LABEL: scalar_i32_signed_mem_reg:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl (%ecx), %ecx
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpl %eax, %ecx
+; X86-NEXT:    setle %dl
+; X86-NEXT:    leal -1(%edx,%edx), %edx
+; X86-NEXT:    jg .LBB2_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:    jmp .LBB2_3
+; X86-NEXT:  .LBB2_1:
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB2_3:
+; X86-NEXT:    subl %esi, %eax
+; X86-NEXT:    shrl %eax
+; X86-NEXT:    imull %edx, %eax
+; X86-NEXT:    addl %ecx, %eax
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
   %a1 = load i32, i32* %a1_addr
   %t3 = icmp sgt i32 %a1, %a2 ; signed
   %t4 = select i1 %t3, i32 -1, i32 1
@@ -183,30 +183,30 @@ define i32 @scalar_i32_signed_reg_mem(i32 %a1, i32* %a2_addr) nounwind {
 ; X64-NEXT:    addl %edi, %eax
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: scalar_i32_signed_reg_mem:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl (%eax), %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    cmpl %eax, %ecx
-; X32-NEXT:    setle %dl
-; X32-NEXT:    leal -1(%edx,%edx), %edx
-; X32-NEXT:    jg .LBB3_1
-; X32-NEXT:  # %bb.2:
-; X32-NEXT:    movl %ecx, %esi
-; X32-NEXT:    jmp .LBB3_3
-; X32-NEXT:  .LBB3_1:
-; X32-NEXT:    movl %eax, %esi
-; X32-NEXT:    movl %ecx, %eax
-; X32-NEXT:  .LBB3_3:
-; X32-NEXT:    subl %esi, %eax
-; X32-NEXT:    shrl %eax
-; X32-NEXT:    imull %edx, %eax
-; X32-NEXT:    addl %ecx, %eax
-; X32-NEXT:    popl %esi
-; X32-NEXT:    retl
+; X86-LABEL: scalar_i32_signed_reg_mem:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpl %eax, %ecx
+; X86-NEXT:    setle %dl
+; X86-NEXT:    leal -1(%edx,%edx), %edx
+; X86-NEXT:    jg .LBB3_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:    jmp .LBB3_3
+; X86-NEXT:  .LBB3_1:
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB3_3:
+; X86-NEXT:    subl %esi, %eax
+; X86-NEXT:    shrl %eax
+; X86-NEXT:    imull %edx, %eax
+; X86-NEXT:    addl %ecx, %eax
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
   %a2 = load i32, i32* %a2_addr
   %t3 = icmp sgt i32 %a1, %a2 ; signed
   %t4 = select i1 %t3, i32 -1, i32 1
@@ -237,31 +237,31 @@ define i32 @scalar_i32_signed_mem_mem(i32* %a1_addr, i32* %a2_addr) nounwind {
 ; X64-NEXT:    addl %ecx, %eax
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: scalar_i32_signed_mem_mem:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movl (%ecx), %ecx
-; X32-NEXT:    movl (%eax), %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    cmpl %eax, %ecx
-; X32-NEXT:    setle %dl
-; X32-NEXT:    leal -1(%edx,%edx), %edx
-; X32-NEXT:    jg .LBB4_1
-; X32-NEXT:  # %bb.2:
-; X32-NEXT:    movl %ecx, %esi
-; X32-NEXT:    jmp .LBB4_3
-; X32-NEXT:  .LBB4_1:
-; X32-NEXT:    movl %eax, %esi
-; X32-NEXT:    movl %ecx, %eax
-; X32-NEXT:  .LBB4_3:
-; X32-NEXT:    subl %esi, %eax
-; X32-NEXT:    shrl %eax
-; X32-NEXT:    imull %edx, %eax
-; X32-NEXT:    addl %ecx, %eax
-; X32-NEXT:    popl %esi
-; X32-NEXT:    retl
+; X86-LABEL: scalar_i32_signed_mem_mem:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl (%ecx), %ecx
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpl %eax, %ecx
+; X86-NEXT:    setle %dl
+; X86-NEXT:    leal -1(%edx,%edx), %edx
+; X86-NEXT:    jg .LBB4_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:    jmp .LBB4_3
+; X86-NEXT:  .LBB4_1:
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB4_3:
+; X86-NEXT:    subl %esi, %eax
+; X86-NEXT:    shrl %eax
+; X86-NEXT:    imull %edx, %eax
+; X86-NEXT:    addl %ecx, %eax
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
   %a1 = load i32, i32* %a1_addr
   %a2 = load i32, i32* %a2_addr
   %t3 = icmp sgt i32 %a1, %a2 ; signed
@@ -297,49 +297,49 @@ define i64 @scalar_i64_signed_reg_reg(i64 %a1, i64 %a2) nounwind {
 ; X64-NEXT:    addq %rdi, %rax
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: scalar_i64_signed_reg_reg:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebp
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    pushl %edi
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT:    cmpl %ecx, %eax
-; X32-NEXT:    movl %edi, %edx
-; X32-NEXT:    sbbl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    movl $-1, %ebx
-; X32-NEXT:    jl .LBB5_1
-; X32-NEXT:  # %bb.2:
-; X32-NEXT:    xorl %ebp, %ebp
-; X32-NEXT:    movl $1, %ebx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    movl %ecx, %esi
-; X32-NEXT:    jmp .LBB5_3
-; X32-NEXT:  .LBB5_1:
-; X32-NEXT:    movl $-1, %ebp
-; X32-NEXT:    movl %edi, %edx
-; X32-NEXT:    movl %eax, %esi
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT:    movl %ecx, %eax
-; X32-NEXT:  .LBB5_3:
-; X32-NEXT:    subl %esi, %eax
-; X32-NEXT:    sbbl %edx, %edi
-; X32-NEXT:    shrdl $1, %edi, %eax
-; X32-NEXT:    imull %eax, %ebp
-; X32-NEXT:    mull %ebx
-; X32-NEXT:    addl %ebp, %edx
-; X32-NEXT:    shrl %edi
-; X32-NEXT:    imull %ebx, %edi
-; X32-NEXT:    addl %edi, %edx
-; X32-NEXT:    addl %ecx, %eax
-; X32-NEXT:    adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    popl %esi
-; X32-NEXT:    popl %edi
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    popl %ebp
-; X32-NEXT:    retl
+; X86-LABEL: scalar_i64_signed_reg_reg:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    cmpl %ecx, %eax
+; X86-NEXT:    movl %edi, %edx
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl $-1, %ebx
+; X86-NEXT:    jl .LBB5_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    xorl %ebp, %ebp
+; X86-NEXT:    movl $1, %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:    jmp .LBB5_3
+; X86-NEXT:  .LBB5_1:
+; X86-NEXT:    movl $-1, %ebp
+; X86-NEXT:    movl %edi, %edx
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB5_3:
+; X86-NEXT:    subl %esi, %eax
+; X86-NEXT:    sbbl %edx, %edi
+; X86-NEXT:    shrdl $1, %edi, %eax
+; X86-NEXT:    imull %eax, %ebp
+; X86-NEXT:    mull %ebx
+; X86-NEXT:    addl %ebp, %edx
+; X86-NEXT:    shrl %edi
+; X86-NEXT:    imull %ebx, %edi
+; X86-NEXT:    addl %edi, %edx
+; X86-NEXT:    addl %ecx, %eax
+; X86-NEXT:    adcl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
   %t3 = icmp sgt i64 %a1, %a2 ; signed
   %t4 = select i1 %t3, i64 -1, i64 1
   %t5 = select i1 %t3, i64 %a2, i64 %a1
@@ -367,49 +367,49 @@ define i64 @scalar_i64_unsigned_reg_reg(i64 %a1, i64 %a2) nounwind {
 ; X64-NEXT:    addq %rdi, %rax
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: scalar_i64_unsigned_reg_reg:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebp
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    pushl %edi
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT:    cmpl %ecx, %eax
-; X32-NEXT:    movl %edi, %edx
-; X32-NEXT:    sbbl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    movl $-1, %ebx
-; X32-NEXT:    jb .LBB6_1
-; X32-NEXT:  # %bb.2:
-; X32-NEXT:    xorl %ebp, %ebp
-; X32-NEXT:    movl $1, %ebx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    movl %ecx, %esi
-; X32-NEXT:    jmp .LBB6_3
-; X32-NEXT:  .LBB6_1:
-; X32-NEXT:    movl $-1, %ebp
-; X32-NEXT:    movl %edi, %edx
-; X32-NEXT:    movl %eax, %esi
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT:    movl %ecx, %eax
-; X32-NEXT:  .LBB6_3:
-; X32-NEXT:    subl %esi, %eax
-; X32-NEXT:    sbbl %edx, %edi
-; X32-NEXT:    shrdl $1, %edi, %eax
-; X32-NEXT:    imull %eax, %ebp
-; X32-NEXT:    mull %ebx
-; X32-NEXT:    addl %ebp, %edx
-; X32-NEXT:    shrl %edi
-; X32-NEXT:    imull %ebx, %edi
-; X32-NEXT:    addl %edi, %edx
-; X32-NEXT:    addl %ecx, %eax
-; X32-NEXT:    adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    popl %esi
-; X32-NEXT:    popl %edi
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    popl %ebp
-; X32-NEXT:    retl
+; X86-LABEL: scalar_i64_unsigned_reg_reg:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    cmpl %ecx, %eax
+; X86-NEXT:    movl %edi, %edx
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl $-1, %ebx
+; X86-NEXT:    jb .LBB6_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    xorl %ebp, %ebp
+; X86-NEXT:    movl $1, %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:    jmp .LBB6_3
+; X86-NEXT:  .LBB6_1:
+; X86-NEXT:    movl $-1, %ebp
+; X86-NEXT:    movl %edi, %edx
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB6_3:
+; X86-NEXT:    subl %esi, %eax
+; X86-NEXT:    sbbl %edx, %edi
+; X86-NEXT:    shrdl $1, %edi, %eax
+; X86-NEXT:    imull %eax, %ebp
+; X86-NEXT:    mull %ebx
+; X86-NEXT:    addl %ebp, %edx
+; X86-NEXT:    shrl %edi
+; X86-NEXT:    imull %ebx, %edi
+; X86-NEXT:    addl %edi, %edx
+; X86-NEXT:    addl %ecx, %eax
+; X86-NEXT:    adcl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
   %t3 = icmp ugt i64 %a1, %a2
   %t4 = select i1 %t3, i64 -1, i64 1
   %t5 = select i1 %t3, i64 %a2, i64 %a1
@@ -440,53 +440,53 @@ define i64 @scalar_i64_signed_mem_reg(i64* %a1_addr, i64 %a2) nounwind {
 ; X64-NEXT:    addq %rcx, %rax
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: scalar_i64_signed_mem_reg:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebp
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    pushl %edi
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    pushl %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movl (%ecx), %esi
-; X32-NEXT:    movl 4(%ecx), %ecx
-; X32-NEXT:    cmpl %esi, %eax
-; X32-NEXT:    movl %edi, %edx
-; X32-NEXT:    sbbl %ecx, %edx
-; X32-NEXT:    movl $-1, %ebx
-; X32-NEXT:    jl .LBB7_1
-; X32-NEXT:  # %bb.2:
-; X32-NEXT:    xorl %ebp, %ebp
-; X32-NEXT:    movl $1, %ebx
-; X32-NEXT:    movl %ecx, (%esp) # 4-byte Spill
-; X32-NEXT:    movl %esi, %edx
-; X32-NEXT:    jmp .LBB7_3
-; X32-NEXT:  .LBB7_1:
-; X32-NEXT:    movl $-1, %ebp
-; X32-NEXT:    movl %edi, (%esp) # 4-byte Spill
-; X32-NEXT:    movl %eax, %edx
-; X32-NEXT:    movl %ecx, %edi
-; X32-NEXT:    movl %esi, %eax
-; X32-NEXT:  .LBB7_3:
-; X32-NEXT:    subl %edx, %eax
-; X32-NEXT:    sbbl (%esp), %edi # 4-byte Folded Reload
-; X32-NEXT:    shrdl $1, %edi, %eax
-; X32-NEXT:    imull %eax, %ebp
-; X32-NEXT:    mull %ebx
-; X32-NEXT:    addl %ebp, %edx
-; X32-NEXT:    shrl %edi
-; X32-NEXT:    imull %ebx, %edi
-; X32-NEXT:    addl %edi, %edx
-; X32-NEXT:    addl %esi, %eax
-; X32-NEXT:    adcl %ecx, %edx
-; X32-NEXT:    addl $4, %esp
-; X32-NEXT:    popl %esi
-; X32-NEXT:    popl %edi
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    popl %ebp
-; X32-NEXT:    retl
+; X86-LABEL: scalar_i64_signed_mem_reg:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    pushl %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl (%ecx), %esi
+; X86-NEXT:    movl 4(%ecx), %ecx
+; X86-NEXT:    cmpl %esi, %eax
+; X86-NEXT:    movl %edi, %edx
+; X86-NEXT:    sbbl %ecx, %edx
+; X86-NEXT:    movl $-1, %ebx
+; X86-NEXT:    jl .LBB7_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    xorl %ebp, %ebp
+; X86-NEXT:    movl $1, %ebx
+; X86-NEXT:    movl %ecx, (%esp) # 4-byte Spill
+; X86-NEXT:    movl %esi, %edx
+; X86-NEXT:    jmp .LBB7_3
+; X86-NEXT:  .LBB7_1:
+; X86-NEXT:    movl $-1, %ebp
+; X86-NEXT:    movl %edi, (%esp) # 4-byte Spill
+; X86-NEXT:    movl %eax, %edx
+; X86-NEXT:    movl %ecx, %edi
+; X86-NEXT:    movl %esi, %eax
+; X86-NEXT:  .LBB7_3:
+; X86-NEXT:    subl %edx, %eax
+; X86-NEXT:    sbbl (%esp), %edi # 4-byte Folded Reload
+; X86-NEXT:    shrdl $1, %edi, %eax
+; X86-NEXT:    imull %eax, %ebp
+; X86-NEXT:    mull %ebx
+; X86-NEXT:    addl %ebp, %edx
+; X86-NEXT:    shrl %edi
+; X86-NEXT:    imull %ebx, %edi
+; X86-NEXT:    addl %edi, %edx
+; X86-NEXT:    addl %esi, %eax
+; X86-NEXT:    adcl %ecx, %edx
+; X86-NEXT:    addl $4, %esp
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
   %a1 = load i64, i64* %a1_addr
   %t3 = icmp sgt i64 %a1, %a2 ; signed
   %t4 = select i1 %t3, i64 -1, i64 1
@@ -516,50 +516,50 @@ define i64 @scalar_i64_signed_reg_mem(i64 %a1, i64* %a2_addr) nounwind {
 ; X64-NEXT:    addq %rdi, %rax
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: scalar_i64_signed_reg_mem:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebp
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    pushl %edi
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    movl (%edx), %eax
-; X32-NEXT:    movl 4(%edx), %edi
-; X32-NEXT:    cmpl %ecx, %eax
-; X32-NEXT:    movl %edi, %edx
-; X32-NEXT:    sbbl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    movl $-1, %ebx
-; X32-NEXT:    jl .LBB8_1
-; X32-NEXT:  # %bb.2:
-; X32-NEXT:    xorl %ebp, %ebp
-; X32-NEXT:    movl $1, %ebx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    movl %ecx, %esi
-; X32-NEXT:    jmp .LBB8_3
-; X32-NEXT:  .LBB8_1:
-; X32-NEXT:    movl $-1, %ebp
-; X32-NEXT:    movl %edi, %edx
-; X32-NEXT:    movl %eax, %esi
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT:    movl %ecx, %eax
-; X32-NEXT:  .LBB8_3:
-; X32-NEXT:    subl %esi, %eax
-; X32-NEXT:    sbbl %edx, %edi
-; X32-NEXT:    shrdl $1, %edi, %eax
-; X32-NEXT:    imull %eax, %ebp
-; X32-NEXT:    mull %ebx
-; X32-NEXT:    addl %ebp, %edx
-; X32-NEXT:    shrl %edi
-; X32-NEXT:    imull %ebx, %edi
-; X32-NEXT:    addl %edi, %edx
-; X32-NEXT:    addl %ecx, %eax
-; X32-NEXT:    adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    popl %esi
-; X32-NEXT:    popl %edi
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    popl %ebp
-; X32-NEXT:    retl
+; X86-LABEL: scalar_i64_signed_reg_mem:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl (%edx), %eax
+; X86-NEXT:    movl 4(%edx), %edi
+; X86-NEXT:    cmpl %ecx, %eax
+; X86-NEXT:    movl %edi, %edx
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl $-1, %ebx
+; X86-NEXT:    jl .LBB8_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    xorl %ebp, %ebp
+; X86-NEXT:    movl $1, %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:    jmp .LBB8_3
+; X86-NEXT:  .LBB8_1:
+; X86-NEXT:    movl $-1, %ebp
+; X86-NEXT:    movl %edi, %edx
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB8_3:
+; X86-NEXT:    subl %esi, %eax
+; X86-NEXT:    sbbl %edx, %edi
+; X86-NEXT:    shrdl $1, %edi, %eax
+; X86-NEXT:    imull %eax, %ebp
+; X86-NEXT:    mull %ebx
+; X86-NEXT:    addl %ebp, %edx
+; X86-NEXT:    shrl %edi
+; X86-NEXT:    imull %ebx, %edi
+; X86-NEXT:    addl %edi, %edx
+; X86-NEXT:    addl %ecx, %eax
+; X86-NEXT:    adcl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
   %a2 = load i64, i64* %a2_addr
   %t3 = icmp sgt i64 %a1, %a2 ; signed
   %t4 = select i1 %t3, i64 -1, i64 1
@@ -590,54 +590,54 @@ define i64 @scalar_i64_signed_mem_mem(i64* %a1_addr, i64* %a2_addr) nounwind {
 ; X64-NEXT:    addq %rcx, %rax
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: scalar_i64_signed_mem_mem:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebp
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    pushl %edi
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    pushl %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl (%eax), %esi
-; X32-NEXT:    movl 4(%eax), %ecx
-; X32-NEXT:    movl (%edx), %eax
-; X32-NEXT:    movl 4(%edx), %edi
-; X32-NEXT:    cmpl %esi, %eax
-; X32-NEXT:    movl %edi, %edx
-; X32-NEXT:    sbbl %ecx, %edx
-; X32-NEXT:    movl $-1, %ebx
-; X32-NEXT:    jl .LBB9_1
-; X32-NEXT:  # %bb.2:
-; X32-NEXT:    xorl %ebp, %ebp
-; X32-NEXT:    movl $1, %ebx
-; X32-NEXT:    movl %ecx, (%esp) # 4-byte Spill
-; X32-NEXT:    movl %esi, %edx
-; X32-NEXT:    jmp .LBB9_3
-; X32-NEXT:  .LBB9_1:
-; X32-NEXT:    movl $-1, %ebp
-; X32-NEXT:    movl %edi, (%esp) # 4-byte Spill
-; X32-NEXT:    movl %eax, %edx
-; X32-NEXT:    movl %ecx, %edi
-; X32-NEXT:    movl %esi, %eax
-; X32-NEXT:  .LBB9_3:
-; X32-NEXT:    subl %edx, %eax
-; X32-NEXT:    sbbl (%esp), %edi # 4-byte Folded Reload
-; X32-NEXT:    shrdl $1, %edi, %eax
-; X32-NEXT:    imull %eax, %ebp
-; X32-NEXT:    mull %ebx
-; X32-NEXT:    addl %ebp, %edx
-; X32-NEXT:    shrl %edi
-; X32-NEXT:    imull %ebx, %edi
-; X32-NEXT:    addl %edi, %edx
-; X32-NEXT:    addl %esi, %eax
-; X32-NEXT:    adcl %ecx, %edx
-; X32-NEXT:    addl $4, %esp
-; X32-NEXT:    popl %esi
-; X32-NEXT:    popl %edi
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    popl %ebp
-; X32-NEXT:    retl
+; X86-LABEL: scalar_i64_signed_mem_mem:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    pushl %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %esi
+; X86-NEXT:    movl 4(%eax), %ecx
+; X86-NEXT:    movl (%edx), %eax
+; X86-NEXT:    movl 4(%edx), %edi
+; X86-NEXT:    cmpl %esi, %eax
+; X86-NEXT:    movl %edi, %edx
+; X86-NEXT:    sbbl %ecx, %edx
+; X86-NEXT:    movl $-1, %ebx
+; X86-NEXT:    jl .LBB9_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    xorl %ebp, %ebp
+; X86-NEXT:    movl $1, %ebx
+; X86-NEXT:    movl %ecx, (%esp) # 4-byte Spill
+; X86-NEXT:    movl %esi, %edx
+; X86-NEXT:    jmp .LBB9_3
+; X86-NEXT:  .LBB9_1:
+; X86-NEXT:    movl $-1, %ebp
+; X86-NEXT:    movl %edi, (%esp) # 4-byte Spill
+; X86-NEXT:    movl %eax, %edx
+; X86-NEXT:    movl %ecx, %edi
+; X86-NEXT:    movl %esi, %eax
+; X86-NEXT:  .LBB9_3:
+; X86-NEXT:    subl %edx, %eax
+; X86-NEXT:    sbbl (%esp), %edi # 4-byte Folded Reload
+; X86-NEXT:    shrdl $1, %edi, %eax
+; X86-NEXT:    imull %eax, %ebp
+; X86-NEXT:    mull %ebx
+; X86-NEXT:    addl %ebp, %edx
+; X86-NEXT:    shrl %edi
+; X86-NEXT:    imull %ebx, %edi
+; X86-NEXT:    addl %edi, %edx
+; X86-NEXT:    addl %esi, %eax
+; X86-NEXT:    adcl %ecx, %edx
+; X86-NEXT:    addl $4, %esp
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
   %a1 = load i64, i64* %a1_addr
   %a2 = load i64, i64* %a2_addr
   %t3 = icmp sgt i64 %a1, %a2 ; signed
@@ -675,31 +675,31 @@ define i16 @scalar_i16_signed_reg_reg(i16 %a1, i16 %a2) nounwind {
 ; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: scalar_i16_signed_reg_reg:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    cmpw %ax, %cx
-; X32-NEXT:    setle %dl
-; X32-NEXT:    leal -1(%edx,%edx), %edx
-; X32-NEXT:    jg .LBB10_1
-; X32-NEXT:  # %bb.2:
-; X32-NEXT:    movl %ecx, %esi
-; X32-NEXT:    jmp .LBB10_3
-; X32-NEXT:  .LBB10_1:
-; X32-NEXT:    movl %eax, %esi
-; X32-NEXT:    movl %ecx, %eax
-; X32-NEXT:  .LBB10_3:
-; X32-NEXT:    subl %esi, %eax
-; X32-NEXT:    movzwl %ax, %eax
-; X32-NEXT:    shrl %eax
-; X32-NEXT:    imull %edx, %eax
-; X32-NEXT:    addl %ecx, %eax
-; X32-NEXT:    # kill: def $ax killed $ax killed $eax
-; X32-NEXT:    popl %esi
-; X32-NEXT:    retl
+; X86-LABEL: scalar_i16_signed_reg_reg:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpw %ax, %cx
+; X86-NEXT:    setle %dl
+; X86-NEXT:    leal -1(%edx,%edx), %edx
+; X86-NEXT:    jg .LBB10_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:    jmp .LBB10_3
+; X86-NEXT:  .LBB10_1:
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB10_3:
+; X86-NEXT:    subl %esi, %eax
+; X86-NEXT:    movzwl %ax, %eax
+; X86-NEXT:    shrl %eax
+; X86-NEXT:    imull %edx, %eax
+; X86-NEXT:    addl %ecx, %eax
+; X86-NEXT:    # kill: def $ax killed $ax killed $eax
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
   %t3 = icmp sgt i16 %a1, %a2 ; signed
   %t4 = select i1 %t3, i16 -1, i16 1
   %t5 = select i1 %t3, i16 %a2, i16 %a1
@@ -729,31 +729,31 @@ define i16 @scalar_i16_unsigned_reg_reg(i16 %a1, i16 %a2) nounwind {
 ; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: scalar_i16_unsigned_reg_reg:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    cmpw %ax, %cx
-; X32-NEXT:    setbe %dl
-; X32-NEXT:    leal -1(%edx,%edx), %edx
-; X32-NEXT:    ja .LBB11_1
-; X32-NEXT:  # %bb.2:
-; X32-NEXT:    movl %ecx, %esi
-; X32-NEXT:    jmp .LBB11_3
-; X32-NEXT:  .LBB11_1:
-; X32-NEXT:    movl %eax, %esi
-; X32-NEXT:    movl %ecx, %eax
-; X32-NEXT:  .LBB11_3:
-; X32-NEXT:    subl %esi, %eax
-; X32-NEXT:    movzwl %ax, %eax
-; X32-NEXT:    shrl %eax
-; X32-NEXT:    imull %edx, %eax
-; X32-NEXT:    addl %ecx, %eax
-; X32-NEXT:    # kill: def $ax killed $ax killed $eax
-; X32-NEXT:    popl %esi
-; X32-NEXT:    retl
+; X86-LABEL: scalar_i16_unsigned_reg_reg:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpw %ax, %cx
+; X86-NEXT:    setbe %dl
+; X86-NEXT:    leal -1(%edx,%edx), %edx
+; X86-NEXT:    ja .LBB11_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:    jmp .LBB11_3
+; X86-NEXT:  .LBB11_1:
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB11_3:
+; X86-NEXT:    subl %esi, %eax
+; X86-NEXT:    movzwl %ax, %eax
+; X86-NEXT:    shrl %eax
+; X86-NEXT:    imull %edx, %eax
+; X86-NEXT:    addl %ecx, %eax
+; X86-NEXT:    # kill: def $ax killed $ax killed $eax
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
   %t3 = icmp ugt i16 %a1, %a2
   %t4 = select i1 %t3, i16 -1, i16 1
   %t5 = select i1 %t3, i16 %a2, i16 %a1
@@ -786,32 +786,32 @@ define i16 @scalar_i16_signed_mem_reg(i16* %a1_addr, i16 %a2) nounwind {
 ; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: scalar_i16_signed_mem_reg:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movzwl (%ecx), %ecx
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    cmpw %ax, %cx
-; X32-NEXT:    setle %dl
-; X32-NEXT:    leal -1(%edx,%edx), %edx
-; X32-NEXT:    jg .LBB12_1
-; X32-NEXT:  # %bb.2:
-; X32-NEXT:    movl %ecx, %esi
-; X32-NEXT:    jmp .LBB12_3
-; X32-NEXT:  .LBB12_1:
-; X32-NEXT:    movl %eax, %esi
-; X32-NEXT:    movl %ecx, %eax
-; X32-NEXT:  .LBB12_3:
-; X32-NEXT:    subl %esi, %eax
-; X32-NEXT:    movzwl %ax, %eax
-; X32-NEXT:    shrl %eax
-; X32-NEXT:    imull %edx, %eax
-; X32-NEXT:    addl %ecx, %eax
-; X32-NEXT:    # kill: def $ax killed $ax killed $eax
-; X32-NEXT:    popl %esi
-; X32-NEXT:    retl
+; X86-LABEL: scalar_i16_signed_mem_reg:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movzwl (%ecx), %ecx
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpw %ax, %cx
+; X86-NEXT:    setle %dl
+; X86-NEXT:    leal -1(%edx,%edx), %edx
+; X86-NEXT:    jg .LBB12_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:    jmp .LBB12_3
+; X86-NEXT:  .LBB12_1:
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB12_3:
+; X86-NEXT:    subl %esi, %eax
+; X86-NEXT:    movzwl %ax, %eax
+; X86-NEXT:    shrl %eax
+; X86-NEXT:    imull %edx, %eax
+; X86-NEXT:    addl %ecx, %eax
+; X86-NEXT:    # kill: def $ax killed $ax killed $eax
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
   %a1 = load i16, i16* %a1_addr
   %t3 = icmp sgt i16 %a1, %a2 ; signed
   %t4 = select i1 %t3, i16 -1, i16 1
@@ -843,32 +843,32 @@ define i16 @scalar_i16_signed_reg_mem(i16 %a1, i16* %a2_addr) nounwind {
 ; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: scalar_i16_signed_reg_mem:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movzwl (%eax), %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    cmpw %ax, %cx
-; X32-NEXT:    setle %dl
-; X32-NEXT:    leal -1(%edx,%edx), %edx
-; X32-NEXT:    jg .LBB13_1
-; X32-NEXT:  # %bb.2:
-; X32-NEXT:    movl %ecx, %esi
-; X32-NEXT:    jmp .LBB13_3
-; X32-NEXT:  .LBB13_1:
-; X32-NEXT:    movl %eax, %esi
-; X32-NEXT:    movl %ecx, %eax
-; X32-NEXT:  .LBB13_3:
-; X32-NEXT:    subl %esi, %eax
-; X32-NEXT:    movzwl %ax, %eax
-; X32-NEXT:    shrl %eax
-; X32-NEXT:    imull %edx, %eax
-; X32-NEXT:    addl %ecx, %eax
-; X32-NEXT:    # kill: def $ax killed $ax killed $eax
-; X32-NEXT:    popl %esi
-; X32-NEXT:    retl
+; X86-LABEL: scalar_i16_signed_reg_mem:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movzwl (%eax), %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpw %ax, %cx
+; X86-NEXT:    setle %dl
+; X86-NEXT:    leal -1(%edx,%edx), %edx
+; X86-NEXT:    jg .LBB13_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:    jmp .LBB13_3
+; X86-NEXT:  .LBB13_1:
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB13_3:
+; X86-NEXT:    subl %esi, %eax
+; X86-NEXT:    movzwl %ax, %eax
+; X86-NEXT:    shrl %eax
+; X86-NEXT:    imull %edx, %eax
+; X86-NEXT:    addl %ecx, %eax
+; X86-NEXT:    # kill: def $ax killed $ax killed $eax
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
   %a2 = load i16, i16* %a2_addr
   %t3 = icmp sgt i16 %a1, %a2 ; signed
   %t4 = select i1 %t3, i16 -1, i16 1
@@ -901,33 +901,33 @@ define i16 @scalar_i16_signed_mem_mem(i16* %a1_addr, i16* %a2_addr) nounwind {
 ; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: scalar_i16_signed_mem_mem:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movzwl (%ecx), %ecx
-; X32-NEXT:    movzwl (%eax), %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    cmpw %ax, %cx
-; X32-NEXT:    setle %dl
-; X32-NEXT:    leal -1(%edx,%edx), %edx
-; X32-NEXT:    jg .LBB14_1
-; X32-NEXT:  # %bb.2:
-; X32-NEXT:    movl %ecx, %esi
-; X32-NEXT:    jmp .LBB14_3
-; X32-NEXT:  .LBB14_1:
-; X32-NEXT:    movl %eax, %esi
-; X32-NEXT:    movl %ecx, %eax
-; X32-NEXT:  .LBB14_3:
-; X32-NEXT:    subl %esi, %eax
-; X32-NEXT:    movzwl %ax, %eax
-; X32-NEXT:    shrl %eax
-; X32-NEXT:    imull %edx, %eax
-; X32-NEXT:    addl %ecx, %eax
-; X32-NEXT:    # kill: def $ax killed $ax killed $eax
-; X32-NEXT:    popl %esi
-; X32-NEXT:    retl
+; X86-LABEL: scalar_i16_signed_mem_mem:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movzwl (%ecx), %ecx
+; X86-NEXT:    movzwl (%eax), %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpw %ax, %cx
+; X86-NEXT:    setle %dl
+; X86-NEXT:    leal -1(%edx,%edx), %edx
+; X86-NEXT:    jg .LBB14_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:    jmp .LBB14_3
+; X86-NEXT:  .LBB14_1:
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB14_3:
+; X86-NEXT:    subl %esi, %eax
+; X86-NEXT:    movzwl %ax, %eax
+; X86-NEXT:    shrl %eax
+; X86-NEXT:    imull %edx, %eax
+; X86-NEXT:    addl %ecx, %eax
+; X86-NEXT:    # kill: def $ax killed $ax killed $eax
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
   %a1 = load i16, i16* %a1_addr
   %a2 = load i16, i16* %a2_addr
   %t3 = icmp sgt i16 %a1, %a2 ; signed
@@ -965,27 +965,27 @@ define i8 @scalar_i8_signed_reg_reg(i8 %a1, i8 %a2) nounwind {
 ; X64-NEXT:    addb %dil, %al
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: scalar_i8_signed_reg_reg:
-; X32:       # %bb.0:
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X32-NEXT:    cmpb %al, %cl
-; X32-NEXT:    setle %dl
-; X32-NEXT:    jg .LBB15_1
-; X32-NEXT:  # %bb.2:
-; X32-NEXT:    movb %cl, %ah
-; X32-NEXT:    jmp .LBB15_3
-; X32-NEXT:  .LBB15_1:
-; X32-NEXT:    movb %al, %ah
-; X32-NEXT:    movb %cl, %al
-; X32-NEXT:  .LBB15_3:
-; X32-NEXT:    subb %ah, %al
-; X32-NEXT:    addb %dl, %dl
-; X32-NEXT:    decb %dl
-; X32-NEXT:    shrb %al
-; X32-NEXT:    mulb %dl
-; X32-NEXT:    addb %cl, %al
-; X32-NEXT:    retl
+; X86-LABEL: scalar_i8_signed_reg_reg:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    cmpb %al, %cl
+; X86-NEXT:    setle %dl
+; X86-NEXT:    jg .LBB15_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    movb %cl, %ah
+; X86-NEXT:    jmp .LBB15_3
+; X86-NEXT:  .LBB15_1:
+; X86-NEXT:    movb %al, %ah
+; X86-NEXT:    movb %cl, %al
+; X86-NEXT:  .LBB15_3:
+; X86-NEXT:    subb %ah, %al
+; X86-NEXT:    addb %dl, %dl
+; X86-NEXT:    decb %dl
+; X86-NEXT:    shrb %al
+; X86-NEXT:    mulb %dl
+; X86-NEXT:    addb %cl, %al
+; X86-NEXT:    retl
   %t3 = icmp sgt i8 %a1, %a2 ; signed
   %t4 = select i1 %t3, i8 -1, i8 1
   %t5 = select i1 %t3, i8 %a2, i8 %a1
@@ -1015,27 +1015,27 @@ define i8 @scalar_i8_unsigned_reg_reg(i8 %a1, i8 %a2) nounwind {
 ; X64-NEXT:    addb %dil, %al
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: scalar_i8_unsigned_reg_reg:
-; X32:       # %bb.0:
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X32-NEXT:    cmpb %al, %cl
-; X32-NEXT:    setbe %dl
-; X32-NEXT:    ja .LBB16_1
-; X32-NEXT:  # %bb.2:
-; X32-NEXT:    movb %cl, %ah
-; X32-NEXT:    jmp .LBB16_3
-; X32-NEXT:  .LBB16_1:
-; X32-NEXT:    movb %al, %ah
-; X32-NEXT:    movb %cl, %al
-; X32-NEXT:  .LBB16_3:
-; X32-NEXT:    subb %ah, %al
-; X32-NEXT:    addb %dl, %dl
-; X32-NEXT:    decb %dl
-; X32-NEXT:    shrb %al
-; X32-NEXT:    mulb %dl
-; X32-NEXT:    addb %cl, %al
-; X32-NEXT:    retl
+; X86-LABEL: scalar_i8_unsigned_reg_reg:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    cmpb %al, %cl
+; X86-NEXT:    setbe %dl
+; X86-NEXT:    ja .LBB16_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    movb %cl, %ah
+; X86-NEXT:    jmp .LBB16_3
+; X86-NEXT:  .LBB16_1:
+; X86-NEXT:    movb %al, %ah
+; X86-NEXT:    movb %cl, %al
+; X86-NEXT:  .LBB16_3:
+; X86-NEXT:    subb %ah, %al
+; X86-NEXT:    addb %dl, %dl
+; X86-NEXT:    decb %dl
+; X86-NEXT:    shrb %al
+; X86-NEXT:    mulb %dl
+; X86-NEXT:    addb %cl, %al
+; X86-NEXT:    retl
   %t3 = icmp ugt i8 %a1, %a2
   %t4 = select i1 %t3, i8 -1, i8 1
   %t5 = select i1 %t3, i8 %a2, i8 %a1
@@ -1068,28 +1068,28 @@ define i8 @scalar_i8_signed_mem_reg(i8* %a1_addr, i8 %a2) nounwind {
 ; X64-NEXT:    addb %cl, %al
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: scalar_i8_signed_mem_reg:
-; X32:       # %bb.0:
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movb (%ecx), %cl
-; X32-NEXT:    cmpb %al, %cl
-; X32-NEXT:    setle %dl
-; X32-NEXT:    jg .LBB17_1
-; X32-NEXT:  # %bb.2:
-; X32-NEXT:    movb %cl, %ah
-; X32-NEXT:    jmp .LBB17_3
-; X32-NEXT:  .LBB17_1:
-; X32-NEXT:    movb %al, %ah
-; X32-NEXT:    movb %cl, %al
-; X32-NEXT:  .LBB17_3:
-; X32-NEXT:    subb %ah, %al
-; X32-NEXT:    addb %dl, %dl
-; X32-NEXT:    decb %dl
-; X32-NEXT:    shrb %al
-; X32-NEXT:    mulb %dl
-; X32-NEXT:    addb %cl, %al
-; X32-NEXT:    retl
+; X86-LABEL: scalar_i8_signed_mem_reg:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movb (%ecx), %cl
+; X86-NEXT:    cmpb %al, %cl
+; X86-NEXT:    setle %dl
+; X86-NEXT:    jg .LBB17_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    movb %cl, %ah
+; X86-NEXT:    jmp .LBB17_3
+; X86-NEXT:  .LBB17_1:
+; X86-NEXT:    movb %al, %ah
+; X86-NEXT:    movb %cl, %al
+; X86-NEXT:  .LBB17_3:
+; X86-NEXT:    subb %ah, %al
+; X86-NEXT:    addb %dl, %dl
+; X86-NEXT:    decb %dl
+; X86-NEXT:    shrb %al
+; X86-NEXT:    mulb %dl
+; X86-NEXT:    addb %cl, %al
+; X86-NEXT:    retl
   %a1 = load i8, i8* %a1_addr
   %t3 = icmp sgt i8 %a1, %a2 ; signed
   %t4 = select i1 %t3, i8 -1, i8 1
@@ -1120,28 +1120,28 @@ define i8 @scalar_i8_signed_reg_mem(i8 %a1, i8* %a2_addr) nounwind {
 ; X64-NEXT:    addb %dil, %al
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: scalar_i8_signed_reg_mem:
-; X32:       # %bb.0:
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movb (%eax), %al
-; X32-NEXT:    cmpb %al, %cl
-; X32-NEXT:    setle %dl
-; X32-NEXT:    jg .LBB18_1
-; X32-NEXT:  # %bb.2:
-; X32-NEXT:    movb %cl, %ah
-; X32-NEXT:    jmp .LBB18_3
-; X32-NEXT:  .LBB18_1:
-; X32-NEXT:    movb %al, %ah
-; X32-NEXT:    movb %cl, %al
-; X32-NEXT:  .LBB18_3:
-; X32-NEXT:    subb %ah, %al
-; X32-NEXT:    addb %dl, %dl
-; X32-NEXT:    decb %dl
-; X32-NEXT:    shrb %al
-; X32-NEXT:    mulb %dl
-; X32-NEXT:    addb %cl, %al
-; X32-NEXT:    retl
+; X86-LABEL: scalar_i8_signed_reg_mem:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movb (%eax), %al
+; X86-NEXT:    cmpb %al, %cl
+; X86-NEXT:    setle %dl
+; X86-NEXT:    jg .LBB18_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    movb %cl, %ah
+; X86-NEXT:    jmp .LBB18_3
+; X86-NEXT:  .LBB18_1:
+; X86-NEXT:    movb %al, %ah
+; X86-NEXT:    movb %cl, %al
+; X86-NEXT:  .LBB18_3:
+; X86-NEXT:    subb %ah, %al
+; X86-NEXT:    addb %dl, %dl
+; X86-NEXT:    decb %dl
+; X86-NEXT:    shrb %al
+; X86-NEXT:    mulb %dl
+; X86-NEXT:    addb %cl, %al
+; X86-NEXT:    retl
   %a2 = load i8, i8* %a2_addr
   %t3 = icmp sgt i8 %a1, %a2 ; signed
   %t4 = select i1 %t3, i8 -1, i8 1
@@ -1173,29 +1173,29 @@ define i8 @scalar_i8_signed_mem_mem(i8* %a1_addr, i8* %a2_addr) nounwind {
 ; X64-NEXT:    addb %cl, %al
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: scalar_i8_signed_mem_mem:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movb (%ecx), %cl
-; X32-NEXT:    movb (%eax), %al
-; X32-NEXT:    cmpb %al, %cl
-; X32-NEXT:    setle %dl
-; X32-NEXT:    jg .LBB19_1
-; X32-NEXT:  # %bb.2:
-; X32-NEXT:    movb %cl, %ah
-; X32-NEXT:    jmp .LBB19_3
-; X32-NEXT:  .LBB19_1:
-; X32-NEXT:    movb %al, %ah
-; X32-NEXT:    movb %cl, %al
-; X32-NEXT:  .LBB19_3:
-; X32-NEXT:    subb %ah, %al
-; X32-NEXT:    addb %dl, %dl
-; X32-NEXT:    decb %dl
-; X32-NEXT:    shrb %al
-; X32-NEXT:    mulb %dl
-; X32-NEXT:    addb %cl, %al
-; X32-NEXT:    retl
+; X86-LABEL: scalar_i8_signed_mem_mem:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movb (%ecx), %cl
+; X86-NEXT:    movb (%eax), %al
+; X86-NEXT:    cmpb %al, %cl
+; X86-NEXT:    setle %dl
+; X86-NEXT:    jg .LBB19_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    movb %cl, %ah
+; X86-NEXT:    jmp .LBB19_3
+; X86-NEXT:  .LBB19_1:
+; X86-NEXT:    movb %al, %ah
+; X86-NEXT:    movb %cl, %al
+; X86-NEXT:  .LBB19_3:
+; X86-NEXT:    subb %ah, %al
+; X86-NEXT:    addb %dl, %dl
+; X86-NEXT:    decb %dl
+; X86-NEXT:    shrb %al
+; X86-NEXT:    mulb %dl
+; X86-NEXT:    addb %cl, %al
+; X86-NEXT:    retl
   %a1 = load i8, i8* %a1_addr
   %a2 = load i8, i8* %a2_addr
   %t3 = icmp sgt i8 %a1, %a2 ; signed


        


More information about the llvm-commits mailing list