[llvm] 69d117e - [DAG] ExpandIntRes_MINMAX - simplify cases with sufficient number of sign bits

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Oct 28 09:11:01 PDT 2022


Author: Simon Pilgrim
Date: 2022-10-28T17:10:45+01:00
New Revision: 69d117edc29d4c74e034d8474433e981b2702898

URL: https://github.com/llvm/llvm-project/commit/69d117edc29d4c74e034d8474433e981b2702898
DIFF: https://github.com/llvm/llvm-project/commit/69d117edc29d4c74e034d8474433e981b2702898.diff

LOG: [DAG] ExpandIntRes_MINMAX - simplify cases with sufficient number of sign bits

When legalizing an smax/smin/umax/umin op, if we know that the upper halves of both operands are all sign bits, then we can perform the op on the lower halves and sign-extend the result into the upper half.

Alive2: https://alive2.llvm.org/ce/z/rk8Rfd

Fixes #58630
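
For illustration, a minimal standalone C++ sketch of the equivalence this fold relies on, shown for the smax case with an i64 split into two i32 halves; the helpers smax64 and smax64_via_halves are hypothetical names (not LLVM APIs), and smin/umax/umin follow the same pattern per the Alive2 proof above:

// Illustrative sketch (not LLVM code), assuming an i64 smax legalized into two
// i32 halves: when both operands are sign-extensions of their low 32 bits, the
// op can be done on the low halves and the high half recovered with an
// arithmetic shift, mirroring the new ExpandIntRes_MINMAX fast path.
#include <cassert>
#include <cstdint>

static int64_t smax64(int64_t a, int64_t b) { return a > b ? a : b; }

// Expanded form taken when ComputeNumSignBits(LHS) > 32 and
// ComputeNumSignBits(RHS) > 32.
static int64_t smax64_via_halves(int64_t a, int64_t b) {
  int32_t la = (int32_t)a, lb = (int32_t)b;
  int32_t lo = la > lb ? la : lb; // MINMAX on the lower halves only
  int32_t hi = lo >> 31;          // SRA by NumHalfBits-1 sign-extends Lo into Hi
  return (int64_t)(((uint64_t)(uint32_t)hi << 32) | (uint32_t)lo);
}

int main() {
  // Every value fits in i32, so each i64 operand has more than 32 sign bits.
  const int64_t vals[] = {-9, -1, 0, 1, 7, INT32_MIN, INT32_MAX};
  for (int64_t a : vals)
    for (int64_t b : vals)
      assert(smax64(a, b) == smax64_via_halves(a, b));
  return 0;
}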

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
    llvm/test/CodeGen/X86/smax.ll
    llvm/test/CodeGen/X86/smin.ll
    llvm/test/CodeGen/X86/umax.ll
    llvm/test/CodeGen/X86/umin.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 5ad4d6004539b..ec16ed8e38336 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -2870,15 +2870,29 @@ void DAGTypeLegalizer::ExpandIntRes_MINMAX(SDNode *N,
   ISD::CondCode CondC;
   std::tie(CondC, LoOpc) = getExpandedMinMaxOps(N->getOpcode());
 
+  SDValue LHS = N->getOperand(0);
+  SDValue RHS = N->getOperand(1);
+
   // Expand the subcomponents.
   SDValue LHSL, LHSH, RHSL, RHSH;
-  GetExpandedInteger(N->getOperand(0), LHSL, LHSH);
-  GetExpandedInteger(N->getOperand(1), RHSL, RHSH);
+  GetExpandedInteger(LHS, LHSL, LHSH);
+  GetExpandedInteger(RHS, RHSL, RHSH);
 
   // Value types
   EVT NVT = LHSL.getValueType();
   EVT CCT = getSetCCResultType(NVT);
 
+  // If the upper halves are all sign bits, then we can perform the MINMAX on
+  // the lower half and sign-extend the result to the upper half.
+  unsigned NumHalfBits = NVT.getScalarSizeInBits();
+  if (DAG.ComputeNumSignBits(LHS) > NumHalfBits &&
+      DAG.ComputeNumSignBits(RHS) > NumHalfBits) {
+    Lo = DAG.getNode(N->getOpcode(), DL, NVT, LHSL, RHSL);
+    Hi = DAG.getNode(ISD::SRA, DL, NVT, Lo,
+                     DAG.getShiftAmountConstant(NumHalfBits - 1, NVT, DL));
+    return;
+  }
+
   // Hi part is always the same op
   Hi = DAG.getNode(N->getOpcode(), DL, NVT, {LHSH, RHSH});
 

diff --git a/llvm/test/CodeGen/X86/smax.ll b/llvm/test/CodeGen/X86/smax.ll
index a629d042b89c2..11d21cf7ad45e 100644
--- a/llvm/test/CodeGen/X86/smax.ll
+++ b/llvm/test/CodeGen/X86/smax.ll
@@ -718,23 +718,12 @@ define i64 @test_signbits_i64(i64 %a, i64 %b) nounwind {
 ;
 ; X86-LABEL: test_signbits_i64:
 ; X86:       # %bb.0:
-; X86-NEXT:    pushl %edi
-; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl %ecx, %esi
-; X86-NEXT:    sarl $31, %esi
-; X86-NEXT:    movl %eax, %edx
-; X86-NEXT:    sarl $31, %edx
 ; X86-NEXT:    cmpl %eax, %ecx
-; X86-NEXT:    movl %eax, %edi
-; X86-NEXT:    cmoval %ecx, %edi
-; X86-NEXT:    cmpl %edx, %esi
 ; X86-NEXT:    cmovgl %ecx, %eax
-; X86-NEXT:    cmovel %edi, %eax
-; X86-NEXT:    cmovgl %esi, %edx
-; X86-NEXT:    popl %esi
-; X86-NEXT:    popl %edi
+; X86-NEXT:    movl %eax, %edx
+; X86-NEXT:    sarl $31, %edx
 ; X86-NEXT:    retl
   %ax = ashr i64 %a, 32
   %bx = ashr i64 %b, 32
@@ -745,70 +734,41 @@ define i64 @test_signbits_i64(i64 %a, i64 %b) nounwind {
 define i128 @test_signbits_i128(i128 %a, i128 %b) nounwind {
 ; X64-LABEL: test_signbits_i128:
 ; X64:       # %bb.0:
-; X64-NEXT:    movq %rsi, %rdi
-; X64-NEXT:    sarq $63, %rdi
-; X64-NEXT:    movq %rcx, %rdx
-; X64-NEXT:    sarq $63, %rdx
-; X64-NEXT:    sarq $28, %rcx
-; X64-NEXT:    cmpq %rcx, %rsi
 ; X64-NEXT:    movq %rcx, %rax
-; X64-NEXT:    cmovaq %rsi, %rax
-; X64-NEXT:    cmpq %rdx, %rdi
-; X64-NEXT:    cmovgq %rsi, %rcx
-; X64-NEXT:    cmovneq %rcx, %rax
-; X64-NEXT:    cmovgq %rdi, %rdx
+; X64-NEXT:    sarq $28, %rax
+; X64-NEXT:    cmpq %rax, %rsi
+; X64-NEXT:    cmovgq %rsi, %rax
+; X64-NEXT:    movq %rax, %rdx
+; X64-NEXT:    sarq $63, %rdx
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test_signbits_i128:
 ; X86:       # %bb.0:
-; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    pushl %ebx
 ; X86-NEXT:    pushl %edi
 ; X86-NEXT:    pushl %esi
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
-; X86-NEXT:    sarl $31, %ebx
-; X86-NEXT:    movl %eax, %edx
-; X86-NEXT:    shrdl $28, %eax, %ecx
-; X86-NEXT:    sarl $31, %eax
-; X86-NEXT:    sarl $28, %edx
-; X86-NEXT:    cmpl %ecx, %ebp
-; X86-NEXT:    movl %ecx, %edi
-; X86-NEXT:    cmoval %ebp, %edi
-; X86-NEXT:    movl %ebp, %esi
-; X86-NEXT:    cmpl %edx, {{[0-9]+}}(%esp)
-; X86-NEXT:    movl %ecx, %ebp
-; X86-NEXT:    cmoval %esi, %ebp
-; X86-NEXT:    cmovel %edi, %ebp
-; X86-NEXT:    movl %edx, %edi
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    cmoval %esi, %edi
-; X86-NEXT:    cmpl %ebx, %eax
-; X86-NEXT:    movl %eax, %esi
-; X86-NEXT:    sbbl %ebx, %esi
-; X86-NEXT:    cmovll {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    cmovll {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl %ebx, %esi
-; X86-NEXT:    xorl %eax, %esi
-; X86-NEXT:    cmovel %ebp, %ecx
-; X86-NEXT:    cmovel %edi, %edx
-; X86-NEXT:    cmpl %eax, %ebx
-; X86-NEXT:    movl %eax, %edi
-; X86-NEXT:    cmoval %ebx, %edi
-; X86-NEXT:    cmovgl %ebx, %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    cmovnel %eax, %edi
-; X86-NEXT:    movl %eax, 12(%esi)
-; X86-NEXT:    movl %edi, 8(%esi)
-; X86-NEXT:    movl %edx, 4(%esi)
-; X86-NEXT:    movl %ecx, (%esi)
-; X86-NEXT:    movl %esi, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    shrdl $28, %edi, %ecx
+; X86-NEXT:    sarl $28, %edi
+; X86-NEXT:    cmpl %ecx, %esi
+; X86-NEXT:    movl %ecx, %ebx
+; X86-NEXT:    cmoval %esi, %ebx
+; X86-NEXT:    cmpl %edi, %edx
+; X86-NEXT:    cmovgl %esi, %ecx
+; X86-NEXT:    cmovel %ebx, %ecx
+; X86-NEXT:    cmovgl %edx, %edi
+; X86-NEXT:    movl %edi, 4(%eax)
+; X86-NEXT:    sarl $31, %edi
+; X86-NEXT:    movl %edi, 12(%eax)
+; X86-NEXT:    movl %edi, 8(%eax)
+; X86-NEXT:    movl %ecx, (%eax)
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    popl %edi
 ; X86-NEXT:    popl %ebx
-; X86-NEXT:    popl %ebp
 ; X86-NEXT:    retl $4
   %ax = ashr i128 %a, 64
   %bx = ashr i128 %b, 92

diff --git a/llvm/test/CodeGen/X86/smin.ll b/llvm/test/CodeGen/X86/smin.ll
index 9f1ea88abdc5e..e7b318c48ca70 100644
--- a/llvm/test/CodeGen/X86/smin.ll
+++ b/llvm/test/CodeGen/X86/smin.ll
@@ -717,23 +717,12 @@ define i64 @test_signbits_i64(i64 %a, i64 %b) nounwind {
 ;
 ; X86-LABEL: test_signbits_i64:
 ; X86:       # %bb.0:
-; X86-NEXT:    pushl %edi
-; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl %ecx, %esi
-; X86-NEXT:    sarl $31, %esi
-; X86-NEXT:    movl %eax, %edx
-; X86-NEXT:    sarl $31, %edx
 ; X86-NEXT:    cmpl %eax, %ecx
-; X86-NEXT:    movl %eax, %edi
-; X86-NEXT:    cmovbl %ecx, %edi
-; X86-NEXT:    cmpl %edx, %esi
 ; X86-NEXT:    cmovll %ecx, %eax
-; X86-NEXT:    cmovel %edi, %eax
-; X86-NEXT:    cmovll %esi, %edx
-; X86-NEXT:    popl %esi
-; X86-NEXT:    popl %edi
+; X86-NEXT:    movl %eax, %edx
+; X86-NEXT:    sarl $31, %edx
 ; X86-NEXT:    retl
   %ax = ashr i64 %a, 32
   %bx = ashr i64 %b, 32
@@ -744,74 +733,41 @@ define i64 @test_signbits_i64(i64 %a, i64 %b) nounwind {
 define i128 @test_signbits_i128(i128 %a, i128 %b) nounwind {
 ; X64-LABEL: test_signbits_i128:
 ; X64:       # %bb.0:
-; X64-NEXT:    movq %rsi, %rdi
-; X64-NEXT:    sarq $63, %rdi
-; X64-NEXT:    movq %rcx, %rdx
-; X64-NEXT:    sarq $63, %rdx
-; X64-NEXT:    sarq $28, %rcx
-; X64-NEXT:    cmpq %rcx, %rsi
 ; X64-NEXT:    movq %rcx, %rax
-; X64-NEXT:    cmovbq %rsi, %rax
-; X64-NEXT:    cmpq %rdx, %rdi
-; X64-NEXT:    cmovlq %rsi, %rcx
-; X64-NEXT:    cmovneq %rcx, %rax
-; X64-NEXT:    cmovlq %rdi, %rdx
+; X64-NEXT:    sarq $28, %rax
+; X64-NEXT:    cmpq %rax, %rsi
+; X64-NEXT:    cmovlq %rsi, %rax
+; X64-NEXT:    movq %rax, %rdx
+; X64-NEXT:    sarq $63, %rdx
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test_signbits_i128:
 ; X86:       # %bb.0:
-; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    pushl %ebx
 ; X86-NEXT:    pushl %edi
 ; X86-NEXT:    pushl %esi
-; X86-NEXT:    subl $8, %esp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    movl %edx, %eax
-; X86-NEXT:    movl %edx, %ebp
-; X86-NEXT:    sarl $31, %eax
-; X86-NEXT:    movl %ebx, %edx
-; X86-NEXT:    shrdl $28, %ebx, %ecx
-; X86-NEXT:    sarl $31, %ebx
-; X86-NEXT:    sarl $28, %edx
-; X86-NEXT:    cmpl %ecx, %edi
-; X86-NEXT:    movl %ecx, %esi
-; X86-NEXT:    cmovbl %edi, %esi
-; X86-NEXT:    cmpl %edx, %ebp
-; X86-NEXT:    movl %ecx, %ebp
-; X86-NEXT:    cmovbl %edi, %ebp
-; X86-NEXT:    cmovel %esi, %ebp
-; X86-NEXT:    movl %edx, %esi
-; X86-NEXT:    cmovbl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    cmpl %ebx, %eax
-; X86-NEXT:    movl %ebx, %edi
-; X86-NEXT:    cmovbl %eax, %edi
-; X86-NEXT:    movl %ebx, %esi
-; X86-NEXT:    cmovll %eax, %esi
-; X86-NEXT:    movl %esi, (%esp) # 4-byte Spill
-; X86-NEXT:    cmovnel %esi, %edi
-; X86-NEXT:    movl %eax, %esi
-; X86-NEXT:    sbbl %ebx, %esi
-; X86-NEXT:    cmovll {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    cmovll {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    xorl %eax, %ebx
-; X86-NEXT:    cmovel %ebp, %ecx
-; X86-NEXT:    cmovel {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    movl (%esp), %eax # 4-byte Reload
-; X86-NEXT:    movl %eax, 12(%esi)
-; X86-NEXT:    movl %edi, 8(%esi)
-; X86-NEXT:    movl %edx, 4(%esi)
-; X86-NEXT:    movl %ecx, (%esi)
-; X86-NEXT:    movl %esi, %eax
-; X86-NEXT:    addl $8, %esp
+; X86-NEXT:    shrdl $28, %edi, %ecx
+; X86-NEXT:    sarl $28, %edi
+; X86-NEXT:    cmpl %ecx, %esi
+; X86-NEXT:    movl %ecx, %ebx
+; X86-NEXT:    cmovbl %esi, %ebx
+; X86-NEXT:    cmpl %edi, %edx
+; X86-NEXT:    cmovll %esi, %ecx
+; X86-NEXT:    cmovel %ebx, %ecx
+; X86-NEXT:    cmovll %edx, %edi
+; X86-NEXT:    movl %edi, 4(%eax)
+; X86-NEXT:    sarl $31, %edi
+; X86-NEXT:    movl %edi, 12(%eax)
+; X86-NEXT:    movl %edi, 8(%eax)
+; X86-NEXT:    movl %ecx, (%eax)
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    popl %edi
 ; X86-NEXT:    popl %ebx
-; X86-NEXT:    popl %ebp
 ; X86-NEXT:    retl $4
   %ax = ashr i128 %a, 64
   %bx = ashr i128 %b, 92

diff --git a/llvm/test/CodeGen/X86/umax.ll b/llvm/test/CodeGen/X86/umax.ll
index d9faf38c0e085..2d16dcff12745 100644
--- a/llvm/test/CodeGen/X86/umax.ll
+++ b/llvm/test/CodeGen/X86/umax.ll
@@ -726,23 +726,12 @@ define i64 @test_signbits_i64(i64 %a, i64 %b) nounwind {
 ;
 ; X86-LABEL: test_signbits_i64:
 ; X86:       # %bb.0:
-; X86-NEXT:    pushl %edi
-; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl %ecx, %esi
-; X86-NEXT:    sarl $31, %esi
-; X86-NEXT:    movl %eax, %edx
-; X86-NEXT:    sarl $31, %edx
 ; X86-NEXT:    cmpl %eax, %ecx
-; X86-NEXT:    movl %eax, %edi
-; X86-NEXT:    cmoval %ecx, %edi
-; X86-NEXT:    cmpl %edx, %esi
 ; X86-NEXT:    cmoval %ecx, %eax
-; X86-NEXT:    cmovel %edi, %eax
-; X86-NEXT:    cmoval %esi, %edx
-; X86-NEXT:    popl %esi
-; X86-NEXT:    popl %edi
+; X86-NEXT:    movl %eax, %edx
+; X86-NEXT:    sarl $31, %edx
 ; X86-NEXT:    retl
   %ax = ashr i64 %a, 32
   %bx = ashr i64 %b, 32
@@ -753,67 +742,41 @@ define i64 @test_signbits_i64(i64 %a, i64 %b) nounwind {
 define i128 @test_signbits_i128(i128 %a, i128 %b) nounwind {
 ; X64-LABEL: test_signbits_i128:
 ; X64:       # %bb.0:
-; X64-NEXT:    movq %rsi, %rdi
-; X64-NEXT:    sarq $63, %rdi
-; X64-NEXT:    movq %rcx, %rdx
-; X64-NEXT:    sarq $63, %rdx
-; X64-NEXT:    sarq $28, %rcx
-; X64-NEXT:    cmpq %rcx, %rsi
 ; X64-NEXT:    movq %rcx, %rax
+; X64-NEXT:    sarq $28, %rax
+; X64-NEXT:    cmpq %rax, %rsi
 ; X64-NEXT:    cmovaq %rsi, %rax
-; X64-NEXT:    cmpq %rdx, %rdi
-; X64-NEXT:    cmovaq %rsi, %rcx
-; X64-NEXT:    cmovneq %rcx, %rax
-; X64-NEXT:    cmovaq %rdi, %rdx
+; X64-NEXT:    movq %rax, %rdx
+; X64-NEXT:    sarq $63, %rdx
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test_signbits_i128:
 ; X86:       # %bb.0:
-; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    pushl %ebx
 ; X86-NEXT:    pushl %edi
 ; X86-NEXT:    pushl %esi
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
-; X86-NEXT:    sarl $31, %ebx
-; X86-NEXT:    movl %eax, %edx
-; X86-NEXT:    shrdl $28, %eax, %ecx
-; X86-NEXT:    sarl $31, %eax
-; X86-NEXT:    sarl $28, %edx
-; X86-NEXT:    cmpl %ecx, %ebp
-; X86-NEXT:    movl %ecx, %edi
-; X86-NEXT:    cmoval %ebp, %edi
-; X86-NEXT:    movl %ebp, %esi
-; X86-NEXT:    cmpl %edx, {{[0-9]+}}(%esp)
-; X86-NEXT:    movl %ecx, %ebp
-; X86-NEXT:    cmoval %esi, %ebp
-; X86-NEXT:    cmovel %edi, %ebp
-; X86-NEXT:    movl %edx, %edi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    cmoval %esi, %edi
-; X86-NEXT:    cmpl %ebx, %eax
-; X86-NEXT:    movl %eax, %esi
-; X86-NEXT:    sbbl %ebx, %esi
-; X86-NEXT:    cmovbl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    cmovbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl %ebx, %esi
-; X86-NEXT:    xorl %eax, %esi
-; X86-NEXT:    cmovel %ebp, %ecx
-; X86-NEXT:    cmovel %edi, %edx
-; X86-NEXT:    cmpl %eax, %ebx
-; X86-NEXT:    cmoval %ebx, %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    movl %eax, 12(%esi)
-; X86-NEXT:    movl %eax, 8(%esi)
-; X86-NEXT:    movl %edx, 4(%esi)
-; X86-NEXT:    movl %ecx, (%esi)
-; X86-NEXT:    movl %esi, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    shrdl $28, %edi, %ecx
+; X86-NEXT:    sarl $28, %edi
+; X86-NEXT:    cmpl %ecx, %esi
+; X86-NEXT:    movl %ecx, %ebx
+; X86-NEXT:    cmoval %esi, %ebx
+; X86-NEXT:    cmpl %edi, %edx
+; X86-NEXT:    cmoval %esi, %ecx
+; X86-NEXT:    cmovel %ebx, %ecx
+; X86-NEXT:    cmoval %edx, %edi
+; X86-NEXT:    movl %edi, 4(%eax)
+; X86-NEXT:    sarl $31, %edi
+; X86-NEXT:    movl %edi, 12(%eax)
+; X86-NEXT:    movl %edi, 8(%eax)
+; X86-NEXT:    movl %ecx, (%eax)
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    popl %edi
 ; X86-NEXT:    popl %ebx
-; X86-NEXT:    popl %ebp
 ; X86-NEXT:    retl $4
   %ax = ashr i128 %a, 64
   %bx = ashr i128 %b, 92

diff --git a/llvm/test/CodeGen/X86/umin.ll b/llvm/test/CodeGen/X86/umin.ll
index ef0d80f30a8b9..e37950e5399cb 100644
--- a/llvm/test/CodeGen/X86/umin.ll
+++ b/llvm/test/CodeGen/X86/umin.ll
@@ -726,23 +726,12 @@ define i64 @test_signbits_i64(i64 %a, i64 %b) nounwind {
 ;
 ; X86-LABEL: test_signbits_i64:
 ; X86:       # %bb.0:
-; X86-NEXT:    pushl %edi
-; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl %ecx, %esi
-; X86-NEXT:    sarl $31, %esi
-; X86-NEXT:    movl %eax, %edx
-; X86-NEXT:    sarl $31, %edx
 ; X86-NEXT:    cmpl %eax, %ecx
-; X86-NEXT:    movl %eax, %edi
-; X86-NEXT:    cmovbl %ecx, %edi
-; X86-NEXT:    cmpl %edx, %esi
 ; X86-NEXT:    cmovbl %ecx, %eax
-; X86-NEXT:    cmovel %edi, %eax
-; X86-NEXT:    cmovbl %esi, %edx
-; X86-NEXT:    popl %esi
-; X86-NEXT:    popl %edi
+; X86-NEXT:    movl %eax, %edx
+; X86-NEXT:    sarl $31, %edx
 ; X86-NEXT:    retl
   %ax = ashr i64 %a, 32
   %bx = ashr i64 %b, 32
@@ -753,69 +742,41 @@ define i64 @test_signbits_i64(i64 %a, i64 %b) nounwind {
 define i128 @test_signbits_i128(i128 %a, i128 %b) nounwind {
 ; X64-LABEL: test_signbits_i128:
 ; X64:       # %bb.0:
-; X64-NEXT:    movq %rsi, %rdi
-; X64-NEXT:    sarq $63, %rdi
-; X64-NEXT:    movq %rcx, %rdx
-; X64-NEXT:    sarq $63, %rdx
-; X64-NEXT:    sarq $28, %rcx
-; X64-NEXT:    cmpq %rcx, %rsi
 ; X64-NEXT:    movq %rcx, %rax
+; X64-NEXT:    sarq $28, %rax
+; X64-NEXT:    cmpq %rax, %rsi
 ; X64-NEXT:    cmovbq %rsi, %rax
-; X64-NEXT:    cmpq %rdx, %rdi
-; X64-NEXT:    cmovbq %rsi, %rcx
-; X64-NEXT:    cmovneq %rcx, %rax
-; X64-NEXT:    cmovbq %rdi, %rdx
+; X64-NEXT:    movq %rax, %rdx
+; X64-NEXT:    sarq $63, %rdx
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test_signbits_i128:
 ; X86:       # %bb.0:
-; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    pushl %ebx
 ; X86-NEXT:    pushl %edi
 ; X86-NEXT:    pushl %esi
-; X86-NEXT:    pushl %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    movl %edx, %eax
-; X86-NEXT:    movl %edx, %ebp
-; X86-NEXT:    sarl $31, %eax
-; X86-NEXT:    movl %ebx, %edx
-; X86-NEXT:    shrdl $28, %ebx, %ecx
-; X86-NEXT:    sarl $31, %ebx
-; X86-NEXT:    sarl $28, %edx
-; X86-NEXT:    cmpl %ecx, %edi
-; X86-NEXT:    movl %ecx, %esi
-; X86-NEXT:    cmovbl %edi, %esi
-; X86-NEXT:    cmpl %edx, %ebp
-; X86-NEXT:    movl %ecx, %ebp
-; X86-NEXT:    cmovbl %edi, %ebp
-; X86-NEXT:    cmovel %esi, %ebp
-; X86-NEXT:    movl %edx, %esi
-; X86-NEXT:    cmovbl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    movl %esi, (%esp) # 4-byte Spill
-; X86-NEXT:    cmpl %ebx, %eax
-; X86-NEXT:    movl %ebx, %edi
-; X86-NEXT:    cmovbl %eax, %edi
-; X86-NEXT:    movl %eax, %esi
-; X86-NEXT:    sbbl %ebx, %esi
-; X86-NEXT:    cmovbl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    cmovbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    xorl %eax, %ebx
-; X86-NEXT:    cmovel %ebp, %ecx
-; X86-NEXT:    cmovel (%esp), %edx # 4-byte Folded Reload
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    movl %edi, 12(%esi)
-; X86-NEXT:    movl %edi, 8(%esi)
-; X86-NEXT:    movl %edx, 4(%esi)
-; X86-NEXT:    movl %ecx, (%esi)
-; X86-NEXT:    movl %esi, %eax
-; X86-NEXT:    addl $4, %esp
+; X86-NEXT:    shrdl $28, %edi, %ecx
+; X86-NEXT:    sarl $28, %edi
+; X86-NEXT:    cmpl %ecx, %esi
+; X86-NEXT:    movl %ecx, %ebx
+; X86-NEXT:    cmovbl %esi, %ebx
+; X86-NEXT:    cmpl %edi, %edx
+; X86-NEXT:    cmovbl %esi, %ecx
+; X86-NEXT:    cmovel %ebx, %ecx
+; X86-NEXT:    cmovbl %edx, %edi
+; X86-NEXT:    movl %edi, 4(%eax)
+; X86-NEXT:    sarl $31, %edi
+; X86-NEXT:    movl %edi, 12(%eax)
+; X86-NEXT:    movl %edi, 8(%eax)
+; X86-NEXT:    movl %ecx, (%eax)
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    popl %edi
 ; X86-NEXT:    popl %ebx
-; X86-NEXT:    popl %ebp
 ; X86-NEXT:    retl $4
   %ax = ashr i128 %a, 64
   %bx = ashr i128 %b, 92

