[llvm] 7afdc6b - [DAG] Fix typo in i64/i128 abdu/abds tests

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Wed Sep 4 03:59:28 PDT 2024


Author: Simon Pilgrim
Date: 2024-09-04T11:59:10+01:00
New Revision: 7afdc6bd57d634354597df185fd7037bec9241ff

URL: https://github.com/llvm/llvm-project/commit/7afdc6bd57d634354597df185fd7037bec9241ff
DIFF: https://github.com/llvm/llvm-project/commit/7afdc6bd57d634354597df185fd7037bec9241ff.diff

LOG: [DAG] Fix typo in i64/i128 abdu/abds tests

I'd incorrectly swapped the operands in some of the "cmp" test patterns when I changed the condition code.
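
For example, the corrected i64 pattern in abds-neg.ll (shown in the diff below) selects the subtraction that matches the slt condition, so the sequence computes the negated absolute difference -abs(a - b) as intended:

  %cmp = icmp slt i64 %a, %b
  %ab = sub i64 %a, %b                      ; a - b, negative when a < b
  %ba = sub i64 %b, %a                      ; b - a, negative when a > b
  %sel = select i1 %cmp, i64 %ab, i64 %ba   ; always <= 0, i.e. -abs(a - b)
  ret i64 %sel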

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/abds-neg.ll
    llvm/test/CodeGen/AArch64/abds.ll
    llvm/test/CodeGen/AArch64/abdu-neg.ll
    llvm/test/CodeGen/AArch64/abdu.ll
    llvm/test/CodeGen/RISCV/abds-neg.ll
    llvm/test/CodeGen/RISCV/abds.ll
    llvm/test/CodeGen/RISCV/abdu-neg.ll
    llvm/test/CodeGen/RISCV/abdu.ll
    llvm/test/CodeGen/X86/abds-neg.ll
    llvm/test/CodeGen/X86/abds.ll
    llvm/test/CodeGen/X86/abdu-neg.ll
    llvm/test/CodeGen/X86/abdu.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/abds-neg.ll b/llvm/test/CodeGen/AArch64/abds-neg.ll
index d4c6a09405e0c4..ac7cb1f619557d 100644
--- a/llvm/test/CodeGen/AArch64/abds-neg.ll
+++ b/llvm/test/CodeGen/AArch64/abds-neg.ll
@@ -377,30 +377,31 @@ define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub x8, x1, x0
 ; CHECK-NEXT:    subs x9, x0, x1
-; CHECK-NEXT:    csel x0, x9, x8, gt
+; CHECK-NEXT:    csel x0, x9, x8, lt
 ; CHECK-NEXT:    ret
   %cmp = icmp slt i64 %a, %b
   %ab = sub i64 %a, %b
   %ba = sub i64 %b, %a
-  %sel = select i1 %cmp, i64 %ba, i64 %ab
+  %sel = select i1 %cmp, i64 %ab, i64 %ba
   ret i64 %sel
 }
 
 define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
 ; CHECK-LABEL: abd_cmp_i128:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    subs x8, x0, x2
-; CHECK-NEXT:    sbc x9, x1, x3
-; CHECK-NEXT:    subs x10, x2, x0
-; CHECK-NEXT:    sbc x11, x3, x1
-; CHECK-NEXT:    sbcs xzr, x3, x1
-; CHECK-NEXT:    csel x0, x8, x10, lt
-; CHECK-NEXT:    csel x1, x9, x11, lt
+; CHECK-NEXT:    cmp x0, x2
+; CHECK-NEXT:    sbc x8, x1, x3
+; CHECK-NEXT:    subs x9, x2, x0
+; CHECK-NEXT:    sbc x10, x3, x1
+; CHECK-NEXT:    subs x11, x0, x2
+; CHECK-NEXT:    sbcs xzr, x1, x3
+; CHECK-NEXT:    csel x0, x11, x9, lt
+; CHECK-NEXT:    csel x1, x8, x10, lt
 ; CHECK-NEXT:    ret
   %cmp = icmp slt i128 %a, %b
   %ab = sub i128 %a, %b
   %ba = sub i128 %b, %a
-  %sel = select i1 %cmp, i128 %ba, i128 %ab
+  %sel = select i1 %cmp, i128 %ab, i128 %ba
   ret i128 %sel
 }
 

diff --git a/llvm/test/CodeGen/AArch64/abds.ll b/llvm/test/CodeGen/AArch64/abds.ll
index 45bb8749b25ed9..0e35f8240848b1 100644
--- a/llvm/test/CodeGen/AArch64/abds.ll
+++ b/llvm/test/CodeGen/AArch64/abds.ll
@@ -343,31 +343,30 @@ define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub x8, x1, x0
 ; CHECK-NEXT:    subs x9, x0, x1
-; CHECK-NEXT:    csel x0, x8, x9, ge
+; CHECK-NEXT:    csel x0, x9, x8, gt
 ; CHECK-NEXT:    ret
   %cmp = icmp sge i64 %a, %b
   %ab = sub i64 %a, %b
   %ba = sub i64 %b, %a
-  %sel = select i1 %cmp, i64 %ba, i64 %ab
+  %sel = select i1 %cmp, i64 %ab, i64 %ba
   ret i64 %sel
 }
 
 define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
 ; CHECK-LABEL: abd_cmp_i128:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmp x0, x2
-; CHECK-NEXT:    sbc x8, x1, x3
-; CHECK-NEXT:    subs x9, x2, x0
-; CHECK-NEXT:    sbc x10, x3, x1
-; CHECK-NEXT:    subs x11, x0, x2
-; CHECK-NEXT:    sbcs xzr, x1, x3
-; CHECK-NEXT:    csel x0, x9, x11, ge
-; CHECK-NEXT:    csel x1, x10, x8, ge
+; CHECK-NEXT:    subs x8, x0, x2
+; CHECK-NEXT:    sbc x9, x1, x3
+; CHECK-NEXT:    subs x10, x2, x0
+; CHECK-NEXT:    sbc x11, x3, x1
+; CHECK-NEXT:    sbcs xzr, x3, x1
+; CHECK-NEXT:    csel x0, x8, x10, lt
+; CHECK-NEXT:    csel x1, x9, x11, lt
 ; CHECK-NEXT:    ret
   %cmp = icmp sge i128 %a, %b
   %ab = sub i128 %a, %b
   %ba = sub i128 %b, %a
-  %sel = select i1 %cmp, i128 %ba, i128 %ab
+  %sel = select i1 %cmp, i128 %ab, i128 %ba
   ret i128 %sel
 }
 

diff --git a/llvm/test/CodeGen/AArch64/abdu-neg.ll b/llvm/test/CodeGen/AArch64/abdu-neg.ll
index b148a29a72976c..2118816ca7c589 100644
--- a/llvm/test/CodeGen/AArch64/abdu-neg.ll
+++ b/llvm/test/CodeGen/AArch64/abdu-neg.ll
@@ -379,31 +379,31 @@ define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub x8, x1, x0
 ; CHECK-NEXT:    subs x9, x0, x1
-; CHECK-NEXT:    csel x0, x9, x8, hi
+; CHECK-NEXT:    csel x0, x9, x8, lo
 ; CHECK-NEXT:    ret
   %cmp = icmp ult i64 %a, %b
   %ab = sub i64 %a, %b
   %ba = sub i64 %b, %a
-  %sel = select i1 %cmp, i64 %ba, i64 %ab
+  %sel = select i1 %cmp, i64 %ab, i64 %ba
   ret i64 %sel
 }
 
 define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
 ; CHECK-LABEL: abd_cmp_i128:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    subs x8, x0, x2
-; CHECK-NEXT:    sbcs x9, x1, x3
-; CHECK-NEXT:    cset w10, lo
-; CHECK-NEXT:    sbfx x10, x10, #0, #1
-; CHECK-NEXT:    eor x8, x8, x10
-; CHECK-NEXT:    eor x9, x9, x10
-; CHECK-NEXT:    subs x0, x8, x10
-; CHECK-NEXT:    sbc x1, x9, x10
+; CHECK-NEXT:    cmp x0, x2
+; CHECK-NEXT:    sbc x8, x1, x3
+; CHECK-NEXT:    subs x9, x2, x0
+; CHECK-NEXT:    sbc x10, x3, x1
+; CHECK-NEXT:    subs x11, x0, x2
+; CHECK-NEXT:    sbcs xzr, x1, x3
+; CHECK-NEXT:    csel x0, x11, x9, lo
+; CHECK-NEXT:    csel x1, x8, x10, lo
 ; CHECK-NEXT:    ret
   %cmp = icmp ult i128 %a, %b
   %ab = sub i128 %a, %b
   %ba = sub i128 %b, %a
-  %sel = select i1 %cmp, i128 %ba, i128 %ab
+  %sel = select i1 %cmp, i128 %ab, i128 %ba
   ret i128 %sel
 }
 

diff --git a/llvm/test/CodeGen/AArch64/abdu.ll b/llvm/test/CodeGen/AArch64/abdu.ll
index 22d41dfb85a629..eb866e6a78a9b0 100644
--- a/llvm/test/CodeGen/AArch64/abdu.ll
+++ b/llvm/test/CodeGen/AArch64/abdu.ll
@@ -346,31 +346,31 @@ define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub x8, x1, x0
 ; CHECK-NEXT:    subs x9, x0, x1
-; CHECK-NEXT:    csel x0, x8, x9, hs
+; CHECK-NEXT:    csel x0, x9, x8, hi
 ; CHECK-NEXT:    ret
   %cmp = icmp uge i64 %a, %b
   %ab = sub i64 %a, %b
   %ba = sub i64 %b, %a
-  %sel = select i1 %cmp, i64 %ba, i64 %ab
+  %sel = select i1 %cmp, i64 %ab, i64 %ba
   ret i64 %sel
 }
 
 define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
 ; CHECK-LABEL: abd_cmp_i128:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmp x0, x2
-; CHECK-NEXT:    sbc x8, x1, x3
-; CHECK-NEXT:    subs x9, x2, x0
-; CHECK-NEXT:    sbc x10, x3, x1
-; CHECK-NEXT:    subs x11, x0, x2
-; CHECK-NEXT:    sbcs xzr, x1, x3
-; CHECK-NEXT:    csel x0, x9, x11, hs
-; CHECK-NEXT:    csel x1, x10, x8, hs
+; CHECK-NEXT:    subs x8, x0, x2
+; CHECK-NEXT:    sbcs x9, x1, x3
+; CHECK-NEXT:    cset w10, lo
+; CHECK-NEXT:    sbfx x10, x10, #0, #1
+; CHECK-NEXT:    eor x8, x8, x10
+; CHECK-NEXT:    eor x9, x9, x10
+; CHECK-NEXT:    subs x0, x8, x10
+; CHECK-NEXT:    sbc x1, x9, x10
 ; CHECK-NEXT:    ret
   %cmp = icmp uge i128 %a, %b
   %ab = sub i128 %a, %b
   %ba = sub i128 %b, %a
-  %sel = select i1 %cmp, i128 %ba, i128 %ab
+  %sel = select i1 %cmp, i128 %ab, i128 %ba
   ret i128 %sel
 }
 

diff --git a/llvm/test/CodeGen/RISCV/abds-neg.ll b/llvm/test/CodeGen/RISCV/abds-neg.ll
index 058f105e8f7358..168615983d9709 100644
--- a/llvm/test/CodeGen/RISCV/abds-neg.ll
+++ b/llvm/test/CodeGen/RISCV/abds-neg.ll
@@ -1791,20 +1791,20 @@ define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
 define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-LABEL: abd_cmp_i64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    sltu a4, a2, a0
+; RV32I-NEXT:    sltu a4, a0, a2
 ; RV32I-NEXT:    mv a5, a4
 ; RV32I-NEXT:    beq a1, a3, .LBB21_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    slt a5, a3, a1
+; RV32I-NEXT:    slt a5, a1, a3
 ; RV32I-NEXT:  .LBB21_2:
 ; RV32I-NEXT:    bnez a5, .LBB21_4
 ; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    sltu a4, a2, a0
 ; RV32I-NEXT:    sub a1, a3, a1
 ; RV32I-NEXT:    sub a1, a1, a4
 ; RV32I-NEXT:    sub a0, a2, a0
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB21_4:
-; RV32I-NEXT:    sltu a4, a0, a2
 ; RV32I-NEXT:    sub a1, a1, a3
 ; RV32I-NEXT:    sub a1, a1, a4
 ; RV32I-NEXT:    sub a0, a0, a2
@@ -1812,7 +1812,7 @@ define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
 ;
 ; RV64I-LABEL: abd_cmp_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    blt a1, a0, .LBB21_2
+; RV64I-NEXT:    blt a0, a1, .LBB21_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    sub a0, a1, a0
 ; RV64I-NEXT:    ret
@@ -1822,20 +1822,20 @@ define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
 ;
 ; RV32ZBB-LABEL: abd_cmp_i64:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    sltu a4, a2, a0
+; RV32ZBB-NEXT:    sltu a4, a0, a2
 ; RV32ZBB-NEXT:    mv a5, a4
 ; RV32ZBB-NEXT:    beq a1, a3, .LBB21_2
 ; RV32ZBB-NEXT:  # %bb.1:
-; RV32ZBB-NEXT:    slt a5, a3, a1
+; RV32ZBB-NEXT:    slt a5, a1, a3
 ; RV32ZBB-NEXT:  .LBB21_2:
 ; RV32ZBB-NEXT:    bnez a5, .LBB21_4
 ; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    sltu a4, a2, a0
 ; RV32ZBB-NEXT:    sub a1, a3, a1
 ; RV32ZBB-NEXT:    sub a1, a1, a4
 ; RV32ZBB-NEXT:    sub a0, a2, a0
 ; RV32ZBB-NEXT:    ret
 ; RV32ZBB-NEXT:  .LBB21_4:
-; RV32ZBB-NEXT:    sltu a4, a0, a2
 ; RV32ZBB-NEXT:    sub a1, a1, a3
 ; RV32ZBB-NEXT:    sub a1, a1, a4
 ; RV32ZBB-NEXT:    sub a0, a0, a2
@@ -1843,109 +1843,103 @@ define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
 ;
 ; RV64ZBB-LABEL: abd_cmp_i64:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    min a2, a0, a1
-; RV64ZBB-NEXT:    max a0, a0, a1
-; RV64ZBB-NEXT:    sub a0, a0, a2
+; RV64ZBB-NEXT:    blt a0, a1, .LBB21_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    sub a0, a1, a0
+; RV64ZBB-NEXT:    ret
+; RV64ZBB-NEXT:  .LBB21_2:
+; RV64ZBB-NEXT:    sub a0, a0, a1
 ; RV64ZBB-NEXT:    ret
   %cmp = icmp slt i64 %a, %b
   %ab = sub i64 %a, %b
   %ba = sub i64 %b, %a
-  %sel = select i1 %cmp, i64 %ba, i64 %ab
+  %sel = select i1 %cmp, i64 %ab, i64 %ba
   ret i64 %sel
 }
 
 define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
 ; RV32I-LABEL: abd_cmp_i128:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lw a3, 0(a1)
-; RV32I-NEXT:    lw a5, 0(a2)
-; RV32I-NEXT:    lw a4, 4(a1)
-; RV32I-NEXT:    lw a6, 8(a1)
-; RV32I-NEXT:    lw a7, 8(a2)
+; RV32I-NEXT:    lw a3, 0(a2)
+; RV32I-NEXT:    lw a4, 0(a1)
+; RV32I-NEXT:    lw a5, 4(a2)
+; RV32I-NEXT:    lw a6, 8(a2)
+; RV32I-NEXT:    lw a7, 8(a1)
+; RV32I-NEXT:    lw a2, 12(a2)
 ; RV32I-NEXT:    lw t0, 12(a1)
-; RV32I-NEXT:    lw t1, 12(a2)
-; RV32I-NEXT:    lw a1, 4(a2)
-; RV32I-NEXT:    sltu a2, a7, a6
-; RV32I-NEXT:    mv t4, a2
-; RV32I-NEXT:    beq t0, t1, .LBB22_2
+; RV32I-NEXT:    lw a1, 4(a1)
+; RV32I-NEXT:    sltu t1, a7, a6
+; RV32I-NEXT:    mv t4, t1
+; RV32I-NEXT:    beq t0, a2, .LBB22_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    slt t4, t1, t0
+; RV32I-NEXT:    slt t4, t0, a2
 ; RV32I-NEXT:  .LBB22_2:
-; RV32I-NEXT:    sltu t2, a5, a3
-; RV32I-NEXT:    sltu t5, a1, a4
+; RV32I-NEXT:    sltu t2, a4, a3
 ; RV32I-NEXT:    mv t3, t2
-; RV32I-NEXT:    beq a4, a1, .LBB22_4
+; RV32I-NEXT:    beq a1, a5, .LBB22_4
 ; RV32I-NEXT:  # %bb.3:
-; RV32I-NEXT:    mv t3, t5
+; RV32I-NEXT:    sltu t3, a1, a5
 ; RV32I-NEXT:  .LBB22_4:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    xor t6, t0, t1
-; RV32I-NEXT:    xor s0, a6, a7
-; RV32I-NEXT:    or t6, s0, t6
-; RV32I-NEXT:    beqz t6, .LBB22_6
+; RV32I-NEXT:    xor t5, t0, a2
+; RV32I-NEXT:    xor t6, a7, a6
+; RV32I-NEXT:    or t5, t6, t5
+; RV32I-NEXT:    mv t6, t3
+; RV32I-NEXT:    beqz t5, .LBB22_6
 ; RV32I-NEXT:  # %bb.5:
-; RV32I-NEXT:    mv t3, t4
+; RV32I-NEXT:    mv t6, t4
 ; RV32I-NEXT:  .LBB22_6:
-; RV32I-NEXT:    mv t4, t2
-; RV32I-NEXT:    beq a1, a4, .LBB22_8
+; RV32I-NEXT:    sltu t4, a3, a4
+; RV32I-NEXT:    mv t5, t4
+; RV32I-NEXT:    beq a1, a5, .LBB22_8
 ; RV32I-NEXT:  # %bb.7:
-; RV32I-NEXT:    mv t4, t5
+; RV32I-NEXT:    sltu t5, a5, a1
 ; RV32I-NEXT:  .LBB22_8:
-; RV32I-NEXT:    sltu t5, a3, a5
-; RV32I-NEXT:    mv t6, t5
-; RV32I-NEXT:    beq a4, a1, .LBB22_10
+; RV32I-NEXT:    bnez t6, .LBB22_10
 ; RV32I-NEXT:  # %bb.9:
-; RV32I-NEXT:    sltu t6, a4, a1
+; RV32I-NEXT:    sltu t1, a6, a7
+; RV32I-NEXT:    sub a2, a2, t0
+; RV32I-NEXT:    sub a2, a2, t1
+; RV32I-NEXT:    sub a6, a6, a7
+; RV32I-NEXT:    sltu a7, a6, t5
+; RV32I-NEXT:    sub a2, a2, a7
+; RV32I-NEXT:    sub a6, a6, t5
+; RV32I-NEXT:    sub a5, a5, a1
+; RV32I-NEXT:    sub a1, a5, t4
+; RV32I-NEXT:    sub a3, a3, a4
+; RV32I-NEXT:    j .LBB22_11
 ; RV32I-NEXT:  .LBB22_10:
-; RV32I-NEXT:    bnez t3, .LBB22_12
-; RV32I-NEXT:  # %bb.11:
-; RV32I-NEXT:    sub t0, t1, t0
-; RV32I-NEXT:    sub a6, a7, a6
 ; RV32I-NEXT:    sub a2, t0, a2
-; RV32I-NEXT:    sltu a7, a6, t4
+; RV32I-NEXT:    sub a6, a7, a6
+; RV32I-NEXT:    sub a2, a2, t1
+; RV32I-NEXT:    sltu a7, a6, t3
+; RV32I-NEXT:    sub a1, a1, a5
 ; RV32I-NEXT:    sub a2, a2, a7
-; RV32I-NEXT:    sub a3, a5, a3
-; RV32I-NEXT:    sub a1, a1, a4
+; RV32I-NEXT:    sub a6, a6, t3
 ; RV32I-NEXT:    sub a1, a1, t2
-; RV32I-NEXT:    sub a4, a6, t4
-; RV32I-NEXT:    j .LBB22_13
-; RV32I-NEXT:  .LBB22_12:
-; RV32I-NEXT:    sltu a2, a6, a7
-; RV32I-NEXT:    sub t0, t0, t1
-; RV32I-NEXT:    sub a2, t0, a2
-; RV32I-NEXT:    sub a6, a6, a7
-; RV32I-NEXT:    sltu a7, a6, t6
-; RV32I-NEXT:    sub a2, a2, a7
-; RV32I-NEXT:    sub a3, a3, a5
-; RV32I-NEXT:    sub a4, a4, a1
-; RV32I-NEXT:    sub a1, a4, t5
-; RV32I-NEXT:    sub a4, a6, t6
-; RV32I-NEXT:  .LBB22_13:
-; RV32I-NEXT:    sw a4, 8(a0)
+; RV32I-NEXT:    sub a3, a4, a3
+; RV32I-NEXT:  .LBB22_11:
+; RV32I-NEXT:    sw a6, 8(a0)
 ; RV32I-NEXT:    sw a1, 4(a0)
 ; RV32I-NEXT:    sw a3, 0(a0)
 ; RV32I-NEXT:    sw a2, 12(a0)
-; RV32I-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: abd_cmp_i128:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sltu a4, a2, a0
+; RV64I-NEXT:    sltu a4, a0, a2
 ; RV64I-NEXT:    mv a5, a4
 ; RV64I-NEXT:    beq a1, a3, .LBB22_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    slt a5, a3, a1
+; RV64I-NEXT:    slt a5, a1, a3
 ; RV64I-NEXT:  .LBB22_2:
 ; RV64I-NEXT:    bnez a5, .LBB22_4
 ; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    sltu a4, a2, a0
 ; RV64I-NEXT:    sub a1, a3, a1
 ; RV64I-NEXT:    sub a1, a1, a4
 ; RV64I-NEXT:    sub a0, a2, a0
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB22_4:
-; RV64I-NEXT:    sltu a4, a0, a2
 ; RV64I-NEXT:    sub a1, a1, a3
 ; RV64I-NEXT:    sub a1, a1, a4
 ; RV64I-NEXT:    sub a0, a0, a2
@@ -1953,95 +1947,86 @@ define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
 ;
 ; RV32ZBB-LABEL: abd_cmp_i128:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    lw a3, 0(a1)
-; RV32ZBB-NEXT:    lw a5, 0(a2)
-; RV32ZBB-NEXT:    lw a4, 4(a1)
-; RV32ZBB-NEXT:    lw a6, 8(a1)
-; RV32ZBB-NEXT:    lw a7, 8(a2)
+; RV32ZBB-NEXT:    lw a3, 0(a2)
+; RV32ZBB-NEXT:    lw a4, 0(a1)
+; RV32ZBB-NEXT:    lw a5, 4(a2)
+; RV32ZBB-NEXT:    lw a6, 8(a2)
+; RV32ZBB-NEXT:    lw a7, 8(a1)
+; RV32ZBB-NEXT:    lw a2, 12(a2)
 ; RV32ZBB-NEXT:    lw t0, 12(a1)
-; RV32ZBB-NEXT:    lw t1, 12(a2)
-; RV32ZBB-NEXT:    lw a1, 4(a2)
-; RV32ZBB-NEXT:    sltu a2, a7, a6
-; RV32ZBB-NEXT:    mv t4, a2
-; RV32ZBB-NEXT:    beq t0, t1, .LBB22_2
+; RV32ZBB-NEXT:    lw a1, 4(a1)
+; RV32ZBB-NEXT:    sltu t1, a7, a6
+; RV32ZBB-NEXT:    mv t4, t1
+; RV32ZBB-NEXT:    beq t0, a2, .LBB22_2
 ; RV32ZBB-NEXT:  # %bb.1:
-; RV32ZBB-NEXT:    slt t4, t1, t0
+; RV32ZBB-NEXT:    slt t4, t0, a2
 ; RV32ZBB-NEXT:  .LBB22_2:
-; RV32ZBB-NEXT:    sltu t2, a5, a3
-; RV32ZBB-NEXT:    sltu t5, a1, a4
+; RV32ZBB-NEXT:    sltu t2, a4, a3
 ; RV32ZBB-NEXT:    mv t3, t2
-; RV32ZBB-NEXT:    beq a4, a1, .LBB22_4
+; RV32ZBB-NEXT:    beq a1, a5, .LBB22_4
 ; RV32ZBB-NEXT:  # %bb.3:
-; RV32ZBB-NEXT:    mv t3, t5
+; RV32ZBB-NEXT:    sltu t3, a1, a5
 ; RV32ZBB-NEXT:  .LBB22_4:
-; RV32ZBB-NEXT:    addi sp, sp, -16
-; RV32ZBB-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
-; RV32ZBB-NEXT:    xor t6, t0, t1
-; RV32ZBB-NEXT:    xor s0, a6, a7
-; RV32ZBB-NEXT:    or t6, s0, t6
-; RV32ZBB-NEXT:    beqz t6, .LBB22_6
+; RV32ZBB-NEXT:    xor t5, t0, a2
+; RV32ZBB-NEXT:    xor t6, a7, a6
+; RV32ZBB-NEXT:    or t5, t6, t5
+; RV32ZBB-NEXT:    mv t6, t3
+; RV32ZBB-NEXT:    beqz t5, .LBB22_6
 ; RV32ZBB-NEXT:  # %bb.5:
-; RV32ZBB-NEXT:    mv t3, t4
+; RV32ZBB-NEXT:    mv t6, t4
 ; RV32ZBB-NEXT:  .LBB22_6:
-; RV32ZBB-NEXT:    mv t4, t2
-; RV32ZBB-NEXT:    beq a1, a4, .LBB22_8
+; RV32ZBB-NEXT:    sltu t4, a3, a4
+; RV32ZBB-NEXT:    mv t5, t4
+; RV32ZBB-NEXT:    beq a1, a5, .LBB22_8
 ; RV32ZBB-NEXT:  # %bb.7:
-; RV32ZBB-NEXT:    mv t4, t5
+; RV32ZBB-NEXT:    sltu t5, a5, a1
 ; RV32ZBB-NEXT:  .LBB22_8:
-; RV32ZBB-NEXT:    sltu t5, a3, a5
-; RV32ZBB-NEXT:    mv t6, t5
-; RV32ZBB-NEXT:    beq a4, a1, .LBB22_10
+; RV32ZBB-NEXT:    bnez t6, .LBB22_10
 ; RV32ZBB-NEXT:  # %bb.9:
-; RV32ZBB-NEXT:    sltu t6, a4, a1
+; RV32ZBB-NEXT:    sltu t1, a6, a7
+; RV32ZBB-NEXT:    sub a2, a2, t0
+; RV32ZBB-NEXT:    sub a2, a2, t1
+; RV32ZBB-NEXT:    sub a6, a6, a7
+; RV32ZBB-NEXT:    sltu a7, a6, t5
+; RV32ZBB-NEXT:    sub a2, a2, a7
+; RV32ZBB-NEXT:    sub a6, a6, t5
+; RV32ZBB-NEXT:    sub a5, a5, a1
+; RV32ZBB-NEXT:    sub a1, a5, t4
+; RV32ZBB-NEXT:    sub a3, a3, a4
+; RV32ZBB-NEXT:    j .LBB22_11
 ; RV32ZBB-NEXT:  .LBB22_10:
-; RV32ZBB-NEXT:    bnez t3, .LBB22_12
-; RV32ZBB-NEXT:  # %bb.11:
-; RV32ZBB-NEXT:    sub t0, t1, t0
-; RV32ZBB-NEXT:    sub a6, a7, a6
 ; RV32ZBB-NEXT:    sub a2, t0, a2
-; RV32ZBB-NEXT:    sltu a7, a6, t4
+; RV32ZBB-NEXT:    sub a6, a7, a6
+; RV32ZBB-NEXT:    sub a2, a2, t1
+; RV32ZBB-NEXT:    sltu a7, a6, t3
+; RV32ZBB-NEXT:    sub a1, a1, a5
 ; RV32ZBB-NEXT:    sub a2, a2, a7
-; RV32ZBB-NEXT:    sub a3, a5, a3
-; RV32ZBB-NEXT:    sub a1, a1, a4
+; RV32ZBB-NEXT:    sub a6, a6, t3
 ; RV32ZBB-NEXT:    sub a1, a1, t2
-; RV32ZBB-NEXT:    sub a4, a6, t4
-; RV32ZBB-NEXT:    j .LBB22_13
-; RV32ZBB-NEXT:  .LBB22_12:
-; RV32ZBB-NEXT:    sltu a2, a6, a7
-; RV32ZBB-NEXT:    sub t0, t0, t1
-; RV32ZBB-NEXT:    sub a2, t0, a2
-; RV32ZBB-NEXT:    sub a6, a6, a7
-; RV32ZBB-NEXT:    sltu a7, a6, t6
-; RV32ZBB-NEXT:    sub a2, a2, a7
-; RV32ZBB-NEXT:    sub a3, a3, a5
-; RV32ZBB-NEXT:    sub a4, a4, a1
-; RV32ZBB-NEXT:    sub a1, a4, t5
-; RV32ZBB-NEXT:    sub a4, a6, t6
-; RV32ZBB-NEXT:  .LBB22_13:
-; RV32ZBB-NEXT:    sw a4, 8(a0)
+; RV32ZBB-NEXT:    sub a3, a4, a3
+; RV32ZBB-NEXT:  .LBB22_11:
+; RV32ZBB-NEXT:    sw a6, 8(a0)
 ; RV32ZBB-NEXT:    sw a1, 4(a0)
 ; RV32ZBB-NEXT:    sw a3, 0(a0)
 ; RV32ZBB-NEXT:    sw a2, 12(a0)
-; RV32ZBB-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
-; RV32ZBB-NEXT:    addi sp, sp, 16
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: abd_cmp_i128:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    sltu a4, a2, a0
+; RV64ZBB-NEXT:    sltu a4, a0, a2
 ; RV64ZBB-NEXT:    mv a5, a4
 ; RV64ZBB-NEXT:    beq a1, a3, .LBB22_2
 ; RV64ZBB-NEXT:  # %bb.1:
-; RV64ZBB-NEXT:    slt a5, a3, a1
+; RV64ZBB-NEXT:    slt a5, a1, a3
 ; RV64ZBB-NEXT:  .LBB22_2:
 ; RV64ZBB-NEXT:    bnez a5, .LBB22_4
 ; RV64ZBB-NEXT:  # %bb.3:
+; RV64ZBB-NEXT:    sltu a4, a2, a0
 ; RV64ZBB-NEXT:    sub a1, a3, a1
 ; RV64ZBB-NEXT:    sub a1, a1, a4
 ; RV64ZBB-NEXT:    sub a0, a2, a0
 ; RV64ZBB-NEXT:    ret
 ; RV64ZBB-NEXT:  .LBB22_4:
-; RV64ZBB-NEXT:    sltu a4, a0, a2
 ; RV64ZBB-NEXT:    sub a1, a1, a3
 ; RV64ZBB-NEXT:    sub a1, a1, a4
 ; RV64ZBB-NEXT:    sub a0, a0, a2
@@ -2049,7 +2034,7 @@ define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
   %cmp = icmp slt i128 %a, %b
   %ab = sub i128 %a, %b
   %ba = sub i128 %b, %a
-  %sel = select i1 %cmp, i128 %ba, i128 %ab
+  %sel = select i1 %cmp, i128 %ab, i128 %ba
   ret i128 %sel
 }
 

diff --git a/llvm/test/CodeGen/RISCV/abds.ll b/llvm/test/CodeGen/RISCV/abds.ll
index b867a55445c95b..86b36d8f69e95f 100644
--- a/llvm/test/CodeGen/RISCV/abds.ll
+++ b/llvm/test/CodeGen/RISCV/abds.ll
@@ -1448,250 +1448,265 @@ define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
 define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-LABEL: abd_cmp_i64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    sltu a4, a0, a2
+; RV32I-NEXT:    sltu a4, a2, a0
 ; RV32I-NEXT:    mv a5, a4
 ; RV32I-NEXT:    beq a1, a3, .LBB21_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    slt a5, a1, a3
+; RV32I-NEXT:    slt a5, a3, a1
 ; RV32I-NEXT:  .LBB21_2:
-; RV32I-NEXT:    beqz a5, .LBB21_4
+; RV32I-NEXT:    bnez a5, .LBB21_4
 ; RV32I-NEXT:  # %bb.3:
-; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sub a1, a3, a1
 ; RV32I-NEXT:    sub a1, a1, a4
-; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    sub a0, a2, a0
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB21_4:
-; RV32I-NEXT:    sltu a4, a2, a0
-; RV32I-NEXT:    sub a1, a3, a1
+; RV32I-NEXT:    sltu a4, a0, a2
+; RV32I-NEXT:    sub a1, a1, a3
 ; RV32I-NEXT:    sub a1, a1, a4
-; RV32I-NEXT:    sub a0, a2, a0
+; RV32I-NEXT:    sub a0, a0, a2
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: abd_cmp_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    bge a0, a1, .LBB21_2
+; RV64I-NEXT:    blt a1, a0, .LBB21_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    sub a0, a1, a0
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB21_2:
-; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    sub a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: abd_cmp_i64:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    sltu a4, a0, a2
+; RV32ZBB-NEXT:    sltu a4, a2, a0
 ; RV32ZBB-NEXT:    mv a5, a4
 ; RV32ZBB-NEXT:    beq a1, a3, .LBB21_2
 ; RV32ZBB-NEXT:  # %bb.1:
-; RV32ZBB-NEXT:    slt a5, a1, a3
+; RV32ZBB-NEXT:    slt a5, a3, a1
 ; RV32ZBB-NEXT:  .LBB21_2:
-; RV32ZBB-NEXT:    beqz a5, .LBB21_4
+; RV32ZBB-NEXT:    bnez a5, .LBB21_4
 ; RV32ZBB-NEXT:  # %bb.3:
-; RV32ZBB-NEXT:    sub a1, a1, a3
+; RV32ZBB-NEXT:    sub a1, a3, a1
 ; RV32ZBB-NEXT:    sub a1, a1, a4
-; RV32ZBB-NEXT:    sub a0, a0, a2
+; RV32ZBB-NEXT:    sub a0, a2, a0
 ; RV32ZBB-NEXT:    ret
 ; RV32ZBB-NEXT:  .LBB21_4:
-; RV32ZBB-NEXT:    sltu a4, a2, a0
-; RV32ZBB-NEXT:    sub a1, a3, a1
+; RV32ZBB-NEXT:    sltu a4, a0, a2
+; RV32ZBB-NEXT:    sub a1, a1, a3
 ; RV32ZBB-NEXT:    sub a1, a1, a4
-; RV32ZBB-NEXT:    sub a0, a2, a0
+; RV32ZBB-NEXT:    sub a0, a0, a2
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: abd_cmp_i64:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    bge a0, a1, .LBB21_2
-; RV64ZBB-NEXT:  # %bb.1:
-; RV64ZBB-NEXT:    sub a0, a0, a1
-; RV64ZBB-NEXT:    ret
-; RV64ZBB-NEXT:  .LBB21_2:
-; RV64ZBB-NEXT:    sub a0, a1, a0
+; RV64ZBB-NEXT:    min a2, a0, a1
+; RV64ZBB-NEXT:    max a0, a0, a1
+; RV64ZBB-NEXT:    sub a0, a0, a2
 ; RV64ZBB-NEXT:    ret
   %cmp = icmp sge i64 %a, %b
   %ab = sub i64 %a, %b
   %ba = sub i64 %b, %a
-  %sel = select i1 %cmp, i64 %ba, i64 %ab
+  %sel = select i1 %cmp, i64 %ab, i64 %ba
   ret i64 %sel
 }
 
 define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
 ; RV32I-LABEL: abd_cmp_i128:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lw a3, 0(a2)
-; RV32I-NEXT:    lw a4, 0(a1)
-; RV32I-NEXT:    lw a5, 4(a2)
-; RV32I-NEXT:    lw a6, 8(a2)
-; RV32I-NEXT:    lw a7, 8(a1)
-; RV32I-NEXT:    lw a2, 12(a2)
+; RV32I-NEXT:    lw a3, 0(a1)
+; RV32I-NEXT:    lw a5, 0(a2)
+; RV32I-NEXT:    lw a4, 4(a1)
+; RV32I-NEXT:    lw a6, 8(a1)
+; RV32I-NEXT:    lw a7, 8(a2)
 ; RV32I-NEXT:    lw t0, 12(a1)
-; RV32I-NEXT:    lw a1, 4(a1)
-; RV32I-NEXT:    sltu t1, a7, a6
-; RV32I-NEXT:    mv t4, t1
-; RV32I-NEXT:    beq t0, a2, .LBB22_2
+; RV32I-NEXT:    lw t1, 12(a2)
+; RV32I-NEXT:    lw a1, 4(a2)
+; RV32I-NEXT:    sltu a2, a7, a6
+; RV32I-NEXT:    mv t4, a2
+; RV32I-NEXT:    beq t0, t1, .LBB22_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    slt t4, t0, a2
+; RV32I-NEXT:    slt t4, t1, t0
 ; RV32I-NEXT:  .LBB22_2:
-; RV32I-NEXT:    xor t3, t0, a2
-; RV32I-NEXT:    xor t5, a7, a6
-; RV32I-NEXT:    sltu t2, a4, a3
-; RV32I-NEXT:    or t5, t5, t3
+; RV32I-NEXT:    sltu t2, a5, a3
+; RV32I-NEXT:    sltu t5, a1, a4
 ; RV32I-NEXT:    mv t3, t2
-; RV32I-NEXT:    beq a1, a5, .LBB22_4
+; RV32I-NEXT:    beq a4, a1, .LBB22_4
 ; RV32I-NEXT:  # %bb.3:
-; RV32I-NEXT:    sltu t3, a1, a5
+; RV32I-NEXT:    mv t3, t5
 ; RV32I-NEXT:  .LBB22_4:
-; RV32I-NEXT:    mv t6, t3
-; RV32I-NEXT:    beqz t5, .LBB22_6
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    xor t6, t0, t1
+; RV32I-NEXT:    xor s0, a6, a7
+; RV32I-NEXT:    or t6, s0, t6
+; RV32I-NEXT:    beqz t6, .LBB22_6
 ; RV32I-NEXT:  # %bb.5:
-; RV32I-NEXT:    mv t6, t4
+; RV32I-NEXT:    mv t3, t4
 ; RV32I-NEXT:  .LBB22_6:
-; RV32I-NEXT:    sltu t4, a3, a4
-; RV32I-NEXT:    mv t5, t4
-; RV32I-NEXT:    beq a1, a5, .LBB22_8
+; RV32I-NEXT:    mv t4, t2
+; RV32I-NEXT:    beq a1, a4, .LBB22_8
 ; RV32I-NEXT:  # %bb.7:
-; RV32I-NEXT:    sltu t5, a5, a1
+; RV32I-NEXT:    mv t4, t5
 ; RV32I-NEXT:  .LBB22_8:
-; RV32I-NEXT:    beqz t6, .LBB22_10
+; RV32I-NEXT:    sltu t5, a3, a5
+; RV32I-NEXT:    mv t6, t5
+; RV32I-NEXT:    beq a4, a1, .LBB22_10
 ; RV32I-NEXT:  # %bb.9:
-; RV32I-NEXT:    sub a2, t0, a2
+; RV32I-NEXT:    sltu t6, a4, a1
+; RV32I-NEXT:  .LBB22_10:
+; RV32I-NEXT:    bnez t3, .LBB22_12
+; RV32I-NEXT:  # %bb.11:
+; RV32I-NEXT:    sub t0, t1, t0
 ; RV32I-NEXT:    sub a6, a7, a6
-; RV32I-NEXT:    sub a2, a2, t1
-; RV32I-NEXT:    sltu a7, a6, t3
-; RV32I-NEXT:    sub a1, a1, a5
+; RV32I-NEXT:    sub a2, t0, a2
+; RV32I-NEXT:    sltu a7, a6, t4
 ; RV32I-NEXT:    sub a2, a2, a7
-; RV32I-NEXT:    sub a6, a6, t3
+; RV32I-NEXT:    sub a3, a5, a3
+; RV32I-NEXT:    sub a1, a1, a4
 ; RV32I-NEXT:    sub a1, a1, t2
-; RV32I-NEXT:    sub a3, a4, a3
-; RV32I-NEXT:    j .LBB22_11
-; RV32I-NEXT:  .LBB22_10:
-; RV32I-NEXT:    sltu t1, a6, a7
-; RV32I-NEXT:    sub a2, a2, t0
-; RV32I-NEXT:    sub a2, a2, t1
+; RV32I-NEXT:    sub a4, a6, t4
+; RV32I-NEXT:    j .LBB22_13
+; RV32I-NEXT:  .LBB22_12:
+; RV32I-NEXT:    sltu a2, a6, a7
+; RV32I-NEXT:    sub t0, t0, t1
+; RV32I-NEXT:    sub a2, t0, a2
 ; RV32I-NEXT:    sub a6, a6, a7
-; RV32I-NEXT:    sltu a7, a6, t5
+; RV32I-NEXT:    sltu a7, a6, t6
 ; RV32I-NEXT:    sub a2, a2, a7
-; RV32I-NEXT:    sub a6, a6, t5
-; RV32I-NEXT:    sub a5, a5, a1
-; RV32I-NEXT:    sub a1, a5, t4
-; RV32I-NEXT:    sub a3, a3, a4
-; RV32I-NEXT:  .LBB22_11:
-; RV32I-NEXT:    sw a6, 8(a0)
+; RV32I-NEXT:    sub a3, a3, a5
+; RV32I-NEXT:    sub a4, a4, a1
+; RV32I-NEXT:    sub a1, a4, t5
+; RV32I-NEXT:    sub a4, a6, t6
+; RV32I-NEXT:  .LBB22_13:
+; RV32I-NEXT:    sw a4, 8(a0)
 ; RV32I-NEXT:    sw a1, 4(a0)
 ; RV32I-NEXT:    sw a3, 0(a0)
 ; RV32I-NEXT:    sw a2, 12(a0)
+; RV32I-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: abd_cmp_i128:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sltu a4, a0, a2
+; RV64I-NEXT:    sltu a4, a2, a0
 ; RV64I-NEXT:    mv a5, a4
 ; RV64I-NEXT:    beq a1, a3, .LBB22_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    slt a5, a1, a3
+; RV64I-NEXT:    slt a5, a3, a1
 ; RV64I-NEXT:  .LBB22_2:
-; RV64I-NEXT:    beqz a5, .LBB22_4
+; RV64I-NEXT:    bnez a5, .LBB22_4
 ; RV64I-NEXT:  # %bb.3:
-; RV64I-NEXT:    sub a1, a1, a3
+; RV64I-NEXT:    sub a1, a3, a1
 ; RV64I-NEXT:    sub a1, a1, a4
-; RV64I-NEXT:    sub a0, a0, a2
+; RV64I-NEXT:    sub a0, a2, a0
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB22_4:
-; RV64I-NEXT:    sltu a4, a2, a0
-; RV64I-NEXT:    sub a1, a3, a1
+; RV64I-NEXT:    sltu a4, a0, a2
+; RV64I-NEXT:    sub a1, a1, a3
 ; RV64I-NEXT:    sub a1, a1, a4
-; RV64I-NEXT:    sub a0, a2, a0
+; RV64I-NEXT:    sub a0, a0, a2
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: abd_cmp_i128:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    lw a3, 0(a2)
-; RV32ZBB-NEXT:    lw a4, 0(a1)
-; RV32ZBB-NEXT:    lw a5, 4(a2)
-; RV32ZBB-NEXT:    lw a6, 8(a2)
-; RV32ZBB-NEXT:    lw a7, 8(a1)
-; RV32ZBB-NEXT:    lw a2, 12(a2)
+; RV32ZBB-NEXT:    lw a3, 0(a1)
+; RV32ZBB-NEXT:    lw a5, 0(a2)
+; RV32ZBB-NEXT:    lw a4, 4(a1)
+; RV32ZBB-NEXT:    lw a6, 8(a1)
+; RV32ZBB-NEXT:    lw a7, 8(a2)
 ; RV32ZBB-NEXT:    lw t0, 12(a1)
-; RV32ZBB-NEXT:    lw a1, 4(a1)
-; RV32ZBB-NEXT:    sltu t1, a7, a6
-; RV32ZBB-NEXT:    mv t4, t1
-; RV32ZBB-NEXT:    beq t0, a2, .LBB22_2
+; RV32ZBB-NEXT:    lw t1, 12(a2)
+; RV32ZBB-NEXT:    lw a1, 4(a2)
+; RV32ZBB-NEXT:    sltu a2, a7, a6
+; RV32ZBB-NEXT:    mv t4, a2
+; RV32ZBB-NEXT:    beq t0, t1, .LBB22_2
 ; RV32ZBB-NEXT:  # %bb.1:
-; RV32ZBB-NEXT:    slt t4, t0, a2
+; RV32ZBB-NEXT:    slt t4, t1, t0
 ; RV32ZBB-NEXT:  .LBB22_2:
-; RV32ZBB-NEXT:    xor t3, t0, a2
-; RV32ZBB-NEXT:    xor t5, a7, a6
-; RV32ZBB-NEXT:    sltu t2, a4, a3
-; RV32ZBB-NEXT:    or t5, t5, t3
+; RV32ZBB-NEXT:    sltu t2, a5, a3
+; RV32ZBB-NEXT:    sltu t5, a1, a4
 ; RV32ZBB-NEXT:    mv t3, t2
-; RV32ZBB-NEXT:    beq a1, a5, .LBB22_4
+; RV32ZBB-NEXT:    beq a4, a1, .LBB22_4
 ; RV32ZBB-NEXT:  # %bb.3:
-; RV32ZBB-NEXT:    sltu t3, a1, a5
+; RV32ZBB-NEXT:    mv t3, t5
 ; RV32ZBB-NEXT:  .LBB22_4:
-; RV32ZBB-NEXT:    mv t6, t3
-; RV32ZBB-NEXT:    beqz t5, .LBB22_6
+; RV32ZBB-NEXT:    addi sp, sp, -16
+; RV32ZBB-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT:    xor t6, t0, t1
+; RV32ZBB-NEXT:    xor s0, a6, a7
+; RV32ZBB-NEXT:    or t6, s0, t6
+; RV32ZBB-NEXT:    beqz t6, .LBB22_6
 ; RV32ZBB-NEXT:  # %bb.5:
-; RV32ZBB-NEXT:    mv t6, t4
+; RV32ZBB-NEXT:    mv t3, t4
 ; RV32ZBB-NEXT:  .LBB22_6:
-; RV32ZBB-NEXT:    sltu t4, a3, a4
-; RV32ZBB-NEXT:    mv t5, t4
-; RV32ZBB-NEXT:    beq a1, a5, .LBB22_8
+; RV32ZBB-NEXT:    mv t4, t2
+; RV32ZBB-NEXT:    beq a1, a4, .LBB22_8
 ; RV32ZBB-NEXT:  # %bb.7:
-; RV32ZBB-NEXT:    sltu t5, a5, a1
+; RV32ZBB-NEXT:    mv t4, t5
 ; RV32ZBB-NEXT:  .LBB22_8:
-; RV32ZBB-NEXT:    beqz t6, .LBB22_10
+; RV32ZBB-NEXT:    sltu t5, a3, a5
+; RV32ZBB-NEXT:    mv t6, t5
+; RV32ZBB-NEXT:    beq a4, a1, .LBB22_10
 ; RV32ZBB-NEXT:  # %bb.9:
-; RV32ZBB-NEXT:    sub a2, t0, a2
+; RV32ZBB-NEXT:    sltu t6, a4, a1
+; RV32ZBB-NEXT:  .LBB22_10:
+; RV32ZBB-NEXT:    bnez t3, .LBB22_12
+; RV32ZBB-NEXT:  # %bb.11:
+; RV32ZBB-NEXT:    sub t0, t1, t0
 ; RV32ZBB-NEXT:    sub a6, a7, a6
-; RV32ZBB-NEXT:    sub a2, a2, t1
-; RV32ZBB-NEXT:    sltu a7, a6, t3
-; RV32ZBB-NEXT:    sub a1, a1, a5
+; RV32ZBB-NEXT:    sub a2, t0, a2
+; RV32ZBB-NEXT:    sltu a7, a6, t4
 ; RV32ZBB-NEXT:    sub a2, a2, a7
-; RV32ZBB-NEXT:    sub a6, a6, t3
+; RV32ZBB-NEXT:    sub a3, a5, a3
+; RV32ZBB-NEXT:    sub a1, a1, a4
 ; RV32ZBB-NEXT:    sub a1, a1, t2
-; RV32ZBB-NEXT:    sub a3, a4, a3
-; RV32ZBB-NEXT:    j .LBB22_11
-; RV32ZBB-NEXT:  .LBB22_10:
-; RV32ZBB-NEXT:    sltu t1, a6, a7
-; RV32ZBB-NEXT:    sub a2, a2, t0
-; RV32ZBB-NEXT:    sub a2, a2, t1
+; RV32ZBB-NEXT:    sub a4, a6, t4
+; RV32ZBB-NEXT:    j .LBB22_13
+; RV32ZBB-NEXT:  .LBB22_12:
+; RV32ZBB-NEXT:    sltu a2, a6, a7
+; RV32ZBB-NEXT:    sub t0, t0, t1
+; RV32ZBB-NEXT:    sub a2, t0, a2
 ; RV32ZBB-NEXT:    sub a6, a6, a7
-; RV32ZBB-NEXT:    sltu a7, a6, t5
+; RV32ZBB-NEXT:    sltu a7, a6, t6
 ; RV32ZBB-NEXT:    sub a2, a2, a7
-; RV32ZBB-NEXT:    sub a6, a6, t5
-; RV32ZBB-NEXT:    sub a5, a5, a1
-; RV32ZBB-NEXT:    sub a1, a5, t4
-; RV32ZBB-NEXT:    sub a3, a3, a4
-; RV32ZBB-NEXT:  .LBB22_11:
-; RV32ZBB-NEXT:    sw a6, 8(a0)
+; RV32ZBB-NEXT:    sub a3, a3, a5
+; RV32ZBB-NEXT:    sub a4, a4, a1
+; RV32ZBB-NEXT:    sub a1, a4, t5
+; RV32ZBB-NEXT:    sub a4, a6, t6
+; RV32ZBB-NEXT:  .LBB22_13:
+; RV32ZBB-NEXT:    sw a4, 8(a0)
 ; RV32ZBB-NEXT:    sw a1, 4(a0)
 ; RV32ZBB-NEXT:    sw a3, 0(a0)
 ; RV32ZBB-NEXT:    sw a2, 12(a0)
+; RV32ZBB-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT:    addi sp, sp, 16
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: abd_cmp_i128:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    sltu a4, a0, a2
+; RV64ZBB-NEXT:    sltu a4, a2, a0
 ; RV64ZBB-NEXT:    mv a5, a4
 ; RV64ZBB-NEXT:    beq a1, a3, .LBB22_2
 ; RV64ZBB-NEXT:  # %bb.1:
-; RV64ZBB-NEXT:    slt a5, a1, a3
+; RV64ZBB-NEXT:    slt a5, a3, a1
 ; RV64ZBB-NEXT:  .LBB22_2:
-; RV64ZBB-NEXT:    beqz a5, .LBB22_4
+; RV64ZBB-NEXT:    bnez a5, .LBB22_4
 ; RV64ZBB-NEXT:  # %bb.3:
-; RV64ZBB-NEXT:    sub a1, a1, a3
+; RV64ZBB-NEXT:    sub a1, a3, a1
 ; RV64ZBB-NEXT:    sub a1, a1, a4
-; RV64ZBB-NEXT:    sub a0, a0, a2
+; RV64ZBB-NEXT:    sub a0, a2, a0
 ; RV64ZBB-NEXT:    ret
 ; RV64ZBB-NEXT:  .LBB22_4:
-; RV64ZBB-NEXT:    sltu a4, a2, a0
-; RV64ZBB-NEXT:    sub a1, a3, a1
+; RV64ZBB-NEXT:    sltu a4, a0, a2
+; RV64ZBB-NEXT:    sub a1, a1, a3
 ; RV64ZBB-NEXT:    sub a1, a1, a4
-; RV64ZBB-NEXT:    sub a0, a2, a0
+; RV64ZBB-NEXT:    sub a0, a0, a2
 ; RV64ZBB-NEXT:    ret
   %cmp = icmp sge i128 %a, %b
   %ab = sub i128 %a, %b
   %ba = sub i128 %b, %a
-  %sel = select i1 %cmp, i128 %ba, i128 %ab
+  %sel = select i1 %cmp, i128 %ab, i128 %ba
   ret i128 %sel
 }
 

diff --git a/llvm/test/CodeGen/RISCV/abdu-neg.ll b/llvm/test/CodeGen/RISCV/abdu-neg.ll
index bcacdf44ab1030..87a06fc4403eb9 100644
--- a/llvm/test/CodeGen/RISCV/abdu-neg.ll
+++ b/llvm/test/CodeGen/RISCV/abdu-neg.ll
@@ -1597,17 +1597,6 @@ define i128 @abd_minmax_i128(i128 %a, i128 %b) nounwind {
 ;
 
 define i8 @abd_cmp_i8(i8 %a, i8 %b) nounwind {
-; CHECK-LABEL: abd_cmp_i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    andi a2, a0, 255
-; CHECK-NEXT:    andi a3, a1, 255
-; CHECK-NEXT:    bgeu a3, a2, .LBB18_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    sub a0, a1, a0
-; CHECK-NEXT:    ret
-; CHECK-NEXT:  .LBB18_2:
-; CHECK-NEXT:    sub a0, a0, a1
-; CHECK-NEXT:    ret
 ; NOZBB-LABEL: abd_cmp_i8:
 ; NOZBB:       # %bb.0:
 ; NOZBB-NEXT:    andi a2, a0, 255
@@ -1740,28 +1729,27 @@ define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-LABEL: abd_cmp_i64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sltu a4, a0, a2
-; RV32I-NEXT:    sub a3, a1, a3
-; RV32I-NEXT:    sub a3, a3, a4
-; RV32I-NEXT:    sub a2, a0, a2
-; RV32I-NEXT:    beq a3, a1, .LBB21_2
+; RV32I-NEXT:    mv a5, a4
+; RV32I-NEXT:    beq a1, a3, .LBB21_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    sltu a0, a1, a3
-; RV32I-NEXT:    j .LBB21_3
+; RV32I-NEXT:    sltu a5, a1, a3
 ; RV32I-NEXT:  .LBB21_2:
-; RV32I-NEXT:    sltu a0, a0, a2
-; RV32I-NEXT:  .LBB21_3:
-; RV32I-NEXT:    neg a1, a0
-; RV32I-NEXT:    xor a2, a2, a1
-; RV32I-NEXT:    sltu a4, a2, a1
-; RV32I-NEXT:    xor a1, a3, a1
-; RV32I-NEXT:    add a1, a1, a0
+; RV32I-NEXT:    bnez a5, .LBB21_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    sltu a4, a2, a0
+; RV32I-NEXT:    sub a1, a3, a1
 ; RV32I-NEXT:    sub a1, a1, a4
-; RV32I-NEXT:    add a0, a2, a0
+; RV32I-NEXT:    sub a0, a2, a0
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB21_4:
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sub a1, a1, a4
+; RV32I-NEXT:    sub a0, a0, a2
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: abd_cmp_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    bltu a1, a0, .LBB21_2
+; RV64I-NEXT:    bltu a0, a1, .LBB21_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    sub a0, a1, a0
 ; RV64I-NEXT:    ret
@@ -1772,234 +1760,218 @@ define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
 ; RV32ZBB-LABEL: abd_cmp_i64:
 ; RV32ZBB:       # %bb.0:
 ; RV32ZBB-NEXT:    sltu a4, a0, a2
-; RV32ZBB-NEXT:    sub a3, a1, a3
-; RV32ZBB-NEXT:    sub a3, a3, a4
-; RV32ZBB-NEXT:    sub a2, a0, a2
-; RV32ZBB-NEXT:    beq a3, a1, .LBB21_2
+; RV32ZBB-NEXT:    mv a5, a4
+; RV32ZBB-NEXT:    beq a1, a3, .LBB21_2
 ; RV32ZBB-NEXT:  # %bb.1:
-; RV32ZBB-NEXT:    sltu a0, a1, a3
-; RV32ZBB-NEXT:    j .LBB21_3
+; RV32ZBB-NEXT:    sltu a5, a1, a3
 ; RV32ZBB-NEXT:  .LBB21_2:
-; RV32ZBB-NEXT:    sltu a0, a0, a2
-; RV32ZBB-NEXT:  .LBB21_3:
-; RV32ZBB-NEXT:    neg a1, a0
-; RV32ZBB-NEXT:    xor a2, a2, a1
-; RV32ZBB-NEXT:    sltu a4, a2, a1
-; RV32ZBB-NEXT:    xor a1, a3, a1
-; RV32ZBB-NEXT:    add a1, a1, a0
+; RV32ZBB-NEXT:    bnez a5, .LBB21_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    sltu a4, a2, a0
+; RV32ZBB-NEXT:    sub a1, a3, a1
 ; RV32ZBB-NEXT:    sub a1, a1, a4
-; RV32ZBB-NEXT:    add a0, a2, a0
+; RV32ZBB-NEXT:    sub a0, a2, a0
+; RV32ZBB-NEXT:    ret
+; RV32ZBB-NEXT:  .LBB21_4:
+; RV32ZBB-NEXT:    sub a1, a1, a3
+; RV32ZBB-NEXT:    sub a1, a1, a4
+; RV32ZBB-NEXT:    sub a0, a0, a2
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: abd_cmp_i64:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    minu a2, a0, a1
-; RV64ZBB-NEXT:    maxu a0, a0, a1
-; RV64ZBB-NEXT:    sub a0, a0, a2
+; RV64ZBB-NEXT:    bltu a0, a1, .LBB21_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    sub a0, a1, a0
+; RV64ZBB-NEXT:    ret
+; RV64ZBB-NEXT:  .LBB21_2:
+; RV64ZBB-NEXT:    sub a0, a0, a1
 ; RV64ZBB-NEXT:    ret
   %cmp = icmp ult i64 %a, %b
   %ab = sub i64 %a, %b
   %ba = sub i64 %b, %a
-  %sel = select i1 %cmp, i64 %ba, i64 %ab
+  %sel = select i1 %cmp, i64 %ab, i64 %ba
   ret i64 %sel
 }
 
 define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
 ; RV32I-LABEL: abd_cmp_i128:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lw a5, 0(a2)
-; RV32I-NEXT:    lw a3, 0(a1)
-; RV32I-NEXT:    lw t1, 12(a2)
-; RV32I-NEXT:    lw a7, 8(a2)
-; RV32I-NEXT:    lw a4, 8(a1)
-; RV32I-NEXT:    lw a6, 12(a1)
-; RV32I-NEXT:    lw t0, 4(a2)
+; RV32I-NEXT:    lw a3, 0(a2)
+; RV32I-NEXT:    lw a4, 0(a1)
+; RV32I-NEXT:    lw a5, 4(a2)
+; RV32I-NEXT:    lw a6, 8(a2)
+; RV32I-NEXT:    lw a7, 8(a1)
+; RV32I-NEXT:    lw a2, 12(a2)
+; RV32I-NEXT:    lw t0, 12(a1)
 ; RV32I-NEXT:    lw a1, 4(a1)
-; RV32I-NEXT:    sltu a2, a4, a7
-; RV32I-NEXT:    sub t1, a6, t1
-; RV32I-NEXT:    sltu t2, a3, a5
-; RV32I-NEXT:    sub a2, t1, a2
-; RV32I-NEXT:    mv t1, t2
-; RV32I-NEXT:    beq a1, t0, .LBB22_2
+; RV32I-NEXT:    sltu t1, a7, a6
+; RV32I-NEXT:    mv t4, t1
+; RV32I-NEXT:    beq t0, a2, .LBB22_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    sltu t1, a1, t0
+; RV32I-NEXT:    sltu t4, t0, a2
 ; RV32I-NEXT:  .LBB22_2:
-; RV32I-NEXT:    sub a7, a4, a7
-; RV32I-NEXT:    sltu t3, a7, t1
-; RV32I-NEXT:    sub a2, a2, t3
-; RV32I-NEXT:    sub a7, a7, t1
-; RV32I-NEXT:    beq a2, a6, .LBB22_4
+; RV32I-NEXT:    sltu t2, a4, a3
+; RV32I-NEXT:    mv t3, t2
+; RV32I-NEXT:    beq a1, a5, .LBB22_4
 ; RV32I-NEXT:  # %bb.3:
-; RV32I-NEXT:    sltu t1, a6, a2
-; RV32I-NEXT:    j .LBB22_5
+; RV32I-NEXT:    sltu t3, a1, a5
 ; RV32I-NEXT:  .LBB22_4:
-; RV32I-NEXT:    sltu t1, a4, a7
-; RV32I-NEXT:  .LBB22_5:
-; RV32I-NEXT:    sub t0, a1, t0
-; RV32I-NEXT:    sub t0, t0, t2
-; RV32I-NEXT:    sub a5, a3, a5
-; RV32I-NEXT:    beq t0, a1, .LBB22_7
-; RV32I-NEXT:  # %bb.6:
-; RV32I-NEXT:    sltu a1, a1, t0
-; RV32I-NEXT:    j .LBB22_8
-; RV32I-NEXT:  .LBB22_7:
-; RV32I-NEXT:    sltu a1, a3, a5
+; RV32I-NEXT:    xor t5, t0, a2
+; RV32I-NEXT:    xor t6, a7, a6
+; RV32I-NEXT:    or t5, t6, t5
+; RV32I-NEXT:    mv t6, t3
+; RV32I-NEXT:    beqz t5, .LBB22_6
+; RV32I-NEXT:  # %bb.5:
+; RV32I-NEXT:    mv t6, t4
+; RV32I-NEXT:  .LBB22_6:
+; RV32I-NEXT:    sltu t4, a3, a4
+; RV32I-NEXT:    mv t5, t4
+; RV32I-NEXT:    beq a1, a5, .LBB22_8
+; RV32I-NEXT:  # %bb.7:
+; RV32I-NEXT:    sltu t5, a5, a1
 ; RV32I-NEXT:  .LBB22_8:
-; RV32I-NEXT:    xor a3, a2, a6
-; RV32I-NEXT:    xor a4, a7, a4
-; RV32I-NEXT:    or a3, a4, a3
-; RV32I-NEXT:    beqz a3, .LBB22_10
+; RV32I-NEXT:    bnez t6, .LBB22_10
 ; RV32I-NEXT:  # %bb.9:
-; RV32I-NEXT:    mv a1, t1
+; RV32I-NEXT:    sltu t1, a6, a7
+; RV32I-NEXT:    sub a2, a2, t0
+; RV32I-NEXT:    sub a2, a2, t1
+; RV32I-NEXT:    sub a6, a6, a7
+; RV32I-NEXT:    sltu a7, a6, t5
+; RV32I-NEXT:    sub a2, a2, a7
+; RV32I-NEXT:    sub a6, a6, t5
+; RV32I-NEXT:    sub a5, a5, a1
+; RV32I-NEXT:    sub a1, a5, t4
+; RV32I-NEXT:    sub a3, a3, a4
+; RV32I-NEXT:    j .LBB22_11
 ; RV32I-NEXT:  .LBB22_10:
-; RV32I-NEXT:    neg a6, a1
-; RV32I-NEXT:    xor a3, a7, a6
-; RV32I-NEXT:    sltu a4, a3, a6
-; RV32I-NEXT:    xor a2, a2, a6
-; RV32I-NEXT:    add a2, a2, a1
-; RV32I-NEXT:    sub a4, a2, a4
-; RV32I-NEXT:    xor a2, a5, a6
-; RV32I-NEXT:    sltu a5, a2, a6
-; RV32I-NEXT:    xor a7, t0, a6
-; RV32I-NEXT:    mv t1, a5
-; RV32I-NEXT:    beqz t0, .LBB22_12
-; RV32I-NEXT:  # %bb.11:
-; RV32I-NEXT:    sltu t1, a7, a6
-; RV32I-NEXT:  .LBB22_12:
-; RV32I-NEXT:    add a3, a3, a1
-; RV32I-NEXT:    sltu a6, a3, t1
-; RV32I-NEXT:    sub a4, a4, a6
-; RV32I-NEXT:    sub a3, a3, t1
-; RV32I-NEXT:    add a7, a7, a1
-; RV32I-NEXT:    sub a5, a7, a5
-; RV32I-NEXT:    add a1, a2, a1
-; RV32I-NEXT:    sw a1, 0(a0)
-; RV32I-NEXT:    sw a5, 4(a0)
-; RV32I-NEXT:    sw a3, 8(a0)
-; RV32I-NEXT:    sw a4, 12(a0)
+; RV32I-NEXT:    sub a2, t0, a2
+; RV32I-NEXT:    sub a6, a7, a6
+; RV32I-NEXT:    sub a2, a2, t1
+; RV32I-NEXT:    sltu a7, a6, t3
+; RV32I-NEXT:    sub a1, a1, a5
+; RV32I-NEXT:    sub a2, a2, a7
+; RV32I-NEXT:    sub a6, a6, t3
+; RV32I-NEXT:    sub a1, a1, t2
+; RV32I-NEXT:    sub a3, a4, a3
+; RV32I-NEXT:  .LBB22_11:
+; RV32I-NEXT:    sw a6, 8(a0)
+; RV32I-NEXT:    sw a1, 4(a0)
+; RV32I-NEXT:    sw a3, 0(a0)
+; RV32I-NEXT:    sw a2, 12(a0)
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: abd_cmp_i128:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sltu a4, a0, a2
-; RV64I-NEXT:    sub a3, a1, a3
-; RV64I-NEXT:    sub a3, a3, a4
-; RV64I-NEXT:    sub a2, a0, a2
-; RV64I-NEXT:    beq a3, a1, .LBB22_2
+; RV64I-NEXT:    mv a5, a4
+; RV64I-NEXT:    beq a1, a3, .LBB22_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    sltu a0, a1, a3
-; RV64I-NEXT:    j .LBB22_3
+; RV64I-NEXT:    sltu a5, a1, a3
 ; RV64I-NEXT:  .LBB22_2:
-; RV64I-NEXT:    sltu a0, a0, a2
-; RV64I-NEXT:  .LBB22_3:
-; RV64I-NEXT:    neg a1, a0
-; RV64I-NEXT:    xor a2, a2, a1
-; RV64I-NEXT:    sltu a4, a2, a1
-; RV64I-NEXT:    xor a1, a3, a1
-; RV64I-NEXT:    add a1, a1, a0
+; RV64I-NEXT:    bnez a5, .LBB22_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    sltu a4, a2, a0
+; RV64I-NEXT:    sub a1, a3, a1
 ; RV64I-NEXT:    sub a1, a1, a4
-; RV64I-NEXT:    add a0, a2, a0
+; RV64I-NEXT:    sub a0, a2, a0
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB22_4:
+; RV64I-NEXT:    sub a1, a1, a3
+; RV64I-NEXT:    sub a1, a1, a4
+; RV64I-NEXT:    sub a0, a0, a2
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: abd_cmp_i128:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    lw a5, 0(a2)
-; RV32ZBB-NEXT:    lw a3, 0(a1)
-; RV32ZBB-NEXT:    lw t1, 12(a2)
-; RV32ZBB-NEXT:    lw a7, 8(a2)
-; RV32ZBB-NEXT:    lw a4, 8(a1)
-; RV32ZBB-NEXT:    lw a6, 12(a1)
-; RV32ZBB-NEXT:    lw t0, 4(a2)
+; RV32ZBB-NEXT:    lw a3, 0(a2)
+; RV32ZBB-NEXT:    lw a4, 0(a1)
+; RV32ZBB-NEXT:    lw a5, 4(a2)
+; RV32ZBB-NEXT:    lw a6, 8(a2)
+; RV32ZBB-NEXT:    lw a7, 8(a1)
+; RV32ZBB-NEXT:    lw a2, 12(a2)
+; RV32ZBB-NEXT:    lw t0, 12(a1)
 ; RV32ZBB-NEXT:    lw a1, 4(a1)
-; RV32ZBB-NEXT:    sltu a2, a4, a7
-; RV32ZBB-NEXT:    sub t1, a6, t1
-; RV32ZBB-NEXT:    sltu t2, a3, a5
-; RV32ZBB-NEXT:    sub a2, t1, a2
-; RV32ZBB-NEXT:    mv t1, t2
-; RV32ZBB-NEXT:    beq a1, t0, .LBB22_2
+; RV32ZBB-NEXT:    sltu t1, a7, a6
+; RV32ZBB-NEXT:    mv t4, t1
+; RV32ZBB-NEXT:    beq t0, a2, .LBB22_2
 ; RV32ZBB-NEXT:  # %bb.1:
-; RV32ZBB-NEXT:    sltu t1, a1, t0
+; RV32ZBB-NEXT:    sltu t4, t0, a2
 ; RV32ZBB-NEXT:  .LBB22_2:
-; RV32ZBB-NEXT:    sub a7, a4, a7
-; RV32ZBB-NEXT:    sltu t3, a7, t1
-; RV32ZBB-NEXT:    sub a2, a2, t3
-; RV32ZBB-NEXT:    sub a7, a7, t1
-; RV32ZBB-NEXT:    beq a2, a6, .LBB22_4
+; RV32ZBB-NEXT:    sltu t2, a4, a3
+; RV32ZBB-NEXT:    mv t3, t2
+; RV32ZBB-NEXT:    beq a1, a5, .LBB22_4
 ; RV32ZBB-NEXT:  # %bb.3:
-; RV32ZBB-NEXT:    sltu t1, a6, a2
-; RV32ZBB-NEXT:    j .LBB22_5
+; RV32ZBB-NEXT:    sltu t3, a1, a5
 ; RV32ZBB-NEXT:  .LBB22_4:
-; RV32ZBB-NEXT:    sltu t1, a4, a7
-; RV32ZBB-NEXT:  .LBB22_5:
-; RV32ZBB-NEXT:    sub t0, a1, t0
-; RV32ZBB-NEXT:    sub t0, t0, t2
-; RV32ZBB-NEXT:    sub a5, a3, a5
-; RV32ZBB-NEXT:    beq t0, a1, .LBB22_7
-; RV32ZBB-NEXT:  # %bb.6:
-; RV32ZBB-NEXT:    sltu a1, a1, t0
-; RV32ZBB-NEXT:    j .LBB22_8
-; RV32ZBB-NEXT:  .LBB22_7:
-; RV32ZBB-NEXT:    sltu a1, a3, a5
+; RV32ZBB-NEXT:    xor t5, t0, a2
+; RV32ZBB-NEXT:    xor t6, a7, a6
+; RV32ZBB-NEXT:    or t5, t6, t5
+; RV32ZBB-NEXT:    mv t6, t3
+; RV32ZBB-NEXT:    beqz t5, .LBB22_6
+; RV32ZBB-NEXT:  # %bb.5:
+; RV32ZBB-NEXT:    mv t6, t4
+; RV32ZBB-NEXT:  .LBB22_6:
+; RV32ZBB-NEXT:    sltu t4, a3, a4
+; RV32ZBB-NEXT:    mv t5, t4
+; RV32ZBB-NEXT:    beq a1, a5, .LBB22_8
+; RV32ZBB-NEXT:  # %bb.7:
+; RV32ZBB-NEXT:    sltu t5, a5, a1
 ; RV32ZBB-NEXT:  .LBB22_8:
-; RV32ZBB-NEXT:    xor a3, a2, a6
-; RV32ZBB-NEXT:    xor a4, a7, a4
-; RV32ZBB-NEXT:    or a3, a4, a3
-; RV32ZBB-NEXT:    beqz a3, .LBB22_10
+; RV32ZBB-NEXT:    bnez t6, .LBB22_10
 ; RV32ZBB-NEXT:  # %bb.9:
-; RV32ZBB-NEXT:    mv a1, t1
+; RV32ZBB-NEXT:    sltu t1, a6, a7
+; RV32ZBB-NEXT:    sub a2, a2, t0
+; RV32ZBB-NEXT:    sub a2, a2, t1
+; RV32ZBB-NEXT:    sub a6, a6, a7
+; RV32ZBB-NEXT:    sltu a7, a6, t5
+; RV32ZBB-NEXT:    sub a2, a2, a7
+; RV32ZBB-NEXT:    sub a6, a6, t5
+; RV32ZBB-NEXT:    sub a5, a5, a1
+; RV32ZBB-NEXT:    sub a1, a5, t4
+; RV32ZBB-NEXT:    sub a3, a3, a4
+; RV32ZBB-NEXT:    j .LBB22_11
 ; RV32ZBB-NEXT:  .LBB22_10:
-; RV32ZBB-NEXT:    neg a6, a1
-; RV32ZBB-NEXT:    xor a3, a7, a6
-; RV32ZBB-NEXT:    sltu a4, a3, a6
-; RV32ZBB-NEXT:    xor a2, a2, a6
-; RV32ZBB-NEXT:    add a2, a2, a1
-; RV32ZBB-NEXT:    sub a4, a2, a4
-; RV32ZBB-NEXT:    xor a2, a5, a6
-; RV32ZBB-NEXT:    sltu a5, a2, a6
-; RV32ZBB-NEXT:    xor a7, t0, a6
-; RV32ZBB-NEXT:    mv t1, a5
-; RV32ZBB-NEXT:    beqz t0, .LBB22_12
-; RV32ZBB-NEXT:  # %bb.11:
-; RV32ZBB-NEXT:    sltu t1, a7, a6
-; RV32ZBB-NEXT:  .LBB22_12:
-; RV32ZBB-NEXT:    add a3, a3, a1
-; RV32ZBB-NEXT:    sltu a6, a3, t1
-; RV32ZBB-NEXT:    sub a4, a4, a6
-; RV32ZBB-NEXT:    sub a3, a3, t1
-; RV32ZBB-NEXT:    add a7, a7, a1
-; RV32ZBB-NEXT:    sub a5, a7, a5
-; RV32ZBB-NEXT:    add a1, a2, a1
-; RV32ZBB-NEXT:    sw a1, 0(a0)
-; RV32ZBB-NEXT:    sw a5, 4(a0)
-; RV32ZBB-NEXT:    sw a3, 8(a0)
-; RV32ZBB-NEXT:    sw a4, 12(a0)
+; RV32ZBB-NEXT:    sub a2, t0, a2
+; RV32ZBB-NEXT:    sub a6, a7, a6
+; RV32ZBB-NEXT:    sub a2, a2, t1
+; RV32ZBB-NEXT:    sltu a7, a6, t3
+; RV32ZBB-NEXT:    sub a1, a1, a5
+; RV32ZBB-NEXT:    sub a2, a2, a7
+; RV32ZBB-NEXT:    sub a6, a6, t3
+; RV32ZBB-NEXT:    sub a1, a1, t2
+; RV32ZBB-NEXT:    sub a3, a4, a3
+; RV32ZBB-NEXT:  .LBB22_11:
+; RV32ZBB-NEXT:    sw a6, 8(a0)
+; RV32ZBB-NEXT:    sw a1, 4(a0)
+; RV32ZBB-NEXT:    sw a3, 0(a0)
+; RV32ZBB-NEXT:    sw a2, 12(a0)
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: abd_cmp_i128:
 ; RV64ZBB:       # %bb.0:
 ; RV64ZBB-NEXT:    sltu a4, a0, a2
-; RV64ZBB-NEXT:    sub a3, a1, a3
-; RV64ZBB-NEXT:    sub a3, a3, a4
-; RV64ZBB-NEXT:    sub a2, a0, a2
-; RV64ZBB-NEXT:    beq a3, a1, .LBB22_2
+; RV64ZBB-NEXT:    mv a5, a4
+; RV64ZBB-NEXT:    beq a1, a3, .LBB22_2
 ; RV64ZBB-NEXT:  # %bb.1:
-; RV64ZBB-NEXT:    sltu a0, a1, a3
-; RV64ZBB-NEXT:    j .LBB22_3
+; RV64ZBB-NEXT:    sltu a5, a1, a3
 ; RV64ZBB-NEXT:  .LBB22_2:
-; RV64ZBB-NEXT:    sltu a0, a0, a2
-; RV64ZBB-NEXT:  .LBB22_3:
-; RV64ZBB-NEXT:    neg a1, a0
-; RV64ZBB-NEXT:    xor a2, a2, a1
-; RV64ZBB-NEXT:    sltu a4, a2, a1
-; RV64ZBB-NEXT:    xor a1, a3, a1
-; RV64ZBB-NEXT:    add a1, a1, a0
+; RV64ZBB-NEXT:    bnez a5, .LBB22_4
+; RV64ZBB-NEXT:  # %bb.3:
+; RV64ZBB-NEXT:    sltu a4, a2, a0
+; RV64ZBB-NEXT:    sub a1, a3, a1
 ; RV64ZBB-NEXT:    sub a1, a1, a4
-; RV64ZBB-NEXT:    add a0, a2, a0
+; RV64ZBB-NEXT:    sub a0, a2, a0
+; RV64ZBB-NEXT:    ret
+; RV64ZBB-NEXT:  .LBB22_4:
+; RV64ZBB-NEXT:    sub a1, a1, a3
+; RV64ZBB-NEXT:    sub a1, a1, a4
+; RV64ZBB-NEXT:    sub a0, a0, a2
 ; RV64ZBB-NEXT:    ret
   %cmp = icmp ult i128 %a, %b
   %ab = sub i128 %a, %b
   %ba = sub i128 %b, %a
-  %sel = select i1 %cmp, i128 %ba, i128 %ab
+  %sel = select i1 %cmp, i128 %ab, i128 %ba
   ret i128 %sel
 }
 

diff --git a/llvm/test/CodeGen/RISCV/abdu.ll b/llvm/test/CodeGen/RISCV/abdu.ll
index 39aef369a29672..14f45895754dfd 100644
--- a/llvm/test/CodeGen/RISCV/abdu.ll
+++ b/llvm/test/CodeGen/RISCV/abdu.ll
@@ -1457,249 +1457,266 @@ define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-LABEL: abd_cmp_i64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sltu a4, a0, a2
-; RV32I-NEXT:    mv a5, a4
-; RV32I-NEXT:    beq a1, a3, .LBB21_2
+; RV32I-NEXT:    sub a3, a1, a3
+; RV32I-NEXT:    sub a3, a3, a4
+; RV32I-NEXT:    sub a2, a0, a2
+; RV32I-NEXT:    beq a3, a1, .LBB21_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    sltu a5, a1, a3
+; RV32I-NEXT:    sltu a0, a1, a3
+; RV32I-NEXT:    j .LBB21_3
 ; RV32I-NEXT:  .LBB21_2:
-; RV32I-NEXT:    beqz a5, .LBB21_4
-; RV32I-NEXT:  # %bb.3:
-; RV32I-NEXT:    sub a1, a1, a3
-; RV32I-NEXT:    sub a1, a1, a4
-; RV32I-NEXT:    sub a0, a0, a2
-; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB21_4:
-; RV32I-NEXT:    sltu a4, a2, a0
-; RV32I-NEXT:    sub a1, a3, a1
+; RV32I-NEXT:    sltu a0, a0, a2
+; RV32I-NEXT:  .LBB21_3:
+; RV32I-NEXT:    neg a1, a0
+; RV32I-NEXT:    xor a2, a2, a1
+; RV32I-NEXT:    sltu a4, a2, a1
+; RV32I-NEXT:    xor a1, a3, a1
+; RV32I-NEXT:    add a1, a1, a0
 ; RV32I-NEXT:    sub a1, a1, a4
-; RV32I-NEXT:    sub a0, a2, a0
+; RV32I-NEXT:    add a0, a2, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: abd_cmp_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    bgeu a0, a1, .LBB21_2
+; RV64I-NEXT:    bltu a1, a0, .LBB21_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    sub a0, a1, a0
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB21_2:
-; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    sub a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: abd_cmp_i64:
 ; RV32ZBB:       # %bb.0:
 ; RV32ZBB-NEXT:    sltu a4, a0, a2
-; RV32ZBB-NEXT:    mv a5, a4
-; RV32ZBB-NEXT:    beq a1, a3, .LBB21_2
+; RV32ZBB-NEXT:    sub a3, a1, a3
+; RV32ZBB-NEXT:    sub a3, a3, a4
+; RV32ZBB-NEXT:    sub a2, a0, a2
+; RV32ZBB-NEXT:    beq a3, a1, .LBB21_2
 ; RV32ZBB-NEXT:  # %bb.1:
-; RV32ZBB-NEXT:    sltu a5, a1, a3
+; RV32ZBB-NEXT:    sltu a0, a1, a3
+; RV32ZBB-NEXT:    j .LBB21_3
 ; RV32ZBB-NEXT:  .LBB21_2:
-; RV32ZBB-NEXT:    beqz a5, .LBB21_4
-; RV32ZBB-NEXT:  # %bb.3:
-; RV32ZBB-NEXT:    sub a1, a1, a3
-; RV32ZBB-NEXT:    sub a1, a1, a4
-; RV32ZBB-NEXT:    sub a0, a0, a2
-; RV32ZBB-NEXT:    ret
-; RV32ZBB-NEXT:  .LBB21_4:
-; RV32ZBB-NEXT:    sltu a4, a2, a0
-; RV32ZBB-NEXT:    sub a1, a3, a1
+; RV32ZBB-NEXT:    sltu a0, a0, a2
+; RV32ZBB-NEXT:  .LBB21_3:
+; RV32ZBB-NEXT:    neg a1, a0
+; RV32ZBB-NEXT:    xor a2, a2, a1
+; RV32ZBB-NEXT:    sltu a4, a2, a1
+; RV32ZBB-NEXT:    xor a1, a3, a1
+; RV32ZBB-NEXT:    add a1, a1, a0
 ; RV32ZBB-NEXT:    sub a1, a1, a4
-; RV32ZBB-NEXT:    sub a0, a2, a0
+; RV32ZBB-NEXT:    add a0, a2, a0
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: abd_cmp_i64:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    bgeu a0, a1, .LBB21_2
-; RV64ZBB-NEXT:  # %bb.1:
-; RV64ZBB-NEXT:    sub a0, a0, a1
-; RV64ZBB-NEXT:    ret
-; RV64ZBB-NEXT:  .LBB21_2:
-; RV64ZBB-NEXT:    sub a0, a1, a0
+; RV64ZBB-NEXT:    minu a2, a0, a1
+; RV64ZBB-NEXT:    maxu a0, a0, a1
+; RV64ZBB-NEXT:    sub a0, a0, a2
 ; RV64ZBB-NEXT:    ret
   %cmp = icmp uge i64 %a, %b
   %ab = sub i64 %a, %b
   %ba = sub i64 %b, %a
-  %sel = select i1 %cmp, i64 %ba, i64 %ab
+  %sel = select i1 %cmp, i64 %ab, i64 %ba
   ret i64 %sel
 }
 
 define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
 ; RV32I-LABEL: abd_cmp_i128:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lw a3, 0(a2)
-; RV32I-NEXT:    lw a4, 0(a1)
-; RV32I-NEXT:    lw a5, 4(a2)
-; RV32I-NEXT:    lw a6, 8(a2)
-; RV32I-NEXT:    lw a7, 8(a1)
-; RV32I-NEXT:    lw a2, 12(a2)
-; RV32I-NEXT:    lw t0, 12(a1)
+; RV32I-NEXT:    lw a5, 0(a2)
+; RV32I-NEXT:    lw a3, 0(a1)
+; RV32I-NEXT:    lw t1, 12(a2)
+; RV32I-NEXT:    lw a7, 8(a2)
+; RV32I-NEXT:    lw a4, 8(a1)
+; RV32I-NEXT:    lw a6, 12(a1)
+; RV32I-NEXT:    lw t0, 4(a2)
 ; RV32I-NEXT:    lw a1, 4(a1)
-; RV32I-NEXT:    sltu t1, a7, a6
-; RV32I-NEXT:    mv t4, t1
-; RV32I-NEXT:    beq t0, a2, .LBB22_2
+; RV32I-NEXT:    sltu a2, a4, a7
+; RV32I-NEXT:    sub t1, a6, t1
+; RV32I-NEXT:    sltu t2, a3, a5
+; RV32I-NEXT:    sub a2, t1, a2
+; RV32I-NEXT:    mv t1, t2
+; RV32I-NEXT:    beq a1, t0, .LBB22_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    sltu t4, t0, a2
+; RV32I-NEXT:    sltu t1, a1, t0
 ; RV32I-NEXT:  .LBB22_2:
-; RV32I-NEXT:    xor t3, t0, a2
-; RV32I-NEXT:    xor t5, a7, a6
-; RV32I-NEXT:    sltu t2, a4, a3
-; RV32I-NEXT:    or t5, t5, t3
-; RV32I-NEXT:    mv t3, t2
-; RV32I-NEXT:    beq a1, a5, .LBB22_4
+; RV32I-NEXT:    sub a7, a4, a7
+; RV32I-NEXT:    sltu t3, a7, t1
+; RV32I-NEXT:    sub a2, a2, t3
+; RV32I-NEXT:    sub a7, a7, t1
+; RV32I-NEXT:    beq a2, a6, .LBB22_4
 ; RV32I-NEXT:  # %bb.3:
-; RV32I-NEXT:    sltu t3, a1, a5
+; RV32I-NEXT:    sltu t1, a6, a2
+; RV32I-NEXT:    j .LBB22_5
 ; RV32I-NEXT:  .LBB22_4:
-; RV32I-NEXT:    mv t6, t3
-; RV32I-NEXT:    beqz t5, .LBB22_6
-; RV32I-NEXT:  # %bb.5:
-; RV32I-NEXT:    mv t6, t4
-; RV32I-NEXT:  .LBB22_6:
-; RV32I-NEXT:    sltu t4, a3, a4
-; RV32I-NEXT:    mv t5, t4
-; RV32I-NEXT:    beq a1, a5, .LBB22_8
-; RV32I-NEXT:  # %bb.7:
-; RV32I-NEXT:    sltu t5, a5, a1
+; RV32I-NEXT:    sltu t1, a4, a7
+; RV32I-NEXT:  .LBB22_5:
+; RV32I-NEXT:    sub t0, a1, t0
+; RV32I-NEXT:    sub t0, t0, t2
+; RV32I-NEXT:    sub a5, a3, a5
+; RV32I-NEXT:    beq t0, a1, .LBB22_7
+; RV32I-NEXT:  # %bb.6:
+; RV32I-NEXT:    sltu a1, a1, t0
+; RV32I-NEXT:    j .LBB22_8
+; RV32I-NEXT:  .LBB22_7:
+; RV32I-NEXT:    sltu a1, a3, a5
 ; RV32I-NEXT:  .LBB22_8:
-; RV32I-NEXT:    beqz t6, .LBB22_10
+; RV32I-NEXT:    xor a3, a2, a6
+; RV32I-NEXT:    xor a4, a7, a4
+; RV32I-NEXT:    or a3, a4, a3
+; RV32I-NEXT:    beqz a3, .LBB22_10
 ; RV32I-NEXT:  # %bb.9:
-; RV32I-NEXT:    sub a2, t0, a2
-; RV32I-NEXT:    sub a6, a7, a6
-; RV32I-NEXT:    sub a2, a2, t1
-; RV32I-NEXT:    sltu a7, a6, t3
-; RV32I-NEXT:    sub a1, a1, a5
-; RV32I-NEXT:    sub a2, a2, a7
-; RV32I-NEXT:    sub a6, a6, t3
-; RV32I-NEXT:    sub a1, a1, t2
-; RV32I-NEXT:    sub a3, a4, a3
-; RV32I-NEXT:    j .LBB22_11
+; RV32I-NEXT:    mv a1, t1
 ; RV32I-NEXT:  .LBB22_10:
-; RV32I-NEXT:    sltu t1, a6, a7
-; RV32I-NEXT:    sub a2, a2, t0
-; RV32I-NEXT:    sub a2, a2, t1
-; RV32I-NEXT:    sub a6, a6, a7
-; RV32I-NEXT:    sltu a7, a6, t5
-; RV32I-NEXT:    sub a2, a2, a7
-; RV32I-NEXT:    sub a6, a6, t5
-; RV32I-NEXT:    sub a5, a5, a1
-; RV32I-NEXT:    sub a1, a5, t4
-; RV32I-NEXT:    sub a3, a3, a4
-; RV32I-NEXT:  .LBB22_11:
-; RV32I-NEXT:    sw a6, 8(a0)
-; RV32I-NEXT:    sw a1, 4(a0)
-; RV32I-NEXT:    sw a3, 0(a0)
-; RV32I-NEXT:    sw a2, 12(a0)
+; RV32I-NEXT:    neg a6, a1
+; RV32I-NEXT:    xor a3, a7, a6
+; RV32I-NEXT:    sltu a4, a3, a6
+; RV32I-NEXT:    xor a2, a2, a6
+; RV32I-NEXT:    add a2, a2, a1
+; RV32I-NEXT:    sub a4, a2, a4
+; RV32I-NEXT:    xor a2, a5, a6
+; RV32I-NEXT:    sltu a5, a2, a6
+; RV32I-NEXT:    xor a7, t0, a6
+; RV32I-NEXT:    mv t1, a5
+; RV32I-NEXT:    beqz t0, .LBB22_12
+; RV32I-NEXT:  # %bb.11:
+; RV32I-NEXT:    sltu t1, a7, a6
+; RV32I-NEXT:  .LBB22_12:
+; RV32I-NEXT:    add a3, a3, a1
+; RV32I-NEXT:    sltu a6, a3, t1
+; RV32I-NEXT:    sub a4, a4, a6
+; RV32I-NEXT:    sub a3, a3, t1
+; RV32I-NEXT:    add a7, a7, a1
+; RV32I-NEXT:    sub a5, a7, a5
+; RV32I-NEXT:    add a1, a2, a1
+; RV32I-NEXT:    sw a1, 0(a0)
+; RV32I-NEXT:    sw a5, 4(a0)
+; RV32I-NEXT:    sw a3, 8(a0)
+; RV32I-NEXT:    sw a4, 12(a0)
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: abd_cmp_i128:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sltu a4, a0, a2
-; RV64I-NEXT:    mv a5, a4
-; RV64I-NEXT:    beq a1, a3, .LBB22_2
+; RV64I-NEXT:    sub a3, a1, a3
+; RV64I-NEXT:    sub a3, a3, a4
+; RV64I-NEXT:    sub a2, a0, a2
+; RV64I-NEXT:    beq a3, a1, .LBB22_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    sltu a5, a1, a3
+; RV64I-NEXT:    sltu a0, a1, a3
+; RV64I-NEXT:    j .LBB22_3
 ; RV64I-NEXT:  .LBB22_2:
-; RV64I-NEXT:    beqz a5, .LBB22_4
-; RV64I-NEXT:  # %bb.3:
-; RV64I-NEXT:    sub a1, a1, a3
-; RV64I-NEXT:    sub a1, a1, a4
-; RV64I-NEXT:    sub a0, a0, a2
-; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB22_4:
-; RV64I-NEXT:    sltu a4, a2, a0
-; RV64I-NEXT:    sub a1, a3, a1
+; RV64I-NEXT:    sltu a0, a0, a2
+; RV64I-NEXT:  .LBB22_3:
+; RV64I-NEXT:    neg a1, a0
+; RV64I-NEXT:    xor a2, a2, a1
+; RV64I-NEXT:    sltu a4, a2, a1
+; RV64I-NEXT:    xor a1, a3, a1
+; RV64I-NEXT:    add a1, a1, a0
 ; RV64I-NEXT:    sub a1, a1, a4
-; RV64I-NEXT:    sub a0, a2, a0
+; RV64I-NEXT:    add a0, a2, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: abd_cmp_i128:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    lw a3, 0(a2)
-; RV32ZBB-NEXT:    lw a4, 0(a1)
-; RV32ZBB-NEXT:    lw a5, 4(a2)
-; RV32ZBB-NEXT:    lw a6, 8(a2)
-; RV32ZBB-NEXT:    lw a7, 8(a1)
-; RV32ZBB-NEXT:    lw a2, 12(a2)
-; RV32ZBB-NEXT:    lw t0, 12(a1)
+; RV32ZBB-NEXT:    lw a5, 0(a2)
+; RV32ZBB-NEXT:    lw a3, 0(a1)
+; RV32ZBB-NEXT:    lw t1, 12(a2)
+; RV32ZBB-NEXT:    lw a7, 8(a2)
+; RV32ZBB-NEXT:    lw a4, 8(a1)
+; RV32ZBB-NEXT:    lw a6, 12(a1)
+; RV32ZBB-NEXT:    lw t0, 4(a2)
 ; RV32ZBB-NEXT:    lw a1, 4(a1)
-; RV32ZBB-NEXT:    sltu t1, a7, a6
-; RV32ZBB-NEXT:    mv t4, t1
-; RV32ZBB-NEXT:    beq t0, a2, .LBB22_2
+; RV32ZBB-NEXT:    sltu a2, a4, a7
+; RV32ZBB-NEXT:    sub t1, a6, t1
+; RV32ZBB-NEXT:    sltu t2, a3, a5
+; RV32ZBB-NEXT:    sub a2, t1, a2
+; RV32ZBB-NEXT:    mv t1, t2
+; RV32ZBB-NEXT:    beq a1, t0, .LBB22_2
 ; RV32ZBB-NEXT:  # %bb.1:
-; RV32ZBB-NEXT:    sltu t4, t0, a2
+; RV32ZBB-NEXT:    sltu t1, a1, t0
 ; RV32ZBB-NEXT:  .LBB22_2:
-; RV32ZBB-NEXT:    xor t3, t0, a2
-; RV32ZBB-NEXT:    xor t5, a7, a6
-; RV32ZBB-NEXT:    sltu t2, a4, a3
-; RV32ZBB-NEXT:    or t5, t5, t3
-; RV32ZBB-NEXT:    mv t3, t2
-; RV32ZBB-NEXT:    beq a1, a5, .LBB22_4
+; RV32ZBB-NEXT:    sub a7, a4, a7
+; RV32ZBB-NEXT:    sltu t3, a7, t1
+; RV32ZBB-NEXT:    sub a2, a2, t3
+; RV32ZBB-NEXT:    sub a7, a7, t1
+; RV32ZBB-NEXT:    beq a2, a6, .LBB22_4
 ; RV32ZBB-NEXT:  # %bb.3:
-; RV32ZBB-NEXT:    sltu t3, a1, a5
+; RV32ZBB-NEXT:    sltu t1, a6, a2
+; RV32ZBB-NEXT:    j .LBB22_5
 ; RV32ZBB-NEXT:  .LBB22_4:
-; RV32ZBB-NEXT:    mv t6, t3
-; RV32ZBB-NEXT:    beqz t5, .LBB22_6
-; RV32ZBB-NEXT:  # %bb.5:
-; RV32ZBB-NEXT:    mv t6, t4
-; RV32ZBB-NEXT:  .LBB22_6:
-; RV32ZBB-NEXT:    sltu t4, a3, a4
-; RV32ZBB-NEXT:    mv t5, t4
-; RV32ZBB-NEXT:    beq a1, a5, .LBB22_8
-; RV32ZBB-NEXT:  # %bb.7:
-; RV32ZBB-NEXT:    sltu t5, a5, a1
+; RV32ZBB-NEXT:    sltu t1, a4, a7
+; RV32ZBB-NEXT:  .LBB22_5:
+; RV32ZBB-NEXT:    sub t0, a1, t0
+; RV32ZBB-NEXT:    sub t0, t0, t2
+; RV32ZBB-NEXT:    sub a5, a3, a5
+; RV32ZBB-NEXT:    beq t0, a1, .LBB22_7
+; RV32ZBB-NEXT:  # %bb.6:
+; RV32ZBB-NEXT:    sltu a1, a1, t0
+; RV32ZBB-NEXT:    j .LBB22_8
+; RV32ZBB-NEXT:  .LBB22_7:
+; RV32ZBB-NEXT:    sltu a1, a3, a5
 ; RV32ZBB-NEXT:  .LBB22_8:
-; RV32ZBB-NEXT:    beqz t6, .LBB22_10
+; RV32ZBB-NEXT:    xor a3, a2, a6
+; RV32ZBB-NEXT:    xor a4, a7, a4
+; RV32ZBB-NEXT:    or a3, a4, a3
+; RV32ZBB-NEXT:    beqz a3, .LBB22_10
 ; RV32ZBB-NEXT:  # %bb.9:
-; RV32ZBB-NEXT:    sub a2, t0, a2
-; RV32ZBB-NEXT:    sub a6, a7, a6
-; RV32ZBB-NEXT:    sub a2, a2, t1
-; RV32ZBB-NEXT:    sltu a7, a6, t3
-; RV32ZBB-NEXT:    sub a1, a1, a5
-; RV32ZBB-NEXT:    sub a2, a2, a7
-; RV32ZBB-NEXT:    sub a6, a6, t3
-; RV32ZBB-NEXT:    sub a1, a1, t2
-; RV32ZBB-NEXT:    sub a3, a4, a3
-; RV32ZBB-NEXT:    j .LBB22_11
+; RV32ZBB-NEXT:    mv a1, t1
 ; RV32ZBB-NEXT:  .LBB22_10:
-; RV32ZBB-NEXT:    sltu t1, a6, a7
-; RV32ZBB-NEXT:    sub a2, a2, t0
-; RV32ZBB-NEXT:    sub a2, a2, t1
-; RV32ZBB-NEXT:    sub a6, a6, a7
-; RV32ZBB-NEXT:    sltu a7, a6, t5
-; RV32ZBB-NEXT:    sub a2, a2, a7
-; RV32ZBB-NEXT:    sub a6, a6, t5
-; RV32ZBB-NEXT:    sub a5, a5, a1
-; RV32ZBB-NEXT:    sub a1, a5, t4
-; RV32ZBB-NEXT:    sub a3, a3, a4
-; RV32ZBB-NEXT:  .LBB22_11:
-; RV32ZBB-NEXT:    sw a6, 8(a0)
-; RV32ZBB-NEXT:    sw a1, 4(a0)
-; RV32ZBB-NEXT:    sw a3, 0(a0)
-; RV32ZBB-NEXT:    sw a2, 12(a0)
+; RV32ZBB-NEXT:    neg a6, a1
+; RV32ZBB-NEXT:    xor a3, a7, a6
+; RV32ZBB-NEXT:    sltu a4, a3, a6
+; RV32ZBB-NEXT:    xor a2, a2, a6
+; RV32ZBB-NEXT:    add a2, a2, a1
+; RV32ZBB-NEXT:    sub a4, a2, a4
+; RV32ZBB-NEXT:    xor a2, a5, a6
+; RV32ZBB-NEXT:    sltu a5, a2, a6
+; RV32ZBB-NEXT:    xor a7, t0, a6
+; RV32ZBB-NEXT:    mv t1, a5
+; RV32ZBB-NEXT:    beqz t0, .LBB22_12
+; RV32ZBB-NEXT:  # %bb.11:
+; RV32ZBB-NEXT:    sltu t1, a7, a6
+; RV32ZBB-NEXT:  .LBB22_12:
+; RV32ZBB-NEXT:    add a3, a3, a1
+; RV32ZBB-NEXT:    sltu a6, a3, t1
+; RV32ZBB-NEXT:    sub a4, a4, a6
+; RV32ZBB-NEXT:    sub a3, a3, t1
+; RV32ZBB-NEXT:    add a7, a7, a1
+; RV32ZBB-NEXT:    sub a5, a7, a5
+; RV32ZBB-NEXT:    add a1, a2, a1
+; RV32ZBB-NEXT:    sw a1, 0(a0)
+; RV32ZBB-NEXT:    sw a5, 4(a0)
+; RV32ZBB-NEXT:    sw a3, 8(a0)
+; RV32ZBB-NEXT:    sw a4, 12(a0)
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: abd_cmp_i128:
 ; RV64ZBB:       # %bb.0:
 ; RV64ZBB-NEXT:    sltu a4, a0, a2
-; RV64ZBB-NEXT:    mv a5, a4
-; RV64ZBB-NEXT:    beq a1, a3, .LBB22_2
+; RV64ZBB-NEXT:    sub a3, a1, a3
+; RV64ZBB-NEXT:    sub a3, a3, a4
+; RV64ZBB-NEXT:    sub a2, a0, a2
+; RV64ZBB-NEXT:    beq a3, a1, .LBB22_2
 ; RV64ZBB-NEXT:  # %bb.1:
-; RV64ZBB-NEXT:    sltu a5, a1, a3
+; RV64ZBB-NEXT:    sltu a0, a1, a3
+; RV64ZBB-NEXT:    j .LBB22_3
 ; RV64ZBB-NEXT:  .LBB22_2:
-; RV64ZBB-NEXT:    beqz a5, .LBB22_4
-; RV64ZBB-NEXT:  # %bb.3:
-; RV64ZBB-NEXT:    sub a1, a1, a3
-; RV64ZBB-NEXT:    sub a1, a1, a4
-; RV64ZBB-NEXT:    sub a0, a0, a2
-; RV64ZBB-NEXT:    ret
-; RV64ZBB-NEXT:  .LBB22_4:
-; RV64ZBB-NEXT:    sltu a4, a2, a0
-; RV64ZBB-NEXT:    sub a1, a3, a1
+; RV64ZBB-NEXT:    sltu a0, a0, a2
+; RV64ZBB-NEXT:  .LBB22_3:
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    xor a2, a2, a1
+; RV64ZBB-NEXT:    sltu a4, a2, a1
+; RV64ZBB-NEXT:    xor a1, a3, a1
+; RV64ZBB-NEXT:    add a1, a1, a0
 ; RV64ZBB-NEXT:    sub a1, a1, a4
-; RV64ZBB-NEXT:    sub a0, a2, a0
+; RV64ZBB-NEXT:    add a0, a2, a0
 ; RV64ZBB-NEXT:    ret
   %cmp = icmp uge i128 %a, %b
   %ab = sub i128 %a, %b
   %ba = sub i128 %b, %a
-  %sel = select i1 %cmp, i128 %ba, i128 %ab
+  %sel = select i1 %cmp, i128 %ab, i128 %ba
   ret i128 %sel
 }
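
As an aside for anyone updating similar tests: the CHECK lines above are autogenerated, so the expected assembly should be refreshed with llvm/utils/update_llc_test_checks.py against a locally built llc (e.g. update_llc_test_checks.py --llc-binary build/bin/llc llvm/test/CodeGen/RISCV/abdu.ll; the build/bin location is an assumption about the local build layout) rather than hand-edited.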
 

diff --git a/llvm/test/CodeGen/X86/abds-neg.ll b/llvm/test/CodeGen/X86/abds-neg.ll
index 246cd8e0e852d5..833273dc982438 100644
--- a/llvm/test/CodeGen/X86/abds-neg.ll
+++ b/llvm/test/CodeGen/X86/abds-neg.ll
@@ -842,8 +842,8 @@ define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
 ; X86-NEXT:    sbbl %edx, %ebx
 ; X86-NEXT:    subl %ecx, %eax
 ; X86-NEXT:    sbbl %esi, %edx
-; X86-NEXT:    cmovll %edi, %eax
-; X86-NEXT:    cmovll %ebx, %edx
+; X86-NEXT:    cmovgel %edi, %eax
+; X86-NEXT:    cmovgel %ebx, %edx
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    popl %edi
 ; X86-NEXT:    popl %ebx
@@ -853,13 +853,14 @@ define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
 ; X64:       # %bb.0:
 ; X64-NEXT:    movq %rdi, %rax
 ; X64-NEXT:    subq %rsi, %rax
-; X64-NEXT:    subq %rdi, %rsi
-; X64-NEXT:    cmovgeq %rsi, %rax
+; X64-NEXT:    negq %rax
+; X64-NEXT:    subq %rsi, %rdi
+; X64-NEXT:    cmovlq %rdi, %rax
 ; X64-NEXT:    retq
   %cmp = icmp slt i64 %a, %b
   %ab = sub i64 %a, %b
   %ba = sub i64 %b, %a
-  %sel = select i1 %cmp, i64 %ba, i64 %ab
+  %sel = select i1 %cmp, i64 %ab, i64 %ba
   ret i64 %sel
 }
 
@@ -888,10 +889,10 @@ define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
 ; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %esi
 ; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %edi
-; X86-NEXT:    cmovll (%esp), %edx # 4-byte Folded Reload
-; X86-NEXT:    cmovll %ebx, %esi
-; X86-NEXT:    cmovll %ebp, %ecx
-; X86-NEXT:    cmovll %eax, %edi
+; X86-NEXT:    cmovgel (%esp), %edx # 4-byte Folded Reload
+; X86-NEXT:    cmovgel %ebx, %esi
+; X86-NEXT:    cmovgel %ebp, %ecx
+; X86-NEXT:    cmovgel %eax, %edi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl %edi, 12(%eax)
 ; X86-NEXT:    movl %ecx, 8(%eax)
@@ -906,20 +907,20 @@ define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
 ;
 ; X64-LABEL: abd_cmp_i128:
 ; X64:       # %bb.0:
-; X64-NEXT:    movq %rdi, %rax
-; X64-NEXT:    subq %rdx, %rax
-; X64-NEXT:    movq %rsi, %r8
-; X64-NEXT:    sbbq %rcx, %r8
-; X64-NEXT:    subq %rdi, %rdx
-; X64-NEXT:    sbbq %rsi, %rcx
-; X64-NEXT:    cmovgeq %rdx, %rax
-; X64-NEXT:    cmovgeq %rcx, %r8
+; X64-NEXT:    movq %rdx, %rax
+; X64-NEXT:    subq %rdi, %rax
+; X64-NEXT:    movq %rcx, %r8
+; X64-NEXT:    sbbq %rsi, %r8
+; X64-NEXT:    subq %rdx, %rdi
+; X64-NEXT:    sbbq %rcx, %rsi
+; X64-NEXT:    cmovlq %rdi, %rax
+; X64-NEXT:    cmovlq %rsi, %r8
 ; X64-NEXT:    movq %r8, %rdx
 ; X64-NEXT:    retq
   %cmp = icmp slt i128 %a, %b
   %ab = sub i128 %a, %b
   %ba = sub i128 %b, %a
-  %sel = select i1 %cmp, i128 %ba, i128 %ab
+  %sel = select i1 %cmp, i128 %ab, i128 %ba
   ret i128 %sel
 }
 

diff --git a/llvm/test/CodeGen/X86/abds.ll b/llvm/test/CodeGen/X86/abds.ll
index 9f3b99b349aeda..d9ba140032b31d 100644
--- a/llvm/test/CodeGen/X86/abds.ll
+++ b/llvm/test/CodeGen/X86/abds.ll
@@ -744,8 +744,8 @@ define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
 ; X86-NEXT:    sbbl %edx, %ebx
 ; X86-NEXT:    subl %ecx, %eax
 ; X86-NEXT:    sbbl %esi, %edx
-; X86-NEXT:    cmovgel %edi, %eax
-; X86-NEXT:    cmovgel %ebx, %edx
+; X86-NEXT:    cmovll %edi, %eax
+; X86-NEXT:    cmovll %ebx, %edx
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    popl %edi
 ; X86-NEXT:    popl %ebx
@@ -755,14 +755,13 @@ define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
 ; X64:       # %bb.0:
 ; X64-NEXT:    movq %rdi, %rax
 ; X64-NEXT:    subq %rsi, %rax
-; X64-NEXT:    negq %rax
-; X64-NEXT:    subq %rsi, %rdi
-; X64-NEXT:    cmovlq %rdi, %rax
+; X64-NEXT:    subq %rdi, %rsi
+; X64-NEXT:    cmovgeq %rsi, %rax
 ; X64-NEXT:    retq
   %cmp = icmp sge i64 %a, %b
   %ab = sub i64 %a, %b
   %ba = sub i64 %b, %a
-  %sel = select i1 %cmp, i64 %ba, i64 %ab
+  %sel = select i1 %cmp, i64 %ab, i64 %ba
   ret i64 %sel
 }
 
@@ -791,10 +790,10 @@ define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
 ; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %esi
 ; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %edi
-; X86-NEXT:    cmovgel (%esp), %edx # 4-byte Folded Reload
-; X86-NEXT:    cmovgel %ebx, %esi
-; X86-NEXT:    cmovgel %ebp, %ecx
-; X86-NEXT:    cmovgel %eax, %edi
+; X86-NEXT:    cmovll (%esp), %edx # 4-byte Folded Reload
+; X86-NEXT:    cmovll %ebx, %esi
+; X86-NEXT:    cmovll %ebp, %ecx
+; X86-NEXT:    cmovll %eax, %edi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl %edi, 12(%eax)
 ; X86-NEXT:    movl %ecx, 8(%eax)
@@ -809,20 +808,20 @@ define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
 ;
 ; X64-LABEL: abd_cmp_i128:
 ; X64:       # %bb.0:
-; X64-NEXT:    movq %rdx, %rax
-; X64-NEXT:    subq %rdi, %rax
-; X64-NEXT:    movq %rcx, %r8
-; X64-NEXT:    sbbq %rsi, %r8
-; X64-NEXT:    subq %rdx, %rdi
-; X64-NEXT:    sbbq %rcx, %rsi
-; X64-NEXT:    cmovlq %rdi, %rax
-; X64-NEXT:    cmovlq %rsi, %r8
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    subq %rdx, %rax
+; X64-NEXT:    movq %rsi, %r8
+; X64-NEXT:    sbbq %rcx, %r8
+; X64-NEXT:    subq %rdi, %rdx
+; X64-NEXT:    sbbq %rsi, %rcx
+; X64-NEXT:    cmovgeq %rdx, %rax
+; X64-NEXT:    cmovgeq %rcx, %r8
 ; X64-NEXT:    movq %r8, %rdx
 ; X64-NEXT:    retq
   %cmp = icmp sge i128 %a, %b
   %ab = sub i128 %a, %b
   %ba = sub i128 %b, %a
-  %sel = select i1 %cmp, i128 %ba, i128 %ab
+  %sel = select i1 %cmp, i128 %ab, i128 %ba
   ret i128 %sel
 }
 

diff --git a/llvm/test/CodeGen/X86/abdu-neg.ll b/llvm/test/CodeGen/X86/abdu-neg.ll
index 9cb3e5e8bf0c26..507f7681400ef2 100644
--- a/llvm/test/CodeGen/X86/abdu-neg.ll
+++ b/llvm/test/CodeGen/X86/abdu-neg.ll
@@ -824,8 +824,8 @@ define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
 ; X86-NEXT:    sbbl %edx, %ebx
 ; X86-NEXT:    subl %ecx, %eax
 ; X86-NEXT:    sbbl %esi, %edx
-; X86-NEXT:    cmovbl %edi, %eax
-; X86-NEXT:    cmovbl %ebx, %edx
+; X86-NEXT:    cmovael %edi, %eax
+; X86-NEXT:    cmovael %ebx, %edx
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    popl %edi
 ; X86-NEXT:    popl %ebx
@@ -837,12 +837,12 @@ define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
 ; X64-NEXT:    subq %rsi, %rax
 ; X64-NEXT:    negq %rax
 ; X64-NEXT:    subq %rsi, %rdi
-; X64-NEXT:    cmovaeq %rdi, %rax
+; X64-NEXT:    cmovbq %rdi, %rax
 ; X64-NEXT:    retq
   %cmp = icmp ult i64 %a, %b
   %ab = sub i64 %a, %b
   %ba = sub i64 %b, %a
-  %sel = select i1 %cmp, i64 %ba, i64 %ab
+  %sel = select i1 %cmp, i64 %ab, i64 %ba
   ret i64 %sel
 }
 
@@ -871,10 +871,10 @@ define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
 ; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %esi
 ; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %edi
-; X86-NEXT:    cmovbl (%esp), %edx # 4-byte Folded Reload
-; X86-NEXT:    cmovbl %ebx, %esi
-; X86-NEXT:    cmovbl %ebp, %ecx
-; X86-NEXT:    cmovbl %eax, %edi
+; X86-NEXT:    cmovael (%esp), %edx # 4-byte Folded Reload
+; X86-NEXT:    cmovael %ebx, %esi
+; X86-NEXT:    cmovael %ebp, %ecx
+; X86-NEXT:    cmovael %eax, %edi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl %edi, 12(%eax)
 ; X86-NEXT:    movl %ecx, 8(%eax)
@@ -895,14 +895,14 @@ define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
 ; X64-NEXT:    sbbq %rsi, %r8
 ; X64-NEXT:    subq %rdx, %rdi
 ; X64-NEXT:    sbbq %rcx, %rsi
-; X64-NEXT:    cmovaeq %rdi, %rax
-; X64-NEXT:    cmovaeq %rsi, %r8
+; X64-NEXT:    cmovbq %rdi, %rax
+; X64-NEXT:    cmovbq %rsi, %r8
 ; X64-NEXT:    movq %r8, %rdx
 ; X64-NEXT:    retq
   %cmp = icmp ult i128 %a, %b
   %ab = sub i128 %a, %b
   %ba = sub i128 %b, %a
-  %sel = select i1 %cmp, i128 %ba, i128 %ab
+  %sel = select i1 %cmp, i128 %ab, i128 %ba
   ret i128 %sel
 }
 

diff --git a/llvm/test/CodeGen/X86/abdu.ll b/llvm/test/CodeGen/X86/abdu.ll
index c8fa19cb661b6b..290894d2712e84 100644
--- a/llvm/test/CodeGen/X86/abdu.ll
+++ b/llvm/test/CodeGen/X86/abdu.ll
@@ -695,98 +695,83 @@ define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
 define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
 ; X86-LABEL: abd_cmp_i64:
 ; X86:       # %bb.0:
-; X86-NEXT:    pushl %ebx
-; X86-NEXT:    pushl %edi
-; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    movl %ecx, %edi
-; X86-NEXT:    subl %eax, %edi
-; X86-NEXT:    movl %esi, %ebx
-; X86-NEXT:    sbbl %edx, %ebx
+; X86-NEXT:    xorl %ecx, %ecx
+; X86-NEXT:    subl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    sbbl %ecx, %ecx
+; X86-NEXT:    xorl %ecx, %edx
+; X86-NEXT:    xorl %ecx, %eax
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    sbbl %esi, %edx
-; X86-NEXT:    cmovael %edi, %eax
-; X86-NEXT:    cmovael %ebx, %edx
-; X86-NEXT:    popl %esi
-; X86-NEXT:    popl %edi
-; X86-NEXT:    popl %ebx
+; X86-NEXT:    sbbl %ecx, %edx
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: abd_cmp_i64:
 ; X64:       # %bb.0:
 ; X64-NEXT:    movq %rdi, %rax
 ; X64-NEXT:    subq %rsi, %rax
-; X64-NEXT:    negq %rax
-; X64-NEXT:    subq %rsi, %rdi
-; X64-NEXT:    cmovbq %rdi, %rax
+; X64-NEXT:    subq %rdi, %rsi
+; X64-NEXT:    cmovaeq %rsi, %rax
 ; X64-NEXT:    retq
   %cmp = icmp uge i64 %a, %b
   %ab = sub i64 %a, %b
   %ba = sub i64 %b, %a
-  %sel = select i1 %cmp, i64 %ba, i64 %ab
+  %sel = select i1 %cmp, i64 %ab, i64 %ba
   ret i64 %sel
 }
 
 define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
 ; X86-LABEL: abd_cmp_i128:
 ; X86:       # %bb.0:
-; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    pushl %ebx
 ; X86-NEXT:    pushl %edi
 ; X86-NEXT:    pushl %esi
-; X86-NEXT:    pushl %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
-; X86-NEXT:    subl %edx, %eax
-; X86-NEXT:    movl %eax, (%esp) # 4-byte Spill
-; X86-NEXT:    sbbl %esi, %ebx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
-; X86-NEXT:    sbbl %ecx, %ebp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    sbbl %edi, %eax
-; X86-NEXT:    subl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    subl {{[0-9]+}}(%esp), %edi
 ; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %edi
-; X86-NEXT:    cmovael (%esp), %edx # 4-byte Folded Reload
-; X86-NEXT:    cmovael %ebx, %esi
-; X86-NEXT:    cmovael %ebp, %ecx
-; X86-NEXT:    cmovael %eax, %edi
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl %edi, 12(%eax)
-; X86-NEXT:    movl %ecx, 8(%eax)
+; X86-NEXT:    sbbl %ebx, %ebx
+; X86-NEXT:    xorl %ebx, %ecx
+; X86-NEXT:    xorl %ebx, %edx
+; X86-NEXT:    xorl %ebx, %esi
+; X86-NEXT:    xorl %ebx, %edi
+; X86-NEXT:    subl %ebx, %edi
+; X86-NEXT:    sbbl %ebx, %esi
+; X86-NEXT:    sbbl %ebx, %edx
+; X86-NEXT:    sbbl %ebx, %ecx
+; X86-NEXT:    movl %edi, (%eax)
 ; X86-NEXT:    movl %esi, 4(%eax)
-; X86-NEXT:    movl %edx, (%eax)
-; X86-NEXT:    addl $4, %esp
+; X86-NEXT:    movl %edx, 8(%eax)
+; X86-NEXT:    movl %ecx, 12(%eax)
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    popl %edi
 ; X86-NEXT:    popl %ebx
-; X86-NEXT:    popl %ebp
 ; X86-NEXT:    retl $4
 ;
 ; X64-LABEL: abd_cmp_i128:
 ; X64:       # %bb.0:
-; X64-NEXT:    movq %rdx, %rax
-; X64-NEXT:    subq %rdi, %rax
-; X64-NEXT:    movq %rcx, %r8
-; X64-NEXT:    sbbq %rsi, %r8
-; X64-NEXT:    subq %rdx, %rdi
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    xorl %edi, %edi
+; X64-NEXT:    subq %rdx, %rax
 ; X64-NEXT:    sbbq %rcx, %rsi
-; X64-NEXT:    cmovbq %rdi, %rax
-; X64-NEXT:    cmovbq %rsi, %r8
-; X64-NEXT:    movq %r8, %rdx
+; X64-NEXT:    sbbq %rdi, %rdi
+; X64-NEXT:    xorq %rdi, %rsi
+; X64-NEXT:    xorq %rdi, %rax
+; X64-NEXT:    subq %rdi, %rax
+; X64-NEXT:    sbbq %rdi, %rsi
+; X64-NEXT:    movq %rsi, %rdx
 ; X64-NEXT:    retq
   %cmp = icmp uge i128 %a, %b
   %ab = sub i128 %a, %b
   %ba = sub i128 %b, %a
-  %sel = select i1 %cmp, i128 %ba, i128 %ab
+  %sel = select i1 %cmp, i128 %ab, i128 %ba
   ret i128 %sel
 }
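
For reference, a minimal standalone sketch (not part of the patch; the i32 width and function name are illustrative) of the corrected test pattern, with the select arms now matching the condition that the icmp computes:

define i32 @abd_cmp_i32_sketch(i32 %a, i32 %b) {
  %cmp = icmp uge i32 %a, %b              ; the condition under test
  %ab = sub i32 %a, %b                    ; a - b, selected when a uge b (no wrap)
  %ba = sub i32 %b, %a                    ; b - a, selected when a ult b (no wrap)
  %sel = select i1 %cmp, i32 %ab, i32 %ba ; together: the unsigned |a - b|
  ret i32 %sel
}

With the operands in this order, llc should lower the select to the branchless absolute-difference sequences shown in the updated CHECK lines.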
 


        

