[llvm] 43da8a7 - [DAG] Add test coverage for ABD "sub of selects" patterns based off #53045
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Wed Sep 11 07:18:51 PDT 2024
Author: Simon Pilgrim
Date: 2024-09-11T15:18:29+01:00
New Revision: 43da8a7a10237e8cb89e6d776bec81d97b5326d1
URL: https://github.com/llvm/llvm-project/commit/43da8a7a10237e8cb89e6d776bec81d97b5326d1
DIFF: https://github.com/llvm/llvm-project/commit/43da8a7a10237e8cb89e6d776bec81d97b5326d1.diff
LOG: [DAG] Add test coverage for ABD "sub of selects" patterns based off #53045
Add tests for "sub(select(icmp(a,b),a,b),select(icmp(a,b),b,a)) -> abd(a,b)" patterns that still fail to be matched to abd nodes.
This will hopefully be addressed by #108218.
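For reference, the signed i32 form of the pattern these tests exercise looks like the IR below (a minimal sketch mirroring the abd_select_i32 tests added in this commit; the function name is illustrative). Because both selects key off the same compare, they act as smax/smin of (a, b), so the whole expression is equivalent to sub(smax(a,b),smin(a,b)) and should ideally fold to a single abds node:

define i32 @abd_pattern_sketch(i32 %a, i32 %b) {
  %cmp = icmp sgt i32 %a, %b
  %max = select i1 %cmp, i32 %a, i32 %b   ; smax(a, b)
  %min = select i1 %cmp, i32 %b, i32 %a   ; smin(a, b)
  %sub = sub i32 %max, %min               ; |a - b|, i.e. abds(a, b)
  ret i32 %sub
}

The RISC-V Zbb checked output below already shows this min/max+sub shape directly (min/max or minu/maxu followed by sub), while the other targets still emit a compare plus a pair of conditional selects/moves and a sub.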
Added:
Modified:
llvm/test/CodeGen/AArch64/abds.ll
llvm/test/CodeGen/AArch64/abdu.ll
llvm/test/CodeGen/RISCV/abds.ll
llvm/test/CodeGen/RISCV/abdu.ll
llvm/test/CodeGen/X86/abds.ll
llvm/test/CodeGen/X86/abdu.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/AArch64/abds.ll b/llvm/test/CodeGen/AArch64/abds.ll
index 0e35f8240848b1..e5cc04f9be1a1f 100644
--- a/llvm/test/CodeGen/AArch64/abds.ll
+++ b/llvm/test/CodeGen/AArch64/abds.ll
@@ -539,6 +539,90 @@ define i64 @vector_legalized(i16 %a, i16 %b) {
ret i64 %z
}
+;
+; sub(select(icmp(a,b),a,b),select(icmp(a,b),b,a)) -> abds(a,b)
+;
+
+define i8 @abd_select_i8(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: abd_select_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sxtb w8, w0
+; CHECK-NEXT: cmp w8, w1, sxtb
+; CHECK-NEXT: csel w8, w0, w1, lt
+; CHECK-NEXT: csel w9, w1, w0, lt
+; CHECK-NEXT: sub w0, w9, w8
+; CHECK-NEXT: ret
+ %cmp = icmp slt i8 %a, %b
+ %ab = select i1 %cmp, i8 %a, i8 %b
+ %ba = select i1 %cmp, i8 %b, i8 %a
+ %sub = sub i8 %ba, %ab
+ ret i8 %sub
+}
+
+define i16 @abd_select_i16(i16 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_select_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sxth w8, w0
+; CHECK-NEXT: cmp w8, w1, sxth
+; CHECK-NEXT: csel w8, w0, w1, le
+; CHECK-NEXT: csel w9, w1, w0, le
+; CHECK-NEXT: sub w0, w9, w8
+; CHECK-NEXT: ret
+ %cmp = icmp sle i16 %a, %b
+ %ab = select i1 %cmp, i16 %a, i16 %b
+ %ba = select i1 %cmp, i16 %b, i16 %a
+ %sub = sub i16 %ba, %ab
+ ret i16 %sub
+}
+
+define i32 @abd_select_i32(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_select_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmp w0, w1
+; CHECK-NEXT: csel w8, w0, w1, gt
+; CHECK-NEXT: csel w9, w1, w0, gt
+; CHECK-NEXT: sub w0, w8, w9
+; CHECK-NEXT: ret
+ %cmp = icmp sgt i32 %a, %b
+ %ab = select i1 %cmp, i32 %a, i32 %b
+ %ba = select i1 %cmp, i32 %b, i32 %a
+ %sub = sub i32 %ab, %ba
+ ret i32 %sub
+}
+
+define i64 @abd_select_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: abd_select_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmp x0, x1
+; CHECK-NEXT: csel x8, x0, x1, ge
+; CHECK-NEXT: csel x9, x1, x0, ge
+; CHECK-NEXT: sub x0, x8, x9
+; CHECK-NEXT: ret
+ %cmp = icmp sge i64 %a, %b
+ %ab = select i1 %cmp, i64 %a, i64 %b
+ %ba = select i1 %cmp, i64 %b, i64 %a
+ %sub = sub i64 %ab, %ba
+ ret i64 %sub
+}
+
+define i128 @abd_select_i128(i128 %a, i128 %b) nounwind {
+; CHECK-LABEL: abd_select_i128:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmp x0, x2
+; CHECK-NEXT: sbcs xzr, x1, x3
+; CHECK-NEXT: csel x8, x0, x2, lt
+; CHECK-NEXT: csel x9, x2, x0, lt
+; CHECK-NEXT: csel x10, x1, x3, lt
+; CHECK-NEXT: csel x11, x3, x1, lt
+; CHECK-NEXT: subs x0, x9, x8
+; CHECK-NEXT: sbc x1, x11, x10
+; CHECK-NEXT: ret
+ %cmp = icmp slt i128 %a, %b
+ %ab = select i1 %cmp, i128 %a, i128 %b
+ %ba = select i1 %cmp, i128 %b, i128 %a
+ %sub = sub i128 %ba, %ab
+ ret i128 %sub
+}
declare i8 @llvm.abs.i8(i8, i1)
declare i16 @llvm.abs.i16(i16, i1)
diff --git a/llvm/test/CodeGen/AArch64/abdu.ll b/llvm/test/CodeGen/AArch64/abdu.ll
index eb866e6a78a9b0..0a44ae16884582 100644
--- a/llvm/test/CodeGen/AArch64/abdu.ll
+++ b/llvm/test/CodeGen/AArch64/abdu.ll
@@ -400,6 +400,91 @@ define i64 @vector_legalized(i16 %a, i16 %b) {
ret i64 %z
}
+;
+; sub(select(icmp(a,b),a,b),select(icmp(a,b),b,a)) -> abdu(a,b)
+;
+
+define i8 @abd_select_i8(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: abd_select_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and w8, w0, #0xff
+; CHECK-NEXT: cmp w8, w1, uxtb
+; CHECK-NEXT: csel w8, w0, w1, lo
+; CHECK-NEXT: csel w9, w1, w0, lo
+; CHECK-NEXT: sub w0, w9, w8
+; CHECK-NEXT: ret
+ %cmp = icmp ult i8 %a, %b
+ %ab = select i1 %cmp, i8 %a, i8 %b
+ %ba = select i1 %cmp, i8 %b, i8 %a
+ %sub = sub i8 %ba, %ab
+ ret i8 %sub
+}
+
+define i16 @abd_select_i16(i16 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_select_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and w8, w0, #0xffff
+; CHECK-NEXT: cmp w8, w1, uxth
+; CHECK-NEXT: csel w8, w0, w1, ls
+; CHECK-NEXT: csel w9, w1, w0, ls
+; CHECK-NEXT: sub w0, w9, w8
+; CHECK-NEXT: ret
+ %cmp = icmp ule i16 %a, %b
+ %ab = select i1 %cmp, i16 %a, i16 %b
+ %ba = select i1 %cmp, i16 %b, i16 %a
+ %sub = sub i16 %ba, %ab
+ ret i16 %sub
+}
+
+define i32 @abd_select_i32(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_select_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmp w0, w1
+; CHECK-NEXT: csel w8, w0, w1, hi
+; CHECK-NEXT: csel w9, w1, w0, hi
+; CHECK-NEXT: sub w0, w8, w9
+; CHECK-NEXT: ret
+ %cmp = icmp ugt i32 %a, %b
+ %ab = select i1 %cmp, i32 %a, i32 %b
+ %ba = select i1 %cmp, i32 %b, i32 %a
+ %sub = sub i32 %ab, %ba
+ ret i32 %sub
+}
+
+define i64 @abd_select_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: abd_select_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmp x0, x1
+; CHECK-NEXT: csel x8, x0, x1, hs
+; CHECK-NEXT: csel x9, x1, x0, hs
+; CHECK-NEXT: sub x0, x8, x9
+; CHECK-NEXT: ret
+ %cmp = icmp uge i64 %a, %b
+ %ab = select i1 %cmp, i64 %a, i64 %b
+ %ba = select i1 %cmp, i64 %b, i64 %a
+ %sub = sub i64 %ab, %ba
+ ret i64 %sub
+}
+
+define i128 @abd_select_i128(i128 %a, i128 %b) nounwind {
+; CHECK-LABEL: abd_select_i128:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmp x0, x2
+; CHECK-NEXT: sbcs xzr, x1, x3
+; CHECK-NEXT: csel x8, x0, x2, lo
+; CHECK-NEXT: csel x9, x2, x0, lo
+; CHECK-NEXT: csel x10, x1, x3, lo
+; CHECK-NEXT: csel x11, x3, x1, lo
+; CHECK-NEXT: subs x0, x9, x8
+; CHECK-NEXT: sbc x1, x11, x10
+; CHECK-NEXT: ret
+ %cmp = icmp ult i128 %a, %b
+ %ab = select i1 %cmp, i128 %a, i128 %b
+ %ba = select i1 %cmp, i128 %b, i128 %a
+ %sub = sub i128 %ba, %ab
+ ret i128 %sub
+}
+
declare i8 @llvm.abs.i8(i8, i1)
declare i16 @llvm.abs.i16(i16, i1)
declare i32 @llvm.abs.i32(i32, i1)
diff --git a/llvm/test/CodeGen/RISCV/abds.ll b/llvm/test/CodeGen/RISCV/abds.ll
index 86b36d8f69e95f..919214b0e9a8dd 100644
--- a/llvm/test/CodeGen/RISCV/abds.ll
+++ b/llvm/test/CodeGen/RISCV/abds.ll
@@ -2341,6 +2341,410 @@ define i32 @abd_sub_i32(i32 %a, i32 %b) nounwind {
ret i32 %abs
}
+;
+; sub(select(icmp(a,b),a,b),select(icmp(a,b),b,a)) -> abds(a,b)
+;
+
+define i8 @abd_select_i8(i8 %a, i8 %b) nounwind {
+; RV32I-LABEL: abd_select_i8:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a2, a1, 24
+; RV32I-NEXT: srai a2, a2, 24
+; RV32I-NEXT: slli a3, a0, 24
+; RV32I-NEXT: srai a3, a3, 24
+; RV32I-NEXT: blt a3, a2, .LBB34_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: sub a0, a0, a1
+; RV32I-NEXT: ret
+; RV32I-NEXT: .LBB34_2:
+; RV32I-NEXT: sub a0, a1, a0
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: abd_select_i8:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a2, a1, 56
+; RV64I-NEXT: srai a2, a2, 56
+; RV64I-NEXT: slli a3, a0, 56
+; RV64I-NEXT: srai a3, a3, 56
+; RV64I-NEXT: blt a3, a2, .LBB34_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: sub a0, a0, a1
+; RV64I-NEXT: ret
+; RV64I-NEXT: .LBB34_2:
+; RV64I-NEXT: sub a0, a1, a0
+; RV64I-NEXT: ret
+;
+; ZBB-LABEL: abd_select_i8:
+; ZBB: # %bb.0:
+; ZBB-NEXT: sext.b a1, a1
+; ZBB-NEXT: sext.b a0, a0
+; ZBB-NEXT: min a2, a0, a1
+; ZBB-NEXT: max a0, a0, a1
+; ZBB-NEXT: sub a0, a0, a2
+; ZBB-NEXT: ret
+ %cmp = icmp slt i8 %a, %b
+ %ab = select i1 %cmp, i8 %a, i8 %b
+ %ba = select i1 %cmp, i8 %b, i8 %a
+ %sub = sub i8 %ba, %ab
+ ret i8 %sub
+}
+
+define i16 @abd_select_i16(i16 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_select_i16:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a2, a0, 16
+; RV32I-NEXT: srai a2, a2, 16
+; RV32I-NEXT: slli a3, a1, 16
+; RV32I-NEXT: srai a3, a3, 16
+; RV32I-NEXT: bge a3, a2, .LBB35_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: sub a0, a0, a1
+; RV32I-NEXT: ret
+; RV32I-NEXT: .LBB35_2:
+; RV32I-NEXT: sub a0, a1, a0
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: abd_select_i16:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a2, a0, 48
+; RV64I-NEXT: srai a2, a2, 48
+; RV64I-NEXT: slli a3, a1, 48
+; RV64I-NEXT: srai a3, a3, 48
+; RV64I-NEXT: bge a3, a2, .LBB35_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: sub a0, a0, a1
+; RV64I-NEXT: ret
+; RV64I-NEXT: .LBB35_2:
+; RV64I-NEXT: sub a0, a1, a0
+; RV64I-NEXT: ret
+;
+; ZBB-LABEL: abd_select_i16:
+; ZBB: # %bb.0:
+; ZBB-NEXT: sext.h a1, a1
+; ZBB-NEXT: sext.h a0, a0
+; ZBB-NEXT: min a2, a0, a1
+; ZBB-NEXT: max a0, a0, a1
+; ZBB-NEXT: sub a0, a0, a2
+; ZBB-NEXT: ret
+ %cmp = icmp sle i16 %a, %b
+ %ab = select i1 %cmp, i16 %a, i16 %b
+ %ba = select i1 %cmp, i16 %b, i16 %a
+ %sub = sub i16 %ba, %ab
+ ret i16 %sub
+}
+
+define i32 @abd_select_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_select_i32:
+; RV32I: # %bb.0:
+; RV32I-NEXT: blt a1, a0, .LBB36_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: sub a0, a1, a0
+; RV32I-NEXT: ret
+; RV32I-NEXT: .LBB36_2:
+; RV32I-NEXT: sub a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: abd_select_i32:
+; RV64I: # %bb.0:
+; RV64I-NEXT: sext.w a2, a0
+; RV64I-NEXT: sext.w a3, a1
+; RV64I-NEXT: blt a3, a2, .LBB36_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: subw a0, a1, a0
+; RV64I-NEXT: ret
+; RV64I-NEXT: .LBB36_2:
+; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV32ZBB-LABEL: abd_select_i32:
+; RV32ZBB: # %bb.0:
+; RV32ZBB-NEXT: min a2, a0, a1
+; RV32ZBB-NEXT: max a0, a0, a1
+; RV32ZBB-NEXT: sub a0, a0, a2
+; RV32ZBB-NEXT: ret
+;
+; RV64ZBB-LABEL: abd_select_i32:
+; RV64ZBB: # %bb.0:
+; RV64ZBB-NEXT: sext.w a1, a1
+; RV64ZBB-NEXT: sext.w a0, a0
+; RV64ZBB-NEXT: min a2, a0, a1
+; RV64ZBB-NEXT: max a0, a0, a1
+; RV64ZBB-NEXT: sub a0, a0, a2
+; RV64ZBB-NEXT: ret
+ %cmp = icmp sgt i32 %a, %b
+ %ab = select i1 %cmp, i32 %a, i32 %b
+ %ba = select i1 %cmp, i32 %b, i32 %a
+ %sub = sub i32 %ab, %ba
+ ret i32 %sub
+}
+
+define i64 @abd_select_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: abd_select_i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: beq a1, a3, .LBB37_3
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: slt a4, a1, a3
+; RV32I-NEXT: bnez a4, .LBB37_4
+; RV32I-NEXT: .LBB37_2:
+; RV32I-NEXT: mv a4, a1
+; RV32I-NEXT: mv a5, a0
+; RV32I-NEXT: mv a1, a3
+; RV32I-NEXT: mv a0, a2
+; RV32I-NEXT: j .LBB37_5
+; RV32I-NEXT: .LBB37_3:
+; RV32I-NEXT: sltu a4, a0, a2
+; RV32I-NEXT: beqz a4, .LBB37_2
+; RV32I-NEXT: .LBB37_4:
+; RV32I-NEXT: mv a4, a3
+; RV32I-NEXT: mv a5, a2
+; RV32I-NEXT: .LBB37_5:
+; RV32I-NEXT: sltu a2, a5, a0
+; RV32I-NEXT: sub a1, a4, a1
+; RV32I-NEXT: sub a1, a1, a2
+; RV32I-NEXT: sub a0, a5, a0
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: abd_select_i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: bge a0, a1, .LBB37_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: sub a0, a1, a0
+; RV64I-NEXT: ret
+; RV64I-NEXT: .LBB37_2:
+; RV64I-NEXT: sub a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV32ZBB-LABEL: abd_select_i64:
+; RV32ZBB: # %bb.0:
+; RV32ZBB-NEXT: sltu a4, a2, a0
+; RV32ZBB-NEXT: mv a5, a4
+; RV32ZBB-NEXT: beq a1, a3, .LBB37_2
+; RV32ZBB-NEXT: # %bb.1:
+; RV32ZBB-NEXT: slt a5, a3, a1
+; RV32ZBB-NEXT: .LBB37_2:
+; RV32ZBB-NEXT: bnez a5, .LBB37_4
+; RV32ZBB-NEXT: # %bb.3:
+; RV32ZBB-NEXT: sub a1, a3, a1
+; RV32ZBB-NEXT: sub a1, a1, a4
+; RV32ZBB-NEXT: sub a0, a2, a0
+; RV32ZBB-NEXT: ret
+; RV32ZBB-NEXT: .LBB37_4:
+; RV32ZBB-NEXT: sltu a4, a0, a2
+; RV32ZBB-NEXT: sub a1, a1, a3
+; RV32ZBB-NEXT: sub a1, a1, a4
+; RV32ZBB-NEXT: sub a0, a0, a2
+; RV32ZBB-NEXT: ret
+;
+; RV64ZBB-LABEL: abd_select_i64:
+; RV64ZBB: # %bb.0:
+; RV64ZBB-NEXT: min a2, a0, a1
+; RV64ZBB-NEXT: max a0, a0, a1
+; RV64ZBB-NEXT: sub a0, a0, a2
+; RV64ZBB-NEXT: ret
+ %cmp = icmp sge i64 %a, %b
+ %ab = select i1 %cmp, i64 %a, i64 %b
+ %ba = select i1 %cmp, i64 %b, i64 %a
+ %sub = sub i64 %ab, %ba
+ ret i64 %sub
+}
+
+define i128 @abd_select_i128(i128 %a, i128 %b) nounwind {
+; RV32I-LABEL: abd_select_i128:
+; RV32I: # %bb.0:
+; RV32I-NEXT: lw a7, 4(a2)
+; RV32I-NEXT: lw a3, 4(a1)
+; RV32I-NEXT: lw a6, 8(a2)
+; RV32I-NEXT: lw t0, 12(a2)
+; RV32I-NEXT: lw a5, 12(a1)
+; RV32I-NEXT: lw a4, 8(a1)
+; RV32I-NEXT: beq a5, t0, .LBB38_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: slt t1, a5, t0
+; RV32I-NEXT: j .LBB38_3
+; RV32I-NEXT: .LBB38_2:
+; RV32I-NEXT: sltu t1, a4, a6
+; RV32I-NEXT: .LBB38_3:
+; RV32I-NEXT: lw t3, 0(a2)
+; RV32I-NEXT: lw a1, 0(a1)
+; RV32I-NEXT: beq a3, a7, .LBB38_5
+; RV32I-NEXT: # %bb.4:
+; RV32I-NEXT: sltu a2, a3, a7
+; RV32I-NEXT: j .LBB38_6
+; RV32I-NEXT: .LBB38_5:
+; RV32I-NEXT: sltu a2, a1, t3
+; RV32I-NEXT: .LBB38_6:
+; RV32I-NEXT: xor t2, a5, t0
+; RV32I-NEXT: xor t4, a4, a6
+; RV32I-NEXT: or t2, t4, t2
+; RV32I-NEXT: beqz t2, .LBB38_8
+; RV32I-NEXT: # %bb.7:
+; RV32I-NEXT: mv a2, t1
+; RV32I-NEXT: .LBB38_8:
+; RV32I-NEXT: bnez a2, .LBB38_10
+; RV32I-NEXT: # %bb.9:
+; RV32I-NEXT: mv a2, t3
+; RV32I-NEXT: mv t1, a7
+; RV32I-NEXT: mv t4, t0
+; RV32I-NEXT: mv t2, a6
+; RV32I-NEXT: j .LBB38_11
+; RV32I-NEXT: .LBB38_10:
+; RV32I-NEXT: mv a2, a1
+; RV32I-NEXT: mv t1, a3
+; RV32I-NEXT: mv t4, a5
+; RV32I-NEXT: mv t2, a4
+; RV32I-NEXT: mv a1, t3
+; RV32I-NEXT: mv a3, a7
+; RV32I-NEXT: mv a5, t0
+; RV32I-NEXT: mv a4, a6
+; RV32I-NEXT: .LBB38_11:
+; RV32I-NEXT: sltu a6, a4, t2
+; RV32I-NEXT: sub a7, a5, t4
+; RV32I-NEXT: sltu a5, a1, a2
+; RV32I-NEXT: sub a6, a7, a6
+; RV32I-NEXT: mv a7, a5
+; RV32I-NEXT: beq a3, t1, .LBB38_13
+; RV32I-NEXT: # %bb.12:
+; RV32I-NEXT: sltu a7, a3, t1
+; RV32I-NEXT: .LBB38_13:
+; RV32I-NEXT: sub a4, a4, t2
+; RV32I-NEXT: sltu t0, a4, a7
+; RV32I-NEXT: sub a6, a6, t0
+; RV32I-NEXT: sub a4, a4, a7
+; RV32I-NEXT: sub a3, a3, t1
+; RV32I-NEXT: sub a3, a3, a5
+; RV32I-NEXT: sub a1, a1, a2
+; RV32I-NEXT: sw a1, 0(a0)
+; RV32I-NEXT: sw a3, 4(a0)
+; RV32I-NEXT: sw a4, 8(a0)
+; RV32I-NEXT: sw a6, 12(a0)
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: abd_select_i128:
+; RV64I: # %bb.0:
+; RV64I-NEXT: beq a1, a3, .LBB38_3
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: slt a4, a1, a3
+; RV64I-NEXT: beqz a4, .LBB38_4
+; RV64I-NEXT: .LBB38_2:
+; RV64I-NEXT: mv a4, a1
+; RV64I-NEXT: mv a5, a0
+; RV64I-NEXT: mv a1, a3
+; RV64I-NEXT: mv a0, a2
+; RV64I-NEXT: j .LBB38_5
+; RV64I-NEXT: .LBB38_3:
+; RV64I-NEXT: sltu a4, a0, a2
+; RV64I-NEXT: bnez a4, .LBB38_2
+; RV64I-NEXT: .LBB38_4:
+; RV64I-NEXT: mv a4, a3
+; RV64I-NEXT: mv a5, a2
+; RV64I-NEXT: .LBB38_5:
+; RV64I-NEXT: sltu a2, a0, a5
+; RV64I-NEXT: sub a1, a1, a4
+; RV64I-NEXT: sub a1, a1, a2
+; RV64I-NEXT: sub a0, a0, a5
+; RV64I-NEXT: ret
+;
+; RV32ZBB-LABEL: abd_select_i128:
+; RV32ZBB: # %bb.0:
+; RV32ZBB-NEXT: lw a3, 0(a1)
+; RV32ZBB-NEXT: lw a5, 0(a2)
+; RV32ZBB-NEXT: lw a4, 4(a1)
+; RV32ZBB-NEXT: lw a6, 8(a1)
+; RV32ZBB-NEXT: lw a7, 8(a2)
+; RV32ZBB-NEXT: lw t0, 12(a1)
+; RV32ZBB-NEXT: lw t1, 12(a2)
+; RV32ZBB-NEXT: lw a1, 4(a2)
+; RV32ZBB-NEXT: sltu a2, a7, a6
+; RV32ZBB-NEXT: mv t4, a2
+; RV32ZBB-NEXT: beq t0, t1, .LBB38_2
+; RV32ZBB-NEXT: # %bb.1:
+; RV32ZBB-NEXT: slt t4, t1, t0
+; RV32ZBB-NEXT: .LBB38_2:
+; RV32ZBB-NEXT: sltu t2, a5, a3
+; RV32ZBB-NEXT: sltu t5, a1, a4
+; RV32ZBB-NEXT: mv t3, t2
+; RV32ZBB-NEXT: beq a4, a1, .LBB38_4
+; RV32ZBB-NEXT: # %bb.3:
+; RV32ZBB-NEXT: mv t3, t5
+; RV32ZBB-NEXT: .LBB38_4:
+; RV32ZBB-NEXT: addi sp, sp, -16
+; RV32ZBB-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT: xor t6, t0, t1
+; RV32ZBB-NEXT: xor s0, a6, a7
+; RV32ZBB-NEXT: or t6, s0, t6
+; RV32ZBB-NEXT: beqz t6, .LBB38_6
+; RV32ZBB-NEXT: # %bb.5:
+; RV32ZBB-NEXT: mv t3, t4
+; RV32ZBB-NEXT: .LBB38_6:
+; RV32ZBB-NEXT: mv t4, t2
+; RV32ZBB-NEXT: beq a1, a4, .LBB38_8
+; RV32ZBB-NEXT: # %bb.7:
+; RV32ZBB-NEXT: mv t4, t5
+; RV32ZBB-NEXT: .LBB38_8:
+; RV32ZBB-NEXT: sltu t5, a3, a5
+; RV32ZBB-NEXT: mv t6, t5
+; RV32ZBB-NEXT: beq a4, a1, .LBB38_10
+; RV32ZBB-NEXT: # %bb.9:
+; RV32ZBB-NEXT: sltu t6, a4, a1
+; RV32ZBB-NEXT: .LBB38_10:
+; RV32ZBB-NEXT: bnez t3, .LBB38_12
+; RV32ZBB-NEXT: # %bb.11:
+; RV32ZBB-NEXT: sub t0, t1, t0
+; RV32ZBB-NEXT: sub a6, a7, a6
+; RV32ZBB-NEXT: sub a2, t0, a2
+; RV32ZBB-NEXT: sltu a7, a6, t4
+; RV32ZBB-NEXT: sub a2, a2, a7
+; RV32ZBB-NEXT: sub a3, a5, a3
+; RV32ZBB-NEXT: sub a1, a1, a4
+; RV32ZBB-NEXT: sub a1, a1, t2
+; RV32ZBB-NEXT: sub a4, a6, t4
+; RV32ZBB-NEXT: j .LBB38_13
+; RV32ZBB-NEXT: .LBB38_12:
+; RV32ZBB-NEXT: sltu a2, a6, a7
+; RV32ZBB-NEXT: sub t0, t0, t1
+; RV32ZBB-NEXT: sub a2, t0, a2
+; RV32ZBB-NEXT: sub a6, a6, a7
+; RV32ZBB-NEXT: sltu a7, a6, t6
+; RV32ZBB-NEXT: sub a2, a2, a7
+; RV32ZBB-NEXT: sub a3, a3, a5
+; RV32ZBB-NEXT: sub a4, a4, a1
+; RV32ZBB-NEXT: sub a1, a4, t5
+; RV32ZBB-NEXT: sub a4, a6, t6
+; RV32ZBB-NEXT: .LBB38_13:
+; RV32ZBB-NEXT: sw a4, 8(a0)
+; RV32ZBB-NEXT: sw a1, 4(a0)
+; RV32ZBB-NEXT: sw a3, 0(a0)
+; RV32ZBB-NEXT: sw a2, 12(a0)
+; RV32ZBB-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT: addi sp, sp, 16
+; RV32ZBB-NEXT: ret
+;
+; RV64ZBB-LABEL: abd_select_i128:
+; RV64ZBB: # %bb.0:
+; RV64ZBB-NEXT: sltu a4, a2, a0
+; RV64ZBB-NEXT: mv a5, a4
+; RV64ZBB-NEXT: beq a1, a3, .LBB38_2
+; RV64ZBB-NEXT: # %bb.1:
+; RV64ZBB-NEXT: slt a5, a3, a1
+; RV64ZBB-NEXT: .LBB38_2:
+; RV64ZBB-NEXT: bnez a5, .LBB38_4
+; RV64ZBB-NEXT: # %bb.3:
+; RV64ZBB-NEXT: sub a1, a3, a1
+; RV64ZBB-NEXT: sub a1, a1, a4
+; RV64ZBB-NEXT: sub a0, a2, a0
+; RV64ZBB-NEXT: ret
+; RV64ZBB-NEXT: .LBB38_4:
+; RV64ZBB-NEXT: sltu a4, a0, a2
+; RV64ZBB-NEXT: sub a1, a1, a3
+; RV64ZBB-NEXT: sub a1, a1, a4
+; RV64ZBB-NEXT: sub a0, a0, a2
+; RV64ZBB-NEXT: ret
+ %cmp = icmp slt i128 %a, %b
+ %ab = select i1 %cmp, i128 %a, i128 %b
+ %ba = select i1 %cmp, i128 %b, i128 %a
+ %sub = sub i128 %ba, %ab
+ ret i128 %sub
+}
declare i8 @llvm.abs.i8(i8, i1)
declare i16 @llvm.abs.i16(i16, i1)
diff --git a/llvm/test/CodeGen/RISCV/abdu.ll b/llvm/test/CodeGen/RISCV/abdu.ll
index 14f45895754dfd..a9f933243f679a 100644
--- a/llvm/test/CodeGen/RISCV/abdu.ll
+++ b/llvm/test/CodeGen/RISCV/abdu.ll
@@ -1720,6 +1720,398 @@ define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
ret i128 %sel
}
+;
+; sub(select(icmp(a,b),a,b),select(icmp(a,b),b,a)) -> abdu(a,b)
+;
+
+define i8 @abd_select_i8(i8 %a, i8 %b) nounwind {
+; NOZBB-LABEL: abd_select_i8:
+; NOZBB: # %bb.0:
+; NOZBB-NEXT: andi a2, a1, 255
+; NOZBB-NEXT: andi a3, a0, 255
+; NOZBB-NEXT: bltu a3, a2, .LBB23_2
+; NOZBB-NEXT: # %bb.1:
+; NOZBB-NEXT: sub a0, a0, a1
+; NOZBB-NEXT: ret
+; NOZBB-NEXT: .LBB23_2:
+; NOZBB-NEXT: sub a0, a1, a0
+; NOZBB-NEXT: ret
+;
+; ZBB-LABEL: abd_select_i8:
+; ZBB: # %bb.0:
+; ZBB-NEXT: andi a1, a1, 255
+; ZBB-NEXT: andi a0, a0, 255
+; ZBB-NEXT: minu a2, a0, a1
+; ZBB-NEXT: maxu a0, a0, a1
+; ZBB-NEXT: sub a0, a0, a2
+; ZBB-NEXT: ret
+ %cmp = icmp ult i8 %a, %b
+ %ab = select i1 %cmp, i8 %a, i8 %b
+ %ba = select i1 %cmp, i8 %b, i8 %a
+ %sub = sub i8 %ba, %ab
+ ret i8 %sub
+}
+
+define i16 @abd_select_i16(i16 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_select_i16:
+; RV32I: # %bb.0:
+; RV32I-NEXT: lui a2, 16
+; RV32I-NEXT: addi a2, a2, -1
+; RV32I-NEXT: and a3, a0, a2
+; RV32I-NEXT: and a2, a1, a2
+; RV32I-NEXT: bgeu a2, a3, .LBB24_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: sub a0, a0, a1
+; RV32I-NEXT: ret
+; RV32I-NEXT: .LBB24_2:
+; RV32I-NEXT: sub a0, a1, a0
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: abd_select_i16:
+; RV64I: # %bb.0:
+; RV64I-NEXT: lui a2, 16
+; RV64I-NEXT: addiw a2, a2, -1
+; RV64I-NEXT: and a3, a0, a2
+; RV64I-NEXT: and a2, a1, a2
+; RV64I-NEXT: bgeu a2, a3, .LBB24_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: sub a0, a0, a1
+; RV64I-NEXT: ret
+; RV64I-NEXT: .LBB24_2:
+; RV64I-NEXT: sub a0, a1, a0
+; RV64I-NEXT: ret
+;
+; ZBB-LABEL: abd_select_i16:
+; ZBB: # %bb.0:
+; ZBB-NEXT: zext.h a1, a1
+; ZBB-NEXT: zext.h a0, a0
+; ZBB-NEXT: minu a2, a0, a1
+; ZBB-NEXT: maxu a0, a0, a1
+; ZBB-NEXT: sub a0, a0, a2
+; ZBB-NEXT: ret
+ %cmp = icmp ule i16 %a, %b
+ %ab = select i1 %cmp, i16 %a, i16 %b
+ %ba = select i1 %cmp, i16 %b, i16 %a
+ %sub = sub i16 %ba, %ab
+ ret i16 %sub
+}
+
+define i32 @abd_select_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_select_i32:
+; RV32I: # %bb.0:
+; RV32I-NEXT: bltu a1, a0, .LBB25_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: sub a0, a1, a0
+; RV32I-NEXT: ret
+; RV32I-NEXT: .LBB25_2:
+; RV32I-NEXT: sub a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: abd_select_i32:
+; RV64I: # %bb.0:
+; RV64I-NEXT: sext.w a2, a0
+; RV64I-NEXT: sext.w a3, a1
+; RV64I-NEXT: bltu a3, a2, .LBB25_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: subw a0, a1, a0
+; RV64I-NEXT: ret
+; RV64I-NEXT: .LBB25_2:
+; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV32ZBB-LABEL: abd_select_i32:
+; RV32ZBB: # %bb.0:
+; RV32ZBB-NEXT: minu a2, a0, a1
+; RV32ZBB-NEXT: maxu a0, a0, a1
+; RV32ZBB-NEXT: sub a0, a0, a2
+; RV32ZBB-NEXT: ret
+;
+; RV64ZBB-LABEL: abd_select_i32:
+; RV64ZBB: # %bb.0:
+; RV64ZBB-NEXT: slli a1, a1, 32
+; RV64ZBB-NEXT: srli a1, a1, 32
+; RV64ZBB-NEXT: slli a0, a0, 32
+; RV64ZBB-NEXT: srli a0, a0, 32
+; RV64ZBB-NEXT: minu a2, a0, a1
+; RV64ZBB-NEXT: maxu a0, a0, a1
+; RV64ZBB-NEXT: sub a0, a0, a2
+; RV64ZBB-NEXT: ret
+ %cmp = icmp ugt i32 %a, %b
+ %ab = select i1 %cmp, i32 %a, i32 %b
+ %ba = select i1 %cmp, i32 %b, i32 %a
+ %sub = sub i32 %ab, %ba
+ ret i32 %sub
+}
+
+define i64 @abd_select_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: abd_select_i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: beq a1, a3, .LBB26_3
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: sltu a4, a1, a3
+; RV32I-NEXT: bnez a4, .LBB26_4
+; RV32I-NEXT: .LBB26_2:
+; RV32I-NEXT: mv a4, a1
+; RV32I-NEXT: mv a5, a0
+; RV32I-NEXT: mv a1, a3
+; RV32I-NEXT: mv a0, a2
+; RV32I-NEXT: j .LBB26_5
+; RV32I-NEXT: .LBB26_3:
+; RV32I-NEXT: sltu a4, a0, a2
+; RV32I-NEXT: beqz a4, .LBB26_2
+; RV32I-NEXT: .LBB26_4:
+; RV32I-NEXT: mv a4, a3
+; RV32I-NEXT: mv a5, a2
+; RV32I-NEXT: .LBB26_5:
+; RV32I-NEXT: sltu a2, a5, a0
+; RV32I-NEXT: sub a1, a4, a1
+; RV32I-NEXT: sub a1, a1, a2
+; RV32I-NEXT: sub a0, a5, a0
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: abd_select_i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: bgeu a0, a1, .LBB26_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: sub a0, a1, a0
+; RV64I-NEXT: ret
+; RV64I-NEXT: .LBB26_2:
+; RV64I-NEXT: sub a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV32ZBB-LABEL: abd_select_i64:
+; RV32ZBB: # %bb.0:
+; RV32ZBB-NEXT: sltu a4, a0, a2
+; RV32ZBB-NEXT: sub a3, a1, a3
+; RV32ZBB-NEXT: sub a3, a3, a4
+; RV32ZBB-NEXT: sub a2, a0, a2
+; RV32ZBB-NEXT: beq a3, a1, .LBB26_2
+; RV32ZBB-NEXT: # %bb.1:
+; RV32ZBB-NEXT: sltu a0, a1, a3
+; RV32ZBB-NEXT: j .LBB26_3
+; RV32ZBB-NEXT: .LBB26_2:
+; RV32ZBB-NEXT: sltu a0, a0, a2
+; RV32ZBB-NEXT: .LBB26_3:
+; RV32ZBB-NEXT: neg a1, a0
+; RV32ZBB-NEXT: xor a2, a2, a1
+; RV32ZBB-NEXT: sltu a4, a2, a1
+; RV32ZBB-NEXT: xor a1, a3, a1
+; RV32ZBB-NEXT: add a1, a1, a0
+; RV32ZBB-NEXT: sub a1, a1, a4
+; RV32ZBB-NEXT: add a0, a2, a0
+; RV32ZBB-NEXT: ret
+;
+; RV64ZBB-LABEL: abd_select_i64:
+; RV64ZBB: # %bb.0:
+; RV64ZBB-NEXT: minu a2, a0, a1
+; RV64ZBB-NEXT: maxu a0, a0, a1
+; RV64ZBB-NEXT: sub a0, a0, a2
+; RV64ZBB-NEXT: ret
+ %cmp = icmp uge i64 %a, %b
+ %ab = select i1 %cmp, i64 %a, i64 %b
+ %ba = select i1 %cmp, i64 %b, i64 %a
+ %sub = sub i64 %ab, %ba
+ ret i64 %sub
+}
+
+define i128 @abd_select_i128(i128 %a, i128 %b) nounwind {
+; RV32I-LABEL: abd_select_i128:
+; RV32I: # %bb.0:
+; RV32I-NEXT: lw a7, 4(a2)
+; RV32I-NEXT: lw a3, 4(a1)
+; RV32I-NEXT: lw a6, 8(a2)
+; RV32I-NEXT: lw t0, 12(a2)
+; RV32I-NEXT: lw a5, 12(a1)
+; RV32I-NEXT: lw a4, 8(a1)
+; RV32I-NEXT: beq a5, t0, .LBB27_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: sltu t1, a5, t0
+; RV32I-NEXT: j .LBB27_3
+; RV32I-NEXT: .LBB27_2:
+; RV32I-NEXT: sltu t1, a4, a6
+; RV32I-NEXT: .LBB27_3:
+; RV32I-NEXT: lw t3, 0(a2)
+; RV32I-NEXT: lw a1, 0(a1)
+; RV32I-NEXT: beq a3, a7, .LBB27_5
+; RV32I-NEXT: # %bb.4:
+; RV32I-NEXT: sltu a2, a3, a7
+; RV32I-NEXT: j .LBB27_6
+; RV32I-NEXT: .LBB27_5:
+; RV32I-NEXT: sltu a2, a1, t3
+; RV32I-NEXT: .LBB27_6:
+; RV32I-NEXT: xor t2, a5, t0
+; RV32I-NEXT: xor t4, a4, a6
+; RV32I-NEXT: or t2, t4, t2
+; RV32I-NEXT: beqz t2, .LBB27_8
+; RV32I-NEXT: # %bb.7:
+; RV32I-NEXT: mv a2, t1
+; RV32I-NEXT: .LBB27_8:
+; RV32I-NEXT: bnez a2, .LBB27_10
+; RV32I-NEXT: # %bb.9:
+; RV32I-NEXT: mv a2, t3
+; RV32I-NEXT: mv t1, a7
+; RV32I-NEXT: mv t4, t0
+; RV32I-NEXT: mv t2, a6
+; RV32I-NEXT: j .LBB27_11
+; RV32I-NEXT: .LBB27_10:
+; RV32I-NEXT: mv a2, a1
+; RV32I-NEXT: mv t1, a3
+; RV32I-NEXT: mv t4, a5
+; RV32I-NEXT: mv t2, a4
+; RV32I-NEXT: mv a1, t3
+; RV32I-NEXT: mv a3, a7
+; RV32I-NEXT: mv a5, t0
+; RV32I-NEXT: mv a4, a6
+; RV32I-NEXT: .LBB27_11:
+; RV32I-NEXT: sltu a6, a4, t2
+; RV32I-NEXT: sub a7, a5, t4
+; RV32I-NEXT: sltu a5, a1, a2
+; RV32I-NEXT: sub a6, a7, a6
+; RV32I-NEXT: mv a7, a5
+; RV32I-NEXT: beq a3, t1, .LBB27_13
+; RV32I-NEXT: # %bb.12:
+; RV32I-NEXT: sltu a7, a3, t1
+; RV32I-NEXT: .LBB27_13:
+; RV32I-NEXT: sub a4, a4, t2
+; RV32I-NEXT: sltu t0, a4, a7
+; RV32I-NEXT: sub a6, a6, t0
+; RV32I-NEXT: sub a4, a4, a7
+; RV32I-NEXT: sub a3, a3, t1
+; RV32I-NEXT: sub a3, a3, a5
+; RV32I-NEXT: sub a1, a1, a2
+; RV32I-NEXT: sw a1, 0(a0)
+; RV32I-NEXT: sw a3, 4(a0)
+; RV32I-NEXT: sw a4, 8(a0)
+; RV32I-NEXT: sw a6, 12(a0)
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: abd_select_i128:
+; RV64I: # %bb.0:
+; RV64I-NEXT: beq a1, a3, .LBB27_3
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: sltu a4, a1, a3
+; RV64I-NEXT: beqz a4, .LBB27_4
+; RV64I-NEXT: .LBB27_2:
+; RV64I-NEXT: mv a4, a1
+; RV64I-NEXT: mv a5, a0
+; RV64I-NEXT: mv a1, a3
+; RV64I-NEXT: mv a0, a2
+; RV64I-NEXT: j .LBB27_5
+; RV64I-NEXT: .LBB27_3:
+; RV64I-NEXT: sltu a4, a0, a2
+; RV64I-NEXT: bnez a4, .LBB27_2
+; RV64I-NEXT: .LBB27_4:
+; RV64I-NEXT: mv a4, a3
+; RV64I-NEXT: mv a5, a2
+; RV64I-NEXT: .LBB27_5:
+; RV64I-NEXT: sltu a2, a0, a5
+; RV64I-NEXT: sub a1, a1, a4
+; RV64I-NEXT: sub a1, a1, a2
+; RV64I-NEXT: sub a0, a0, a5
+; RV64I-NEXT: ret
+;
+; RV32ZBB-LABEL: abd_select_i128:
+; RV32ZBB: # %bb.0:
+; RV32ZBB-NEXT: lw a5, 0(a2)
+; RV32ZBB-NEXT: lw a3, 0(a1)
+; RV32ZBB-NEXT: lw t1, 12(a2)
+; RV32ZBB-NEXT: lw a7, 8(a2)
+; RV32ZBB-NEXT: lw a4, 8(a1)
+; RV32ZBB-NEXT: lw a6, 12(a1)
+; RV32ZBB-NEXT: lw t0, 4(a2)
+; RV32ZBB-NEXT: lw a1, 4(a1)
+; RV32ZBB-NEXT: sltu a2, a4, a7
+; RV32ZBB-NEXT: sub t1, a6, t1
+; RV32ZBB-NEXT: sltu t2, a3, a5
+; RV32ZBB-NEXT: sub a2, t1, a2
+; RV32ZBB-NEXT: mv t1, t2
+; RV32ZBB-NEXT: beq a1, t0, .LBB27_2
+; RV32ZBB-NEXT: # %bb.1:
+; RV32ZBB-NEXT: sltu t1, a1, t0
+; RV32ZBB-NEXT: .LBB27_2:
+; RV32ZBB-NEXT: sub a7, a4, a7
+; RV32ZBB-NEXT: sltu t3, a7, t1
+; RV32ZBB-NEXT: sub a2, a2, t3
+; RV32ZBB-NEXT: sub a7, a7, t1
+; RV32ZBB-NEXT: beq a2, a6, .LBB27_4
+; RV32ZBB-NEXT: # %bb.3:
+; RV32ZBB-NEXT: sltu t1, a6, a2
+; RV32ZBB-NEXT: j .LBB27_5
+; RV32ZBB-NEXT: .LBB27_4:
+; RV32ZBB-NEXT: sltu t1, a4, a7
+; RV32ZBB-NEXT: .LBB27_5:
+; RV32ZBB-NEXT: sub t0, a1, t0
+; RV32ZBB-NEXT: sub t0, t0, t2
+; RV32ZBB-NEXT: sub a5, a3, a5
+; RV32ZBB-NEXT: beq t0, a1, .LBB27_7
+; RV32ZBB-NEXT: # %bb.6:
+; RV32ZBB-NEXT: sltu a1, a1, t0
+; RV32ZBB-NEXT: j .LBB27_8
+; RV32ZBB-NEXT: .LBB27_7:
+; RV32ZBB-NEXT: sltu a1, a3, a5
+; RV32ZBB-NEXT: .LBB27_8:
+; RV32ZBB-NEXT: xor a3, a2, a6
+; RV32ZBB-NEXT: xor a4, a7, a4
+; RV32ZBB-NEXT: or a3, a4, a3
+; RV32ZBB-NEXT: beqz a3, .LBB27_10
+; RV32ZBB-NEXT: # %bb.9:
+; RV32ZBB-NEXT: mv a1, t1
+; RV32ZBB-NEXT: .LBB27_10:
+; RV32ZBB-NEXT: neg a6, a1
+; RV32ZBB-NEXT: xor a3, a7, a6
+; RV32ZBB-NEXT: sltu a4, a3, a6
+; RV32ZBB-NEXT: xor a2, a2, a6
+; RV32ZBB-NEXT: add a2, a2, a1
+; RV32ZBB-NEXT: sub a4, a2, a4
+; RV32ZBB-NEXT: xor a2, a5, a6
+; RV32ZBB-NEXT: sltu a5, a2, a6
+; RV32ZBB-NEXT: xor a7, t0, a6
+; RV32ZBB-NEXT: mv t1, a5
+; RV32ZBB-NEXT: beqz t0, .LBB27_12
+; RV32ZBB-NEXT: # %bb.11:
+; RV32ZBB-NEXT: sltu t1, a7, a6
+; RV32ZBB-NEXT: .LBB27_12:
+; RV32ZBB-NEXT: add a3, a3, a1
+; RV32ZBB-NEXT: sltu a6, a3, t1
+; RV32ZBB-NEXT: sub a4, a4, a6
+; RV32ZBB-NEXT: sub a3, a3, t1
+; RV32ZBB-NEXT: add a7, a7, a1
+; RV32ZBB-NEXT: sub a5, a7, a5
+; RV32ZBB-NEXT: add a1, a2, a1
+; RV32ZBB-NEXT: sw a1, 0(a0)
+; RV32ZBB-NEXT: sw a5, 4(a0)
+; RV32ZBB-NEXT: sw a3, 8(a0)
+; RV32ZBB-NEXT: sw a4, 12(a0)
+; RV32ZBB-NEXT: ret
+;
+; RV64ZBB-LABEL: abd_select_i128:
+; RV64ZBB: # %bb.0:
+; RV64ZBB-NEXT: sltu a4, a0, a2
+; RV64ZBB-NEXT: sub a3, a1, a3
+; RV64ZBB-NEXT: sub a3, a3, a4
+; RV64ZBB-NEXT: sub a2, a0, a2
+; RV64ZBB-NEXT: beq a3, a1, .LBB27_2
+; RV64ZBB-NEXT: # %bb.1:
+; RV64ZBB-NEXT: sltu a0, a1, a3
+; RV64ZBB-NEXT: j .LBB27_3
+; RV64ZBB-NEXT: .LBB27_2:
+; RV64ZBB-NEXT: sltu a0, a0, a2
+; RV64ZBB-NEXT: .LBB27_3:
+; RV64ZBB-NEXT: neg a1, a0
+; RV64ZBB-NEXT: xor a2, a2, a1
+; RV64ZBB-NEXT: sltu a4, a2, a1
+; RV64ZBB-NEXT: xor a1, a3, a1
+; RV64ZBB-NEXT: add a1, a1, a0
+; RV64ZBB-NEXT: sub a1, a1, a4
+; RV64ZBB-NEXT: add a0, a2, a0
+; RV64ZBB-NEXT: ret
+ %cmp = icmp ult i128 %a, %b
+ %ab = select i1 %cmp, i128 %a, i128 %b
+ %ba = select i1 %cmp, i128 %b, i128 %a
+ %sub = sub i128 %ba, %ab
+ ret i128 %sub
+}
+
declare i8 @llvm.abs.i8(i8, i1)
declare i16 @llvm.abs.i16(i16, i1)
declare i32 @llvm.abs.i32(i32, i1)
@@ -1737,4 +2129,3 @@ declare i32 @llvm.umin.i32(i32, i32)
declare i64 @llvm.umin.i64(i64, i64)
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK: {{.*}}
-; NOZBB: {{.*}}
diff --git a/llvm/test/CodeGen/X86/abds.ll b/llvm/test/CodeGen/X86/abds.ll
index 9c4c059a3b9bf1..4c524c28b160ab 100644
--- a/llvm/test/CodeGen/X86/abds.ll
+++ b/llvm/test/CodeGen/X86/abds.ll
@@ -1154,6 +1154,211 @@ define i32 @abd_sub_i32(i32 %a, i32 %b) nounwind {
ret i32 %abs
}
+;
+; sub(select(icmp(a,b),a,b),select(icmp(a,b),b,a)) -> abds(a,b)
+;
+
+define i8 @abd_select_i8(i8 %a, i8 %b) nounwind {
+; X86-LABEL: abd_select_i8:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpb %cl, %al
+; X86-NEXT: movl %ecx, %edx
+; X86-NEXT: cmovll %eax, %edx
+; X86-NEXT: cmovll %ecx, %eax
+; X86-NEXT: subb %dl, %al
+; X86-NEXT: # kill: def $al killed $al killed $eax
+; X86-NEXT: retl
+;
+; X64-LABEL: abd_select_i8:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: cmpb %sil, %al
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: cmovll %edi, %ecx
+; X64-NEXT: cmovll %esi, %eax
+; X64-NEXT: subb %cl, %al
+; X64-NEXT: # kill: def $al killed $al killed $eax
+; X64-NEXT: retq
+ %cmp = icmp slt i8 %a, %b
+ %ab = select i1 %cmp, i8 %a, i8 %b
+ %ba = select i1 %cmp, i8 %b, i8 %a
+ %sub = sub i8 %ba, %ab
+ ret i8 %sub
+}
+
+define i16 @abd_select_i16(i16 %a, i16 %b) nounwind {
+; X86-LABEL: abd_select_i16:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpw %cx, %ax
+; X86-NEXT: movl %ecx, %edx
+; X86-NEXT: cmovlel %eax, %edx
+; X86-NEXT: cmovlel %ecx, %eax
+; X86-NEXT: subl %edx, %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NEXT: retl
+;
+; X64-LABEL: abd_select_i16:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: cmpw %si, %ax
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: cmovlel %edi, %ecx
+; X64-NEXT: cmovlel %esi, %eax
+; X64-NEXT: subl %ecx, %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-NEXT: retq
+ %cmp = icmp sle i16 %a, %b
+ %ab = select i1 %cmp, i16 %a, i16 %b
+ %ba = select i1 %cmp, i16 %b, i16 %a
+ %sub = sub i16 %ba, %ab
+ ret i16 %sub
+}
+
+define i32 @abd_select_i32(i32 %a, i32 %b) nounwind {
+; X86-LABEL: abd_select_i32:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: cmpl %ecx, %edx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: cmovgl %edx, %eax
+; X86-NEXT: cmovgl %ecx, %edx
+; X86-NEXT: subl %edx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: abd_select_i32:
+; X64: # %bb.0:
+; X64-NEXT: cmpl %esi, %edi
+; X64-NEXT: movl %esi, %eax
+; X64-NEXT: cmovgl %edi, %eax
+; X64-NEXT: cmovgl %esi, %edi
+; X64-NEXT: subl %edi, %eax
+; X64-NEXT: retq
+ %cmp = icmp sgt i32 %a, %b
+ %ab = select i1 %cmp, i32 %a, i32 %b
+ %ba = select i1 %cmp, i32 %b, i32 %a
+ %sub = sub i32 %ab, %ba
+ ret i32 %sub
+}
+
+define i64 @abd_select_i64(i64 %a, i64 %b) nounwind {
+; X86-LABEL: abd_select_i64:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebx
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: cmpl %esi, %ebx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: sbbl %edi, %eax
+; X86-NEXT: movl %edi, %edx
+; X86-NEXT: cmovgel %ecx, %edx
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: cmovgel %ebx, %eax
+; X86-NEXT: cmovgel %edi, %ecx
+; X86-NEXT: cmovgel %esi, %ebx
+; X86-NEXT: subl %ebx, %eax
+; X86-NEXT: sbbl %ecx, %edx
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebx
+; X86-NEXT: retl
+;
+; X64-LABEL: abd_select_i64:
+; X64: # %bb.0:
+; X64-NEXT: cmpq %rsi, %rdi
+; X64-NEXT: movq %rsi, %rax
+; X64-NEXT: cmovgeq %rdi, %rax
+; X64-NEXT: cmovgeq %rsi, %rdi
+; X64-NEXT: subq %rdi, %rax
+; X64-NEXT: retq
+ %cmp = icmp sge i64 %a, %b
+ %ab = select i1 %cmp, i64 %a, i64 %b
+ %ba = select i1 %cmp, i64 %b, i64 %a
+ %sub = sub i64 %ab, %ba
+ ret i64 %sub
+}
+
+define i128 @abd_select_i128(i128 %a, i128 %b) nounwind {
+; X86-LABEL: abd_select_i128:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: pushl %ebx
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: pushl %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: cmpl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: sbbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: sbbl %ebp, %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, %eax
+; X86-NEXT: sbbl %ebx, %eax
+; X86-NEXT: movl %ebx, %eax
+; X86-NEXT: cmovll %edi, %eax
+; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X86-NEXT: cmovll %ebx, %edi
+; X86-NEXT: movl %ebp, %ebx
+; X86-NEXT: cmovll %ecx, %ebx
+; X86-NEXT: cmovll %ebp, %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ebp
+; X86-NEXT: cmovll %esi, %ebp
+; X86-NEXT: cmovll %eax, %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmovll %edx, %eax
+; X86-NEXT: cmovll {{[0-9]+}}(%esp), %edx
+; X86-NEXT: subl %eax, %edx
+; X86-NEXT: sbbl %ebp, %esi
+; X86-NEXT: sbbl %ebx, %ecx
+; X86-NEXT: sbbl (%esp), %edi # 4-byte Folded Reload
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %edx, (%eax)
+; X86-NEXT: movl %esi, 4(%eax)
+; X86-NEXT: movl %ecx, 8(%eax)
+; X86-NEXT: movl %edi, 12(%eax)
+; X86-NEXT: addl $4, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebx
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl $4
+;
+; X64-LABEL: abd_select_i128:
+; X64: # %bb.0:
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: cmpq %rdx, %rdi
+; X64-NEXT: movq %rsi, %rdi
+; X64-NEXT: sbbq %rcx, %rdi
+; X64-NEXT: movq %rcx, %rdi
+; X64-NEXT: cmovlq %rsi, %rdi
+; X64-NEXT: movq %rdx, %r8
+; X64-NEXT: cmovlq %rax, %r8
+; X64-NEXT: cmovlq %rcx, %rsi
+; X64-NEXT: cmovlq %rdx, %rax
+; X64-NEXT: subq %r8, %rax
+; X64-NEXT: sbbq %rdi, %rsi
+; X64-NEXT: movq %rsi, %rdx
+; X64-NEXT: retq
+ %cmp = icmp slt i128 %a, %b
+ %ab = select i1 %cmp, i128 %a, i128 %b
+ %ba = select i1 %cmp, i128 %b, i128 %a
+ %sub = sub i128 %ba, %ab
+ ret i128 %sub
+}
declare i8 @llvm.abs.i8(i8, i1)
declare i16 @llvm.abs.i16(i16, i1)
diff --git a/llvm/test/CodeGen/X86/abdu.ll b/llvm/test/CodeGen/X86/abdu.ll
index 335fa8c156f8e4..fe9006a8aec234 100644
--- a/llvm/test/CodeGen/X86/abdu.ll
+++ b/llvm/test/CodeGen/X86/abdu.ll
@@ -768,6 +768,212 @@ define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
ret i128 %sel
}
+;
+; sub(select(icmp(a,b),a,b),select(icmp(a,b),b,a)) -> abdu(a,b)
+;
+
+define i8 @abd_select_i8(i8 %a, i8 %b) nounwind {
+; X86-LABEL: abd_select_i8:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpb %cl, %al
+; X86-NEXT: movl %ecx, %edx
+; X86-NEXT: cmovbl %eax, %edx
+; X86-NEXT: cmovbl %ecx, %eax
+; X86-NEXT: subb %dl, %al
+; X86-NEXT: # kill: def $al killed $al killed $eax
+; X86-NEXT: retl
+;
+; X64-LABEL: abd_select_i8:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: cmpb %sil, %al
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: cmovbl %edi, %ecx
+; X64-NEXT: cmovbl %esi, %eax
+; X64-NEXT: subb %cl, %al
+; X64-NEXT: # kill: def $al killed $al killed $eax
+; X64-NEXT: retq
+ %cmp = icmp ult i8 %a, %b
+ %ab = select i1 %cmp, i8 %a, i8 %b
+ %ba = select i1 %cmp, i8 %b, i8 %a
+ %sub = sub i8 %ba, %ab
+ ret i8 %sub
+}
+
+define i16 @abd_select_i16(i16 %a, i16 %b) nounwind {
+; X86-LABEL: abd_select_i16:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpw %cx, %ax
+; X86-NEXT: movl %ecx, %edx
+; X86-NEXT: cmovbel %eax, %edx
+; X86-NEXT: cmovbel %ecx, %eax
+; X86-NEXT: subl %edx, %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NEXT: retl
+;
+; X64-LABEL: abd_select_i16:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: cmpw %si, %ax
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: cmovbel %edi, %ecx
+; X64-NEXT: cmovbel %esi, %eax
+; X64-NEXT: subl %ecx, %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-NEXT: retq
+ %cmp = icmp ule i16 %a, %b
+ %ab = select i1 %cmp, i16 %a, i16 %b
+ %ba = select i1 %cmp, i16 %b, i16 %a
+ %sub = sub i16 %ba, %ab
+ ret i16 %sub
+}
+
+define i32 @abd_select_i32(i32 %a, i32 %b) nounwind {
+; X86-LABEL: abd_select_i32:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: cmpl %ecx, %edx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: cmoval %edx, %eax
+; X86-NEXT: cmoval %ecx, %edx
+; X86-NEXT: subl %edx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: abd_select_i32:
+; X64: # %bb.0:
+; X64-NEXT: cmpl %esi, %edi
+; X64-NEXT: movl %esi, %eax
+; X64-NEXT: cmoval %edi, %eax
+; X64-NEXT: cmoval %esi, %edi
+; X64-NEXT: subl %edi, %eax
+; X64-NEXT: retq
+ %cmp = icmp ugt i32 %a, %b
+ %ab = select i1 %cmp, i32 %a, i32 %b
+ %ba = select i1 %cmp, i32 %b, i32 %a
+ %sub = sub i32 %ab, %ba
+ ret i32 %sub
+}
+
+define i64 @abd_select_i64(i64 %a, i64 %b) nounwind {
+; X86-LABEL: abd_select_i64:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebx
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: cmpl %esi, %ebx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: sbbl %edi, %eax
+; X86-NEXT: movl %edi, %edx
+; X86-NEXT: cmovael %ecx, %edx
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: cmovael %ebx, %eax
+; X86-NEXT: cmovael %edi, %ecx
+; X86-NEXT: cmovael %esi, %ebx
+; X86-NEXT: subl %ebx, %eax
+; X86-NEXT: sbbl %ecx, %edx
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebx
+; X86-NEXT: retl
+;
+; X64-LABEL: abd_select_i64:
+; X64: # %bb.0:
+; X64-NEXT: cmpq %rsi, %rdi
+; X64-NEXT: movq %rsi, %rax
+; X64-NEXT: cmovaeq %rdi, %rax
+; X64-NEXT: cmovaeq %rsi, %rdi
+; X64-NEXT: subq %rdi, %rax
+; X64-NEXT: retq
+ %cmp = icmp uge i64 %a, %b
+ %ab = select i1 %cmp, i64 %a, i64 %b
+ %ba = select i1 %cmp, i64 %b, i64 %a
+ %sub = sub i64 %ab, %ba
+ ret i64 %sub
+}
+
+define i128 @abd_select_i128(i128 %a, i128 %b) nounwind {
+; X86-LABEL: abd_select_i128:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: pushl %ebx
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: pushl %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: cmpl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: sbbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: sbbl %ebp, %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, %eax
+; X86-NEXT: sbbl %ebx, %eax
+; X86-NEXT: movl %ebx, %eax
+; X86-NEXT: cmovbl %edi, %eax
+; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X86-NEXT: cmovbl %ebx, %edi
+; X86-NEXT: movl %ebp, %ebx
+; X86-NEXT: cmovbl %ecx, %ebx
+; X86-NEXT: cmovbl %ebp, %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ebp
+; X86-NEXT: cmovbl %esi, %ebp
+; X86-NEXT: cmovbl %eax, %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmovbl %edx, %eax
+; X86-NEXT: cmovbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: subl %eax, %edx
+; X86-NEXT: sbbl %ebp, %esi
+; X86-NEXT: sbbl %ebx, %ecx
+; X86-NEXT: sbbl (%esp), %edi # 4-byte Folded Reload
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %edx, (%eax)
+; X86-NEXT: movl %esi, 4(%eax)
+; X86-NEXT: movl %ecx, 8(%eax)
+; X86-NEXT: movl %edi, 12(%eax)
+; X86-NEXT: addl $4, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebx
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl $4
+;
+; X64-LABEL: abd_select_i128:
+; X64: # %bb.0:
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: cmpq %rdx, %rdi
+; X64-NEXT: movq %rsi, %rdi
+; X64-NEXT: sbbq %rcx, %rdi
+; X64-NEXT: movq %rcx, %rdi
+; X64-NEXT: cmovbq %rsi, %rdi
+; X64-NEXT: movq %rdx, %r8
+; X64-NEXT: cmovbq %rax, %r8
+; X64-NEXT: cmovbq %rcx, %rsi
+; X64-NEXT: cmovbq %rdx, %rax
+; X64-NEXT: subq %r8, %rax
+; X64-NEXT: sbbq %rdi, %rsi
+; X64-NEXT: movq %rsi, %rdx
+; X64-NEXT: retq
+ %cmp = icmp ult i128 %a, %b
+ %ab = select i1 %cmp, i128 %a, i128 %b
+ %ba = select i1 %cmp, i128 %b, i128 %a
+ %sub = sub i128 %ba, %ab
+ ret i128 %sub
+}
+
declare i8 @llvm.abs.i8(i8, i1)
declare i16 @llvm.abs.i16(i16, i1)
declare i32 @llvm.abs.i32(i32, i1)