[llvm] 04f6504 - [AArch64][RISCV] Add scalar abds/abdu test coverage - based off #100810

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Mon Aug 5 05:53:06 PDT 2024


Author: Simon Pilgrim
Date: 2024-08-05T13:43:39+01:00
New Revision: 04f65043bc8711996a4ade05c67ae2d087ae8be5

URL: https://github.com/llvm/llvm-project/commit/04f65043bc8711996a4ade05c67ae2d087ae8be5
DIFF: https://github.com/llvm/llvm-project/commit/04f65043bc8711996a4ade05c67ae2d087ae8be5.diff

LOG: [AArch64][RISCV] Add scalar abds/abdu test coverage - based off #100810

Extend test coverage for #92576 - copied from existing x86 tests
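
For context: abds/abdu denote LLVM's signed/unsigned absolute-difference
operations (ISD::ABDS / ISD::ABDU). A minimal sketch of the signed pattern
the new tests exercise, distilled from the tests themselves (the function
name is illustrative):

    define i8 @abds_sketch(i8 %a, i8 %b) {
      %aext = sext i8 %a to i64   ; widen so the subtraction cannot wrap
      %bext = sext i8 %b to i64
      %sub  = sub i64 %aext, %bext
      %abs  = call i64 @llvm.abs.i64(i64 %sub, i1 false)
      %res  = trunc i64 %abs to i8   ; |a - b| fits back in the narrow type
      ret i8 %res
    }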

Added: 
    llvm/test/CodeGen/AArch64/abds-neg.ll
    llvm/test/CodeGen/AArch64/abds.ll
    llvm/test/CodeGen/AArch64/abdu-neg.ll
    llvm/test/CodeGen/AArch64/abdu.ll
    llvm/test/CodeGen/RISCV/abds-neg.ll
    llvm/test/CodeGen/RISCV/abds.ll
    llvm/test/CodeGen/RISCV/abdu-neg.ll
    llvm/test/CodeGen/RISCV/abdu.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/abds-neg.ll b/llvm/test/CodeGen/AArch64/abds-neg.ll
new file mode 100644
index 0000000000000..a3afda50acab2
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/abds-neg.ll
@@ -0,0 +1,603 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64 | FileCheck %s
+
+;
+; trunc(nabs(sub(sext(a),sext(b)))) -> nabds(a,b)
+;
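+; Worked example: i8 a = 10, b = 3 -> sext difference 7, abs 7, negate -7,
+; trunc back to i8 gives -7, i.e. the result is always -|a - b|.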
+
+define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: abd_ext_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    sxtb x8, w0
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    sub x8, x8, w1, sxtb
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x8, x8, mi
+; CHECK-NEXT:    neg w0, w8
+; CHECK-NEXT:    ret
+  %aext = sext i8 %a to i64
+  %bext = sext i8 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i8
+  ret i8 %trunc
+}
+
+define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_ext_i8_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    sxtb x8, w0
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    sub x8, x8, w1, sxth
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x8, x8, mi
+; CHECK-NEXT:    neg w0, w8
+; CHECK-NEXT:    ret
+  %aext = sext i8 %a to i64
+  %bext = sext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i8
+  ret i8 %trunc
+}
+
+define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: abd_ext_i8_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    sxtb x8, w0
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    sub x8, x8, w1, sxtb
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x8, x8, mi
+; CHECK-NEXT:    neg w0, w8
+; CHECK-NEXT:    ret
+  %aext = sext i8 %a to i64
+  %bext = sext i8 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i8
+  ret i8 %trunc
+}
+
+define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_ext_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    sxth x8, w0
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    sub x8, x8, w1, sxth
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x8, x8, mi
+; CHECK-NEXT:    neg w0, w8
+; CHECK-NEXT:    ret
+  %aext = sext i16 %a to i64
+  %bext = sext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i16
+  ret i16 %trunc
+}
+
+define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_ext_i16_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    sxth x8, w0
+; CHECK-NEXT:    sub x8, x8, w1, sxtw
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x8, x8, mi
+; CHECK-NEXT:    neg w0, w8
+; CHECK-NEXT:    ret
+  %aext = sext i16 %a to i64
+  %bext = sext i32 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i16
+  ret i16 %trunc
+}
+
+define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_ext_i16_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    sxth x8, w0
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    sub x8, x8, w1, sxth
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x8, x8, mi
+; CHECK-NEXT:    neg w0, w8
+; CHECK-NEXT:    ret
+  %aext = sext i16 %a to i64
+  %bext = sext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i16
+  ret i16 %trunc
+}
+
+define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_ext_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    sxtw x8, w0
+; CHECK-NEXT:    sub x8, x8, w1, sxtw
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x8, x8, mi
+; CHECK-NEXT:    neg w0, w8
+; CHECK-NEXT:    ret
+  %aext = sext i32 %a to i64
+  %bext = sext i32 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i32
+  ret i32 %trunc
+}
+
+define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_ext_i32_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    sxtw x8, w0
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    sub x8, x8, w1, sxth
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x8, x8, mi
+; CHECK-NEXT:    neg w0, w8
+; CHECK-NEXT:    ret
+  %aext = sext i32 %a to i64
+  %bext = sext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i32
+  ret i32 %trunc
+}
+
+define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_ext_i32_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    sxtw x8, w0
+; CHECK-NEXT:    sub x8, x8, w1, sxtw
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x8, x8, mi
+; CHECK-NEXT:    neg w0, w8
+; CHECK-NEXT:    ret
+  %aext = sext i32 %a to i64
+  %bext = sext i32 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i32
+  ret i32 %trunc
+}
+
+define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: abd_ext_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    asr x8, x0, #63
+; CHECK-NEXT:    asr x9, x1, #63
+; CHECK-NEXT:    subs x10, x0, x1
+; CHECK-NEXT:    sbc x8, x8, x9
+; CHECK-NEXT:    asr x8, x8, #63
+; CHECK-NEXT:    eor x9, x10, x8
+; CHECK-NEXT:    sub x0, x8, x9
+; CHECK-NEXT:    ret
+  %aext = sext i64 %a to i128
+  %bext = sext i64 %b to i128
+  %sub = sub i128 %aext, %bext
+  %abs = call i128 @llvm.abs.i128(i128 %sub, i1 false)
+  %nabs = sub i128 0, %abs
+  %trunc = trunc i128 %nabs to i64
+  ret i64 %trunc
+}
+
+define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: abd_ext_i64_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    asr x8, x0, #63
+; CHECK-NEXT:    asr x9, x1, #63
+; CHECK-NEXT:    subs x10, x0, x1
+; CHECK-NEXT:    sbc x8, x8, x9
+; CHECK-NEXT:    asr x8, x8, #63
+; CHECK-NEXT:    eor x9, x10, x8
+; CHECK-NEXT:    sub x0, x8, x9
+; CHECK-NEXT:    ret
+  %aext = sext i64 %a to i128
+  %bext = sext i64 %b to i128
+  %sub = sub i128 %aext, %bext
+  %abs = call i128 @llvm.abs.i128(i128 %sub, i1 true)
+  %nabs = sub i128 0, %abs
+  %trunc = trunc i128 %nabs to i64
+  ret i64 %trunc
+}
+
+define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind {
+; CHECK-LABEL: abd_ext_i128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    asr x8, x1, #63
+; CHECK-NEXT:    asr x9, x3, #63
+; CHECK-NEXT:    subs x10, x0, x2
+; CHECK-NEXT:    sbcs x11, x1, x3
+; CHECK-NEXT:    sbcs xzr, x8, x9
+; CHECK-NEXT:    sbc x8, x8, x9
+; CHECK-NEXT:    asr x8, x8, #63
+; CHECK-NEXT:    eor x9, x10, x8
+; CHECK-NEXT:    eor x10, x11, x8
+; CHECK-NEXT:    subs x9, x9, x8
+; CHECK-NEXT:    sbc x8, x10, x8
+; CHECK-NEXT:    negs x0, x9
+; CHECK-NEXT:    ngc x1, x8
+; CHECK-NEXT:    ret
+  %aext = sext i128 %a to i256
+  %bext = sext i128 %b to i256
+  %sub = sub i256 %aext, %bext
+  %abs = call i256 @llvm.abs.i256(i256 %sub, i1 false)
+  %nabs = sub i256 0, %abs
+  %trunc = trunc i256 %nabs to i128
+  ret i128 %trunc
+}
+
+define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind {
+; CHECK-LABEL: abd_ext_i128_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    asr x8, x1, #63
+; CHECK-NEXT:    asr x9, x3, #63
+; CHECK-NEXT:    subs x10, x0, x2
+; CHECK-NEXT:    sbcs x11, x1, x3
+; CHECK-NEXT:    sbcs xzr, x8, x9
+; CHECK-NEXT:    sbc x8, x8, x9
+; CHECK-NEXT:    asr x8, x8, #63
+; CHECK-NEXT:    eor x9, x10, x8
+; CHECK-NEXT:    eor x10, x11, x8
+; CHECK-NEXT:    subs x9, x9, x8
+; CHECK-NEXT:    sbc x8, x10, x8
+; CHECK-NEXT:    negs x0, x9
+; CHECK-NEXT:    ngc x1, x8
+; CHECK-NEXT:    ret
+  %aext = sext i128 %a to i256
+  %bext = sext i128 %b to i256
+  %sub = sub i256 %aext, %bext
+  %abs = call i256 @llvm.abs.i256(i256 %sub, i1 true)
+  %nabs = sub i256 0, %abs
+  %trunc = trunc i256 %nabs to i128
+  ret i128 %trunc
+}
+
+;
+; sub(smin(a,b),smax(a,b)) -> nabds(a,b)
+;
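+; Worked example: a = 10, b = 3 -> smin = 3, smax = 10, 3 - 10 = -7 = -|a - b|.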
+
+define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: abd_minmax_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sxtb w8, w1
+; CHECK-NEXT:    sxtb w9, w0
+; CHECK-NEXT:    cmp w9, w8
+; CHECK-NEXT:    csel w10, w9, w8, lt
+; CHECK-NEXT:    csel w8, w9, w8, gt
+; CHECK-NEXT:    sub w0, w10, w8
+; CHECK-NEXT:    ret
+  %min = call i8 @llvm.smin.i8(i8 %a, i8 %b)
+  %max = call i8 @llvm.smax.i8(i8 %a, i8 %b)
+  %sub = sub i8 %min, %max
+  ret i8 %sub
+}
+
+define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_minmax_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sxth w8, w1
+; CHECK-NEXT:    sxth w9, w0
+; CHECK-NEXT:    cmp w9, w8
+; CHECK-NEXT:    csel w10, w9, w8, lt
+; CHECK-NEXT:    csel w8, w9, w8, gt
+; CHECK-NEXT:    sub w0, w10, w8
+; CHECK-NEXT:    ret
+  %min = call i16 @llvm.smin.i16(i16 %a, i16 %b)
+  %max = call i16 @llvm.smax.i16(i16 %a, i16 %b)
+  %sub = sub i16 %min, %max
+  ret i16 %sub
+}
+
+define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_minmax_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    csel w8, w0, w1, lt
+; CHECK-NEXT:    csel w9, w0, w1, gt
+; CHECK-NEXT:    sub w0, w8, w9
+; CHECK-NEXT:    ret
+  %min = call i32 @llvm.smin.i32(i32 %a, i32 %b)
+  %max = call i32 @llvm.smax.i32(i32 %a, i32 %b)
+  %sub = sub i32 %min, %max
+  ret i32 %sub
+}
+
+define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: abd_minmax_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x0, x1
+; CHECK-NEXT:    csel x8, x0, x1, lt
+; CHECK-NEXT:    csel x9, x0, x1, gt
+; CHECK-NEXT:    sub x0, x8, x9
+; CHECK-NEXT:    ret
+  %min = call i64 @llvm.smin.i64(i64 %a, i64 %b)
+  %max = call i64 @llvm.smax.i64(i64 %a, i64 %b)
+  %sub = sub i64 %min, %max
+  ret i64 %sub
+}
+
+define i128 @abd_minmax_i128(i128 %a, i128 %b) nounwind {
+; CHECK-LABEL: abd_minmax_i128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x0, x2
+; CHECK-NEXT:    sbcs xzr, x1, x3
+; CHECK-NEXT:    csel x8, x1, x3, lt
+; CHECK-NEXT:    csel x9, x0, x2, lt
+; CHECK-NEXT:    cmp x2, x0
+; CHECK-NEXT:    sbcs xzr, x3, x1
+; CHECK-NEXT:    csel x10, x0, x2, lt
+; CHECK-NEXT:    csel x11, x1, x3, lt
+; CHECK-NEXT:    subs x0, x9, x10
+; CHECK-NEXT:    sbc x1, x8, x11
+; CHECK-NEXT:    ret
+  %min = call i128 @llvm.smin.i128(i128 %a, i128 %b)
+  %max = call i128 @llvm.smax.i128(i128 %a, i128 %b)
+  %sub = sub i128 %min, %max
+  ret i128 %sub
+}
+
+;
+; select(icmp(a,b),sub(a,b),sub(b,a)) -> nabds(a,b)
+;
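+; Worked example (abd_cmp_i8): a = 3, b = 10 -> (a sle b) is true, so the
+; select yields a - b = -7; for a = 10, b = 3 it yields b - a = -7 as well.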
+
+define i8 @abd_cmp_i8(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: abd_cmp_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sxtb w8, w0
+; CHECK-NEXT:    sub w9, w0, w1
+; CHECK-NEXT:    sub w10, w1, w0
+; CHECK-NEXT:    cmp w8, w1, sxtb
+; CHECK-NEXT:    csel w0, w9, w10, le
+; CHECK-NEXT:    ret
+  %cmp = icmp sle i8 %a, %b
+  %ab = sub i8 %a, %b
+  %ba = sub i8 %b, %a
+  %sel = select i1 %cmp, i8 %ab, i8 %ba
+  ret i8 %sel
+}
+
+define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_cmp_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sxth w8, w0
+; CHECK-NEXT:    sub w9, w0, w1
+; CHECK-NEXT:    sub w10, w1, w0
+; CHECK-NEXT:    cmp w8, w1, sxth
+; CHECK-NEXT:    csel w0, w9, w10, lt
+; CHECK-NEXT:    ret
+  %cmp = icmp slt i16 %a, %b
+  %ab = sub i16 %a, %b
+  %ba = sub i16 %b, %a
+  %sel = select i1 %cmp, i16 %ab, i16 %ba
+  ret i16 %sel
+}
+
+define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_cmp_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub w8, w1, w0
+; CHECK-NEXT:    subs w9, w0, w1
+; CHECK-NEXT:    csel w0, w8, w9, ge
+; CHECK-NEXT:    ret
+  %cmp = icmp sge i32 %a, %b
+  %ab = sub i32 %a, %b
+  %ba = sub i32 %b, %a
+  %sel = select i1 %cmp, i32 %ba, i32 %ab
+  ret i32 %sel
+}
+
+define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: abd_cmp_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub x8, x1, x0
+; CHECK-NEXT:    subs x9, x0, x1
+; CHECK-NEXT:    csel x0, x8, x9, lt
+; CHECK-NEXT:    ret
+  %cmp = icmp slt i64 %a, %b
+  %ab = sub i64 %a, %b
+  %ba = sub i64 %b, %a
+  %sel = select i1 %cmp, i64 %ba, i64 %ab
+  ret i64 %sel
+}
+
+define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
+; CHECK-LABEL: abd_cmp_i128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x0, x2
+; CHECK-NEXT:    sbc x8, x1, x3
+; CHECK-NEXT:    subs x9, x2, x0
+; CHECK-NEXT:    sbc x10, x3, x1
+; CHECK-NEXT:    subs x11, x0, x2
+; CHECK-NEXT:    sbcs xzr, x1, x3
+; CHECK-NEXT:    csel x0, x9, x11, lt
+; CHECK-NEXT:    csel x1, x10, x8, lt
+; CHECK-NEXT:    ret
+  %cmp = icmp slt i128 %a, %b
+  %ab = sub i128 %a, %b
+  %ba = sub i128 %b, %a
+  %sel = select i1 %cmp, i128 %ba, i128 %ab
+  ret i128 %sel
+}
+
+;
+; nabs(sub_nsw(a,b)) -> nabds(a,b)
+;
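+; Worked example: a = 10, b = 3 -> sub nsw = 7 (nsw guarantees no signed
+; wrap, which is what makes the fold legal), abs = 7, returned value -7.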
+
+define i8 @abd_subnsw_i8(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: abd_subnsw_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub w8, w0, w1
+; CHECK-NEXT:    sbfx w9, w8, #7, #1
+; CHECK-NEXT:    eor w8, w8, w9
+; CHECK-NEXT:    sub w0, w9, w8
+; CHECK-NEXT:    ret
+  %sub = sub nsw i8 %a, %b
+  %abs = call i8 @llvm.abs.i8(i8 %sub, i1 false)
+  %nabs = sub i8 0, %abs
+  ret i8 %nabs
+}
+
+define i8 @abd_subnsw_i8_undef(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: abd_subnsw_i8_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub w8, w0, w1
+; CHECK-NEXT:    sbfx w9, w8, #7, #1
+; CHECK-NEXT:    eor w8, w8, w9
+; CHECK-NEXT:    sub w0, w9, w8
+; CHECK-NEXT:    ret
+  %sub = sub nsw i8 %a, %b
+  %abs = call i8 @llvm.abs.i8(i8 %sub, i1 true)
+  %nabs = sub i8 0, %abs
+  ret i8 %nabs
+}
+
+define i16 @abd_subnsw_i16(i16 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_subnsw_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub w8, w0, w1
+; CHECK-NEXT:    sbfx w9, w8, #15, #1
+; CHECK-NEXT:    eor w8, w8, w9
+; CHECK-NEXT:    sub w0, w9, w8
+; CHECK-NEXT:    ret
+  %sub = sub nsw i16 %a, %b
+  %abs = call i16 @llvm.abs.i16(i16 %sub, i1 false)
+  %nabs = sub i16 0, %abs
+  ret i16 %nabs
+}
+
+define i16 @abd_subnsw_i16_undef(i16 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_subnsw_i16_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub w8, w0, w1
+; CHECK-NEXT:    sbfx w9, w8, #15, #1
+; CHECK-NEXT:    eor w8, w8, w9
+; CHECK-NEXT:    sub w0, w9, w8
+; CHECK-NEXT:    ret
+  %sub = sub nsw i16 %a, %b
+  %abs = call i16 @llvm.abs.i16(i16 %sub, i1 true)
+  %nabs = sub i16 0, %abs
+  ret i16 %nabs
+}
+
+define i32 @abd_subnsw_i32(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_subnsw_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    subs w8, w0, w1
+; CHECK-NEXT:    cneg w0, w8, pl
+; CHECK-NEXT:    ret
+  %sub = sub nsw i32 %a, %b
+  %abs = call i32 @llvm.abs.i32(i32 %sub, i1 false)
+  %nabs = sub i32 0, %abs
+  ret i32 %nabs
+}
+
+define i32 @abd_subnsw_i32_undef(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_subnsw_i32_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    subs w8, w0, w1
+; CHECK-NEXT:    cneg w0, w8, pl
+; CHECK-NEXT:    ret
+  %sub = sub nsw i32 %a, %b
+  %abs = call i32 @llvm.abs.i32(i32 %sub, i1 true)
+  %nabs = sub i32 0, %abs
+  ret i32 %nabs
+}
+
+define i64 @abd_subnsw_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: abd_subnsw_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    subs x8, x0, x1
+; CHECK-NEXT:    cneg x0, x8, pl
+; CHECK-NEXT:    ret
+  %sub = sub nsw i64 %a, %b
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %nabs = sub i64 0, %abs
+  ret i64 %nabs
+}
+
+define i64 @abd_subnsw_i64_undef(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: abd_subnsw_i64_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    subs x8, x0, x1
+; CHECK-NEXT:    cneg x0, x8, pl
+; CHECK-NEXT:    ret
+  %sub = sub nsw i64 %a, %b
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  %nabs = sub i64 0, %abs
+  ret i64 %nabs
+}
+
+define i128 @abd_subnsw_i128(i128 %a, i128 %b) nounwind {
+; CHECK-LABEL: abd_subnsw_i128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    subs x8, x0, x2
+; CHECK-NEXT:    sbc x9, x1, x3
+; CHECK-NEXT:    asr x10, x9, #63
+; CHECK-NEXT:    eor x8, x8, x10
+; CHECK-NEXT:    eor x9, x9, x10
+; CHECK-NEXT:    subs x0, x10, x8
+; CHECK-NEXT:    sbc x1, x10, x9
+; CHECK-NEXT:    ret
+  %sub = sub nsw i128 %a, %b
+  %abs = call i128 @llvm.abs.i128(i128 %sub, i1 false)
+  %nabs = sub i128 0, %abs
+  ret i128 %nabs
+}
+
+define i128 @abd_subnsw_i128_undef(i128 %a, i128 %b) nounwind {
+; CHECK-LABEL: abd_subnsw_i128_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    subs x8, x0, x2
+; CHECK-NEXT:    sbc x9, x1, x3
+; CHECK-NEXT:    asr x10, x9, #63
+; CHECK-NEXT:    eor x8, x8, x10
+; CHECK-NEXT:    eor x9, x9, x10
+; CHECK-NEXT:    subs x0, x10, x8
+; CHECK-NEXT:    sbc x1, x10, x9
+; CHECK-NEXT:    ret
+  %sub = sub nsw i128 %a, %b
+  %abs = call i128 @llvm.abs.i128(i128 %sub, i1 true)
+  %nabs = sub i128 0, %abs
+  ret i128 %nabs
+}
+
+declare i8 @llvm.abs.i8(i8, i1)
+declare i16 @llvm.abs.i16(i16, i1)
+declare i32 @llvm.abs.i32(i32, i1)
+declare i64 @llvm.abs.i64(i64, i1)
+declare i128 @llvm.abs.i128(i128, i1)
+
+declare i8 @llvm.smax.i8(i8, i8)
+declare i16 @llvm.smax.i16(i16, i16)
+declare i32 @llvm.smax.i32(i32, i32)
+declare i64 @llvm.smax.i64(i64, i64)
+
+declare i8 @llvm.smin.i8(i8, i8)
+declare i16 @llvm.smin.i16(i16, i16)
+declare i32 @llvm.smin.i32(i32, i32)
+declare i64 @llvm.smin.i64(i64, i64)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; X64: {{.*}}
+; X86: {{.*}}

diff --git a/llvm/test/CodeGen/AArch64/abds.ll b/llvm/test/CodeGen/AArch64/abds.ll
new file mode 100644
index 0000000000000..1e47a3c2fd185
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/abds.ll
@@ -0,0 +1,592 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64 | FileCheck %s
+
+;
+; trunc(abs(sub(sext(a),sext(b)))) -> abds(a,b)
+;
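+; Worked example: i8 a = 10, b = 3 -> sext difference 7, abs 7, trunc 7,
+; i.e. the result is always |a - b| computed without wrap.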
+
+define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: abd_ext_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    sxtb x8, w0
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    sub x8, x8, w1, sxtb
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x0, x8, mi
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
+  %aext = sext i8 %a to i64
+  %bext = sext i8 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %trunc = trunc i64 %abs to i8
+  ret i8 %trunc
+}
+
+define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_ext_i8_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    sxtb x8, w0
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    sub x8, x8, w1, sxth
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x0, x8, mi
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
+  %aext = sext i8 %a to i64
+  %bext = sext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %trunc = trunc i64 %abs to i8
+  ret i8 %trunc
+}
+
+define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: abd_ext_i8_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    sxtb x8, w0
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    sub x8, x8, w1, sxtb
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x0, x8, mi
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
+  %aext = sext i8 %a to i64
+  %bext = sext i8 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  %trunc = trunc i64 %abs to i8
+  ret i8 %trunc
+}
+
+define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_ext_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    sxth x8, w0
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    sub x8, x8, w1, sxth
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x0, x8, mi
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
+  %aext = sext i16 %a to i64
+  %bext = sext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %trunc = trunc i64 %abs to i16
+  ret i16 %trunc
+}
+
+define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_ext_i16_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    sxth x8, w0
+; CHECK-NEXT:    sub x8, x8, w1, sxtw
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x0, x8, mi
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
+  %aext = sext i16 %a to i64
+  %bext = sext i32 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %trunc = trunc i64 %abs to i16
+  ret i16 %trunc
+}
+
+define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_ext_i16_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    sxth x8, w0
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    sub x8, x8, w1, sxth
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x0, x8, mi
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
+  %aext = sext i16 %a to i64
+  %bext = sext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  %trunc = trunc i64 %abs to i16
+  ret i16 %trunc
+}
+
+define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_ext_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    sxtw x8, w0
+; CHECK-NEXT:    sub x8, x8, w1, sxtw
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x0, x8, mi
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
+  %aext = sext i32 %a to i64
+  %bext = sext i32 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %trunc = trunc i64 %abs to i32
+  ret i32 %trunc
+}
+
+define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_ext_i32_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    sxtw x8, w0
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    sub x8, x8, w1, sxth
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x0, x8, mi
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
+  %aext = sext i32 %a to i64
+  %bext = sext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %trunc = trunc i64 %abs to i32
+  ret i32 %trunc
+}
+
+define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_ext_i32_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    sxtw x8, w0
+; CHECK-NEXT:    sub x8, x8, w1, sxtw
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x0, x8, mi
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
+  %aext = sext i32 %a to i64
+  %bext = sext i32 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  %trunc = trunc i64 %abs to i32
+  ret i32 %trunc
+}
+
+define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: abd_ext_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    asr x8, x0, #63
+; CHECK-NEXT:    asr x9, x1, #63
+; CHECK-NEXT:    subs x10, x0, x1
+; CHECK-NEXT:    sbc x8, x8, x9
+; CHECK-NEXT:    asr x8, x8, #63
+; CHECK-NEXT:    eor x9, x10, x8
+; CHECK-NEXT:    sub x0, x9, x8
+; CHECK-NEXT:    ret
+  %aext = sext i64 %a to i128
+  %bext = sext i64 %b to i128
+  %sub = sub i128 %aext, %bext
+  %abs = call i128 @llvm.abs.i128(i128 %sub, i1 false)
+  %trunc = trunc i128 %abs to i64
+  ret i64 %trunc
+}
+
+define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: abd_ext_i64_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    asr x8, x0, #63
+; CHECK-NEXT:    asr x9, x1, #63
+; CHECK-NEXT:    subs x10, x0, x1
+; CHECK-NEXT:    sbc x8, x8, x9
+; CHECK-NEXT:    asr x8, x8, #63
+; CHECK-NEXT:    eor x9, x10, x8
+; CHECK-NEXT:    sub x0, x9, x8
+; CHECK-NEXT:    ret
+  %aext = sext i64 %a to i128
+  %bext = sext i64 %b to i128
+  %sub = sub i128 %aext, %bext
+  %abs = call i128 @llvm.abs.i128(i128 %sub, i1 true)
+  %trunc = trunc i128 %abs to i64
+  ret i64 %trunc
+}
+
+define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind {
+; CHECK-LABEL: abd_ext_i128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    asr x8, x1, #63
+; CHECK-NEXT:    asr x9, x3, #63
+; CHECK-NEXT:    subs x10, x0, x2
+; CHECK-NEXT:    sbcs x11, x1, x3
+; CHECK-NEXT:    sbcs xzr, x8, x9
+; CHECK-NEXT:    sbc x8, x8, x9
+; CHECK-NEXT:    asr x8, x8, #63
+; CHECK-NEXT:    eor x9, x10, x8
+; CHECK-NEXT:    eor x10, x11, x8
+; CHECK-NEXT:    subs x0, x9, x8
+; CHECK-NEXT:    sbc x1, x10, x8
+; CHECK-NEXT:    ret
+  %aext = sext i128 %a to i256
+  %bext = sext i128 %b to i256
+  %sub = sub i256 %aext, %bext
+  %abs = call i256 @llvm.abs.i256(i256 %sub, i1 false)
+  %trunc = trunc i256 %abs to i128
+  ret i128 %trunc
+}
+
+define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind {
+; CHECK-LABEL: abd_ext_i128_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    asr x8, x1, #63
+; CHECK-NEXT:    asr x9, x3, #63
+; CHECK-NEXT:    subs x10, x0, x2
+; CHECK-NEXT:    sbcs x11, x1, x3
+; CHECK-NEXT:    sbcs xzr, x8, x9
+; CHECK-NEXT:    sbc x8, x8, x9
+; CHECK-NEXT:    asr x8, x8, #63
+; CHECK-NEXT:    eor x9, x10, x8
+; CHECK-NEXT:    eor x10, x11, x8
+; CHECK-NEXT:    subs x0, x9, x8
+; CHECK-NEXT:    sbc x1, x10, x8
+; CHECK-NEXT:    ret
+  %aext = sext i128 %a to i256
+  %bext = sext i128 %b to i256
+  %sub = sub i256 %aext, %bext
+  %abs = call i256 @llvm.abs.i256(i256 %sub, i1 true)
+  %trunc = trunc i256 %abs to i128
+  ret i128 %trunc
+}
+
+;
+; sub(smax(a,b),smin(a,b)) -> abds(a,b)
+;
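+; Worked example: a = 10, b = 3 -> smax = 10, smin = 3, 10 - 3 = 7 = |a - b|.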
+
+define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: abd_minmax_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sxtb w8, w1
+; CHECK-NEXT:    sxtb w9, w0
+; CHECK-NEXT:    cmp w9, w8
+; CHECK-NEXT:    csel w10, w9, w8, lt
+; CHECK-NEXT:    csel w8, w9, w8, gt
+; CHECK-NEXT:    sub w0, w8, w10
+; CHECK-NEXT:    ret
+  %min = call i8 @llvm.smin.i8(i8 %a, i8 %b)
+  %max = call i8 @llvm.smax.i8(i8 %a, i8 %b)
+  %sub = sub i8 %max, %min
+  ret i8 %sub
+}
+
+define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_minmax_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sxth w8, w1
+; CHECK-NEXT:    sxth w9, w0
+; CHECK-NEXT:    cmp w9, w8
+; CHECK-NEXT:    csel w10, w9, w8, lt
+; CHECK-NEXT:    csel w8, w9, w8, gt
+; CHECK-NEXT:    sub w0, w8, w10
+; CHECK-NEXT:    ret
+  %min = call i16 @llvm.smin.i16(i16 %a, i16 %b)
+  %max = call i16 @llvm.smax.i16(i16 %a, i16 %b)
+  %sub = sub i16 %max, %min
+  ret i16 %sub
+}
+
+define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_minmax_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    csel w8, w0, w1, lt
+; CHECK-NEXT:    csel w9, w0, w1, gt
+; CHECK-NEXT:    sub w0, w9, w8
+; CHECK-NEXT:    ret
+  %min = call i32 @llvm.smin.i32(i32 %a, i32 %b)
+  %max = call i32 @llvm.smax.i32(i32 %a, i32 %b)
+  %sub = sub i32 %max, %min
+  ret i32 %sub
+}
+
+define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: abd_minmax_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x0, x1
+; CHECK-NEXT:    csel x8, x0, x1, lt
+; CHECK-NEXT:    csel x9, x0, x1, gt
+; CHECK-NEXT:    sub x0, x9, x8
+; CHECK-NEXT:    ret
+  %min = call i64 @llvm.smin.i64(i64 %a, i64 %b)
+  %max = call i64 @llvm.smax.i64(i64 %a, i64 %b)
+  %sub = sub i64 %max, %min
+  ret i64 %sub
+}
+
+define i128 @abd_minmax_i128(i128 %a, i128 %b) nounwind {
+; CHECK-LABEL: abd_minmax_i128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x0, x2
+; CHECK-NEXT:    sbcs xzr, x1, x3
+; CHECK-NEXT:    csel x8, x1, x3, lt
+; CHECK-NEXT:    csel x9, x0, x2, lt
+; CHECK-NEXT:    cmp x2, x0
+; CHECK-NEXT:    sbcs xzr, x3, x1
+; CHECK-NEXT:    csel x10, x0, x2, lt
+; CHECK-NEXT:    csel x11, x1, x3, lt
+; CHECK-NEXT:    subs x0, x10, x9
+; CHECK-NEXT:    sbc x1, x11, x8
+; CHECK-NEXT:    ret
+  %min = call i128 @llvm.smin.i128(i128 %a, i128 %b)
+  %max = call i128 @llvm.smax.i128(i128 %a, i128 %b)
+  %sub = sub i128 %max, %min
+  ret i128 %sub
+}
+
+;
+; select(icmp(a,b),sub(a,b),sub(b,a)) -> abds(a,b)
+;
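+; Worked example (abd_cmp_i8): a = 10, b = 3 -> (a sgt b) is true, so the
+; select yields a - b = 7; for a = 3, b = 10 it yields b - a = 7.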
+
+define i8 @abd_cmp_i8(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: abd_cmp_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sxtb w8, w0
+; CHECK-NEXT:    sub w9, w0, w1
+; CHECK-NEXT:    sub w10, w1, w0
+; CHECK-NEXT:    cmp w8, w1, sxtb
+; CHECK-NEXT:    csel w0, w9, w10, gt
+; CHECK-NEXT:    ret
+  %cmp = icmp sgt i8 %a, %b
+  %ab = sub i8 %a, %b
+  %ba = sub i8 %b, %a
+  %sel = select i1 %cmp, i8 %ab, i8 %ba
+  ret i8 %sel
+}
+
+define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_cmp_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sxth w8, w0
+; CHECK-NEXT:    sub w9, w0, w1
+; CHECK-NEXT:    sub w10, w1, w0
+; CHECK-NEXT:    cmp w8, w1, sxth
+; CHECK-NEXT:    csel w0, w9, w10, ge
+; CHECK-NEXT:    ret
+  %cmp = icmp sge i16 %a, %b
+  %ab = sub i16 %a, %b
+  %ba = sub i16 %b, %a
+  %sel = select i1 %cmp, i16 %ab, i16 %ba
+  ret i16 %sel
+}
+
+define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_cmp_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub w8, w1, w0
+; CHECK-NEXT:    subs w9, w0, w1
+; CHECK-NEXT:    csel w0, w8, w9, lt
+; CHECK-NEXT:    ret
+  %cmp = icmp slt i32 %a, %b
+  %ab = sub i32 %a, %b
+  %ba = sub i32 %b, %a
+  %sel = select i1 %cmp, i32 %ba, i32 %ab
+  ret i32 %sel
+}
+
+define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: abd_cmp_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub x8, x1, x0
+; CHECK-NEXT:    subs x9, x0, x1
+; CHECK-NEXT:    csel x0, x8, x9, ge
+; CHECK-NEXT:    ret
+  %cmp = icmp sge i64 %a, %b
+  %ab = sub i64 %a, %b
+  %ba = sub i64 %b, %a
+  %sel = select i1 %cmp, i64 %ba, i64 %ab
+  ret i64 %sel
+}
+
+define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
+; CHECK-LABEL: abd_cmp_i128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x0, x2
+; CHECK-NEXT:    sbc x8, x1, x3
+; CHECK-NEXT:    subs x9, x2, x0
+; CHECK-NEXT:    sbc x10, x3, x1
+; CHECK-NEXT:    subs x11, x0, x2
+; CHECK-NEXT:    sbcs xzr, x1, x3
+; CHECK-NEXT:    csel x0, x9, x11, ge
+; CHECK-NEXT:    csel x1, x10, x8, ge
+; CHECK-NEXT:    ret
+  %cmp = icmp sge i128 %a, %b
+  %ab = sub i128 %a, %b
+  %ba = sub i128 %b, %a
+  %sel = select i1 %cmp, i128 %ba, i128 %ab
+  ret i128 %sel
+}
+
+;
+; abs(sub_nsw(a,b)) -> abds(a,b)
+;
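+; Worked example: a = 3, b = 10 -> sub nsw = -7, abs = 7; since nsw rules
+; out wrap, the subtraction is exact and its abs is the true difference.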
+
+define i8 @abd_subnsw_i8(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: abd_subnsw_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub w8, w0, w1
+; CHECK-NEXT:    sxtb w8, w8
+; CHECK-NEXT:    cmp w8, #0
+; CHECK-NEXT:    cneg w0, w8, mi
+; CHECK-NEXT:    ret
+  %sub = sub nsw i8 %a, %b
+  %abs = call i8 @llvm.abs.i8(i8 %sub, i1 false)
+  ret i8 %abs
+}
+
+define i8 @abd_subnsw_i8_undef(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: abd_subnsw_i8_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub w8, w0, w1
+; CHECK-NEXT:    sxtb w8, w8
+; CHECK-NEXT:    cmp w8, #0
+; CHECK-NEXT:    cneg w0, w8, mi
+; CHECK-NEXT:    ret
+  %sub = sub nsw i8 %a, %b
+  %abs = call i8 @llvm.abs.i8(i8 %sub, i1 true)
+  ret i8 %abs
+}
+
+define i16 @abd_subnsw_i16(i16 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_subnsw_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub w8, w0, w1
+; CHECK-NEXT:    sxth w8, w8
+; CHECK-NEXT:    cmp w8, #0
+; CHECK-NEXT:    cneg w0, w8, mi
+; CHECK-NEXT:    ret
+  %sub = sub nsw i16 %a, %b
+  %abs = call i16 @llvm.abs.i16(i16 %sub, i1 false)
+  ret i16 %abs
+}
+
+define i16 @abd_subnsw_i16_undef(i16 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_subnsw_i16_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub w8, w0, w1
+; CHECK-NEXT:    sxth w8, w8
+; CHECK-NEXT:    cmp w8, #0
+; CHECK-NEXT:    cneg w0, w8, mi
+; CHECK-NEXT:    ret
+  %sub = sub nsw i16 %a, %b
+  %abs = call i16 @llvm.abs.i16(i16 %sub, i1 true)
+  ret i16 %abs
+}
+
+define i32 @abd_subnsw_i32(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_subnsw_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    subs w8, w0, w1
+; CHECK-NEXT:    cneg w0, w8, mi
+; CHECK-NEXT:    ret
+  %sub = sub nsw i32 %a, %b
+  %abs = call i32 @llvm.abs.i32(i32 %sub, i1 false)
+  ret i32 %abs
+}
+
+define i32 @abd_subnsw_i32_undef(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_subnsw_i32_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    subs w8, w0, w1
+; CHECK-NEXT:    cneg w0, w8, mi
+; CHECK-NEXT:    ret
+  %sub = sub nsw i32 %a, %b
+  %abs = call i32 @llvm.abs.i32(i32 %sub, i1 true)
+  ret i32 %abs
+}
+
+define i64 @abd_subnsw_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: abd_subnsw_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    subs x8, x0, x1
+; CHECK-NEXT:    cneg x0, x8, mi
+; CHECK-NEXT:    ret
+  %sub = sub nsw i64 %a, %b
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  ret i64 %abs
+}
+
+define i64 @abd_subnsw_i64_undef(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: abd_subnsw_i64_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    subs x8, x0, x1
+; CHECK-NEXT:    cneg x0, x8, mi
+; CHECK-NEXT:    ret
+  %sub = sub nsw i64 %a, %b
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  ret i64 %abs
+}
+
+define i128 @abd_subnsw_i128(i128 %a, i128 %b) nounwind {
+; CHECK-LABEL: abd_subnsw_i128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    subs x8, x0, x2
+; CHECK-NEXT:    sbc x9, x1, x3
+; CHECK-NEXT:    asr x10, x9, #63
+; CHECK-NEXT:    eor x8, x8, x10
+; CHECK-NEXT:    eor x9, x9, x10
+; CHECK-NEXT:    subs x0, x8, x10
+; CHECK-NEXT:    sbc x1, x9, x10
+; CHECK-NEXT:    ret
+  %sub = sub nsw i128 %a, %b
+  %abs = call i128 @llvm.abs.i128(i128 %sub, i1 false)
+  ret i128 %abs
+}
+
+define i128 @abd_subnsw_i128_undef(i128 %a, i128 %b) nounwind {
+; CHECK-LABEL: abd_subnsw_i128_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    subs x8, x0, x2
+; CHECK-NEXT:    sbc x9, x1, x3
+; CHECK-NEXT:    asr x10, x9, #63
+; CHECK-NEXT:    eor x8, x8, x10
+; CHECK-NEXT:    eor x9, x9, x10
+; CHECK-NEXT:    subs x0, x8, x10
+; CHECK-NEXT:    sbc x1, x9, x10
+; CHECK-NEXT:    ret
+  %sub = sub nsw i128 %a, %b
+  %abs = call i128 @llvm.abs.i128(i128 %sub, i1 true)
+  ret i128 %abs
+}
+
+;
+; negative tests
+;
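+; Without the nsw flag the sub may wrap, so abs(sub(a,b)) is not the true
+; absolute difference: e.g. i32 a = -2147483648, b = 1 wraps a - b to
+; 2147483647, whose abs is 2147483647, so no abds node may be formed.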
+
+define i32 @abd_sub_i32(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_sub_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    subs w8, w0, w1
+; CHECK-NEXT:    cneg w0, w8, mi
+; CHECK-NEXT:    ret
+  %sub = sub i32 %a, %b
+  %abs = call i32 @llvm.abs.i32(i32 %sub, i1 false)
+  ret i32 %abs
+}
+
+
+declare i8 @llvm.abs.i8(i8, i1)
+declare i16 @llvm.abs.i16(i16, i1)
+declare i32 @llvm.abs.i32(i32, i1)
+declare i64 @llvm.abs.i64(i64, i1)
+declare i128 @llvm.abs.i128(i128, i1)
+
+declare i8 @llvm.smax.i8(i8, i8)
+declare i16 @llvm.smax.i16(i16, i16)
+declare i32 @llvm.smax.i32(i32, i32)
+declare i64 @llvm.smax.i64(i64, i64)
+
+declare i8 @llvm.smin.i8(i8, i8)
+declare i16 @llvm.smin.i16(i16, i16)
+declare i32 @llvm.smin.i32(i32, i32)
+declare i64 @llvm.smin.i64(i64, i64)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; X64: {{.*}}
+; X86: {{.*}}

diff --git a/llvm/test/CodeGen/AArch64/abdu-neg.ll b/llvm/test/CodeGen/AArch64/abdu-neg.ll
new file mode 100644
index 0000000000000..d86a269037318
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/abdu-neg.ll
@@ -0,0 +1,446 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64 | FileCheck %s
+
+;
+; trunc(nabs(sub(zext(a),zext(b)))) -> nabdu(a,b)
+;
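+; Worked example: i8 a = 3, b = 10 -> zext difference -7 in i64, abs 7,
+; negate -7, trunc to i8 gives -7 (0xf9), i.e. always -|a - b|.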
+
+define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: abd_ext_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    and x8, x0, #0xff
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    sub x8, x8, w1, uxtb
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x8, x8, mi
+; CHECK-NEXT:    neg w0, w8
+; CHECK-NEXT:    ret
+  %aext = zext i8 %a to i64
+  %bext = zext i8 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i8
+  ret i8 %trunc
+}
+
+define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_ext_i8_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    and x8, x0, #0xff
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    sub x8, x8, w1, uxth
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x8, x8, mi
+; CHECK-NEXT:    neg w0, w8
+; CHECK-NEXT:    ret
+  %aext = zext i8 %a to i64
+  %bext = zext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i8
+  ret i8 %trunc
+}
+
+define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: abd_ext_i8_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    and x8, x0, #0xff
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    sub x8, x8, w1, uxtb
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x8, x8, mi
+; CHECK-NEXT:    neg w0, w8
+; CHECK-NEXT:    ret
+  %aext = zext i8 %a to i64
+  %bext = zext i8 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i8
+  ret i8 %trunc
+}
+
+define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_ext_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    and x8, x0, #0xffff
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    sub x8, x8, w1, uxth
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x8, x8, mi
+; CHECK-NEXT:    neg w0, w8
+; CHECK-NEXT:    ret
+  %aext = zext i16 %a to i64
+  %bext = zext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i16
+  ret i16 %trunc
+}
+
+define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_ext_i16_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    and x8, x0, #0xffff
+; CHECK-NEXT:    sub x8, x8, w1, uxtw
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x8, x8, mi
+; CHECK-NEXT:    neg w0, w8
+; CHECK-NEXT:    ret
+  %aext = zext i16 %a to i64
+  %bext = zext i32 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i16
+  ret i16 %trunc
+}
+
+define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_ext_i16_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    and x8, x0, #0xffff
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    sub x8, x8, w1, uxth
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x8, x8, mi
+; CHECK-NEXT:    neg w0, w8
+; CHECK-NEXT:    ret
+  %aext = zext i16 %a to i64
+  %bext = zext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i16
+  ret i16 %trunc
+}
+
+define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_ext_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    sub x8, x8, w1, uxtw
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x8, x8, mi
+; CHECK-NEXT:    neg w0, w8
+; CHECK-NEXT:    ret
+  %aext = zext i32 %a to i64
+  %bext = zext i32 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i32
+  ret i32 %trunc
+}
+
+define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_ext_i32_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    sub x8, x8, w1, uxth
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x8, x8, mi
+; CHECK-NEXT:    neg w0, w8
+; CHECK-NEXT:    ret
+  %aext = zext i32 %a to i64
+  %bext = zext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i32
+  ret i32 %trunc
+}
+
+define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_ext_i32_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    sub x8, x8, w1, uxtw
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x8, x8, mi
+; CHECK-NEXT:    neg w0, w8
+; CHECK-NEXT:    ret
+  %aext = zext i32 %a to i64
+  %bext = zext i32 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i32
+  ret i32 %trunc
+}
+
+define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: abd_ext_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    subs x8, x0, x1
+; CHECK-NEXT:    ngc x9, xzr
+; CHECK-NEXT:    eor x8, x8, x9
+; CHECK-NEXT:    sub x0, x9, x8
+; CHECK-NEXT:    ret
+  %aext = zext i64 %a to i128
+  %bext = zext i64 %b to i128
+  %sub = sub i128 %aext, %bext
+  %abs = call i128 @llvm.abs.i128(i128 %sub, i1 false)
+  %nabs = sub i128 0, %abs
+  %trunc = trunc i128 %nabs to i64
+  ret i64 %trunc
+}
+
+define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: abd_ext_i64_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    subs x8, x0, x1
+; CHECK-NEXT:    ngc x9, xzr
+; CHECK-NEXT:    eor x8, x8, x9
+; CHECK-NEXT:    sub x0, x9, x8
+; CHECK-NEXT:    ret
+  %aext = zext i64 %a to i128
+  %bext = zext i64 %b to i128
+  %sub = sub i128 %aext, %bext
+  %abs = call i128 @llvm.abs.i128(i128 %sub, i1 true)
+  %nabs = sub i128 0, %abs
+  %trunc = trunc i128 %nabs to i64
+  ret i64 %trunc
+}
+
+define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind {
+; CHECK-LABEL: abd_ext_i128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    subs x8, x0, x2
+; CHECK-NEXT:    sbcs x9, x1, x3
+; CHECK-NEXT:    ngcs xzr, xzr
+; CHECK-NEXT:    ngc x10, xzr
+; CHECK-NEXT:    eor x8, x8, x10
+; CHECK-NEXT:    eor x9, x9, x10
+; CHECK-NEXT:    subs x8, x8, x10
+; CHECK-NEXT:    sbc x9, x9, x10
+; CHECK-NEXT:    negs x0, x8
+; CHECK-NEXT:    ngc x1, x9
+; CHECK-NEXT:    ret
+  %aext = zext i128 %a to i256
+  %bext = zext i128 %b to i256
+  %sub = sub i256 %aext, %bext
+  %abs = call i256 @llvm.abs.i256(i256 %sub, i1 false)
+  %nabs = sub i256 0, %abs
+  %trunc = trunc i256 %nabs to i128
+  ret i128 %trunc
+}
+
+define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind {
+; CHECK-LABEL: abd_ext_i128_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    subs x8, x0, x2
+; CHECK-NEXT:    sbcs x9, x1, x3
+; CHECK-NEXT:    ngcs xzr, xzr
+; CHECK-NEXT:    ngc x10, xzr
+; CHECK-NEXT:    eor x8, x8, x10
+; CHECK-NEXT:    eor x9, x9, x10
+; CHECK-NEXT:    subs x8, x8, x10
+; CHECK-NEXT:    sbc x9, x9, x10
+; CHECK-NEXT:    negs x0, x8
+; CHECK-NEXT:    ngc x1, x9
+; CHECK-NEXT:    ret
+  %aext = zext i128 %a to i256
+  %bext = zext i128 %b to i256
+  %sub = sub i256 %aext, %bext
+  %abs = call i256 @llvm.abs.i256(i256 %sub, i1 true)
+  %nabs = sub i256 0, %abs
+  %trunc = trunc i256 %nabs to i128
+  ret i128 %trunc
+}
+
+;
+; sub(umin(a,b),umax(a,b)) -> nabdu(a,b)
+;
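+; Worked example: a = 10, b = 3 -> umin = 3, umax = 10, 3 - 10 = -7 = -|a - b|.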
+
+define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: abd_minmax_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w8, w1, #0xff
+; CHECK-NEXT:    and w9, w0, #0xff
+; CHECK-NEXT:    cmp w9, w8
+; CHECK-NEXT:    csel w10, w9, w8, lo
+; CHECK-NEXT:    csel w8, w9, w8, hi
+; CHECK-NEXT:    sub w0, w10, w8
+; CHECK-NEXT:    ret
+  %min = call i8 @llvm.umin.i8(i8 %a, i8 %b)
+  %max = call i8 @llvm.umax.i8(i8 %a, i8 %b)
+  %sub = sub i8 %min, %max
+  ret i8 %sub
+}
+
+define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_minmax_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w8, w1, #0xffff
+; CHECK-NEXT:    and w9, w0, #0xffff
+; CHECK-NEXT:    cmp w9, w8
+; CHECK-NEXT:    csel w10, w9, w8, lo
+; CHECK-NEXT:    csel w8, w9, w8, hi
+; CHECK-NEXT:    sub w0, w10, w8
+; CHECK-NEXT:    ret
+  %min = call i16 @llvm.umin.i16(i16 %a, i16 %b)
+  %max = call i16 @llvm.umax.i16(i16 %a, i16 %b)
+  %sub = sub i16 %min, %max
+  ret i16 %sub
+}
+
+define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_minmax_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    csel w8, w0, w1, lo
+; CHECK-NEXT:    csel w9, w0, w1, hi
+; CHECK-NEXT:    sub w0, w8, w9
+; CHECK-NEXT:    ret
+  %min = call i32 @llvm.umin.i32(i32 %a, i32 %b)
+  %max = call i32 @llvm.umax.i32(i32 %a, i32 %b)
+  %sub = sub i32 %min, %max
+  ret i32 %sub
+}
+
+define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: abd_minmax_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x0, x1
+; CHECK-NEXT:    csel x8, x0, x1, lo
+; CHECK-NEXT:    csel x9, x0, x1, hi
+; CHECK-NEXT:    sub x0, x8, x9
+; CHECK-NEXT:    ret
+  %min = call i64 @llvm.umin.i64(i64 %a, i64 %b)
+  %max = call i64 @llvm.umax.i64(i64 %a, i64 %b)
+  %sub = sub i64 %min, %max
+  ret i64 %sub
+}
+
+define i128 @abd_minmax_i128(i128 %a, i128 %b) nounwind {
+; CHECK-LABEL: abd_minmax_i128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x0, x2
+; CHECK-NEXT:    sbcs xzr, x1, x3
+; CHECK-NEXT:    csel x8, x1, x3, lo
+; CHECK-NEXT:    csel x9, x0, x2, lo
+; CHECK-NEXT:    cmp x2, x0
+; CHECK-NEXT:    sbcs xzr, x3, x1
+; CHECK-NEXT:    csel x10, x0, x2, lo
+; CHECK-NEXT:    csel x11, x1, x3, lo
+; CHECK-NEXT:    subs x0, x9, x10
+; CHECK-NEXT:    sbc x1, x8, x11
+; CHECK-NEXT:    ret
+  %min = call i128 @llvm.umin.i128(i128 %a, i128 %b)
+  %max = call i128 @llvm.umax.i128(i128 %a, i128 %b)
+  %sub = sub i128 %min, %max
+  ret i128 %sub
+}
+
+;
+; select(icmp(a,b),sub(a,b),sub(b,a)) -> nabdu(a,b)
+;
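+; Worked example (abd_cmp_i8): a = 3, b = 10 -> (a ule b) is true, so the
+; select yields a - b = -7 (0xf9 as i8); for a = 10, b = 3 it yields b - a = -7.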
+
+define i8 @abd_cmp_i8(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: abd_cmp_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w8, w0, #0xff
+; CHECK-NEXT:    sub w9, w0, w1
+; CHECK-NEXT:    sub w10, w1, w0
+; CHECK-NEXT:    cmp w8, w1, uxtb
+; CHECK-NEXT:    csel w0, w9, w10, ls
+; CHECK-NEXT:    ret
+  %cmp = icmp ule i8 %a, %b
+  %ab = sub i8 %a, %b
+  %ba = sub i8 %b, %a
+  %sel = select i1 %cmp, i8 %ab, i8 %ba
+  ret i8 %sel
+}
+
+define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_cmp_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w8, w0, #0xffff
+; CHECK-NEXT:    sub w9, w0, w1
+; CHECK-NEXT:    sub w10, w1, w0
+; CHECK-NEXT:    cmp w8, w1, uxth
+; CHECK-NEXT:    csel w0, w9, w10, lo
+; CHECK-NEXT:    ret
+  %cmp = icmp ult i16 %a, %b
+  %ab = sub i16 %a, %b
+  %ba = sub i16 %b, %a
+  %sel = select i1 %cmp, i16 %ab, i16 %ba
+  ret i16 %sel
+}
+
+define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_cmp_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub w8, w1, w0
+; CHECK-NEXT:    subs w9, w0, w1
+; CHECK-NEXT:    csel w0, w8, w9, hs
+; CHECK-NEXT:    ret
+  %cmp = icmp uge i32 %a, %b
+  %ab = sub i32 %a, %b
+  %ba = sub i32 %b, %a
+  %sel = select i1 %cmp, i32 %ba, i32 %ab
+  ret i32 %sel
+}
+
+define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: abd_cmp_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub x8, x1, x0
+; CHECK-NEXT:    subs x9, x0, x1
+; CHECK-NEXT:    csel x0, x8, x9, lo
+; CHECK-NEXT:    ret
+  %cmp = icmp ult i64 %a, %b
+  %ab = sub i64 %a, %b
+  %ba = sub i64 %b, %a
+  %sel = select i1 %cmp, i64 %ba, i64 %ab
+  ret i64 %sel
+}
+
+define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
+; CHECK-LABEL: abd_cmp_i128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x0, x2
+; CHECK-NEXT:    sbc x8, x1, x3
+; CHECK-NEXT:    subs x9, x2, x0
+; CHECK-NEXT:    sbc x10, x3, x1
+; CHECK-NEXT:    subs x11, x0, x2
+; CHECK-NEXT:    sbcs xzr, x1, x3
+; CHECK-NEXT:    csel x0, x9, x11, lo
+; CHECK-NEXT:    csel x1, x10, x8, lo
+; CHECK-NEXT:    ret
+  %cmp = icmp ult i128 %a, %b
+  %ab = sub i128 %a, %b
+  %ba = sub i128 %b, %a
+  %sel = select i1 %cmp, i128 %ba, i128 %ab
+  ret i128 %sel
+}
+
+declare i8 @llvm.abs.i8(i8, i1)
+declare i16 @llvm.abs.i16(i16, i1)
+declare i32 @llvm.abs.i32(i32, i1)
+declare i64 @llvm.abs.i64(i64, i1)
+declare i128 @llvm.abs.i128(i128, i1)
+
+declare i8 @llvm.umax.i8(i8, i8)
+declare i16 @llvm.umax.i16(i16, i16)
+declare i32 @llvm.umax.i32(i32, i32)
+declare i64 @llvm.umax.i64(i64, i64)
+
+declare i8 @llvm.umin.i8(i8, i8)
+declare i16 @llvm.umin.i16(i16, i16)
+declare i32 @llvm.umin.i32(i32, i32)
+declare i64 @llvm.umin.i64(i64, i64)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; X64: {{.*}}
+; X86: {{.*}}

diff --git a/llvm/test/CodeGen/AArch64/abdu.ll b/llvm/test/CodeGen/AArch64/abdu.ll
new file mode 100644
index 0000000000000..5026e1024dd56
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/abdu.ll
@@ -0,0 +1,429 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64 | FileCheck %s
+
+;
+; trunc(abs(sub(zext(a),zext(b)))) -> abdu(a,b)
+;
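+; Worked example: i8 a = 3, b = 10 -> zext difference -7 in i64, abs 7,
+; trunc 7, i.e. always the unsigned |a - b|.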
+
+define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: abd_ext_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    and x8, x0, #0xff
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    sub x8, x8, w1, uxtb
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x0, x8, mi
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
+  %aext = zext i8 %a to i64
+  %bext = zext i8 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %trunc = trunc i64 %abs to i8
+  ret i8 %trunc
+}
+
+define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_ext_i8_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    and x8, x0, #0xff
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    sub x8, x8, w1, uxth
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x0, x8, mi
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
+  %aext = zext i8 %a to i64
+  %bext = zext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %trunc = trunc i64 %abs to i8
+  ret i8 %trunc
+}
+
+define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: abd_ext_i8_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    and x8, x0, #0xff
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    sub x8, x8, w1, uxtb
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x0, x8, mi
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
+  %aext = zext i8 %a to i64
+  %bext = zext i8 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  %trunc = trunc i64 %abs to i8
+  ret i8 %trunc
+}
+
+define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_ext_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    and x8, x0, #0xffff
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    sub x8, x8, w1, uxth
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x0, x8, mi
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
+  %aext = zext i16 %a to i64
+  %bext = zext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %trunc = trunc i64 %abs to i16
+  ret i16 %trunc
+}
+
+define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_ext_i16_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    and x8, x0, #0xffff
+; CHECK-NEXT:    sub x8, x8, w1, uxtw
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x0, x8, mi
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
+  %aext = zext i16 %a to i64
+  %bext = zext i32 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %trunc = trunc i64 %abs to i16
+  ret i16 %trunc
+}
+
+define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_ext_i16_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    and x8, x0, #0xffff
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    sub x8, x8, w1, uxth
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x0, x8, mi
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
+  %aext = zext i16 %a to i64
+  %bext = zext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  %trunc = trunc i64 %abs to i16
+  ret i16 %trunc
+}
+
+define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_ext_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    sub x8, x8, w1, uxtw
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x0, x8, mi
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
+  %aext = zext i32 %a to i64
+  %bext = zext i32 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %trunc = trunc i64 %abs to i32
+  ret i32 %trunc
+}
+
+define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_ext_i32_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    sub x8, x8, w1, uxth
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x0, x8, mi
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
+  %aext = zext i32 %a to i64
+  %bext = zext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %trunc = trunc i64 %abs to i32
+  ret i32 %trunc
+}
+
+define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_ext_i32_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    sub x8, x8, w1, uxtw
+; CHECK-NEXT:    cmp x8, #0
+; CHECK-NEXT:    cneg x0, x8, mi
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
+  %aext = zext i32 %a to i64
+  %bext = zext i32 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  %trunc = trunc i64 %abs to i32
+  ret i32 %trunc
+}
+
+define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: abd_ext_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    subs x8, x0, x1
+; CHECK-NEXT:    ngc x9, xzr
+; CHECK-NEXT:    eor x8, x8, x9
+; CHECK-NEXT:    sub x0, x8, x9
+; CHECK-NEXT:    ret
+  %aext = zext i64 %a to i128
+  %bext = zext i64 %b to i128
+  %sub = sub i128 %aext, %bext
+  %abs = call i128 @llvm.abs.i128(i128 %sub, i1 false)
+  %trunc = trunc i128 %abs to i64
+  ret i64 %trunc
+}
+
+define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: abd_ext_i64_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    subs x8, x0, x1
+; CHECK-NEXT:    ngc x9, xzr
+; CHECK-NEXT:    eor x8, x8, x9
+; CHECK-NEXT:    sub x0, x8, x9
+; CHECK-NEXT:    ret
+  %aext = zext i64 %a to i128
+  %bext = zext i64 %b to i128
+  %sub = sub i128 %aext, %bext
+  %abs = call i128 @llvm.abs.i128(i128 %sub, i1 true)
+  %trunc = trunc i128 %abs to i64
+  ret i64 %trunc
+}
+
+define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind {
+; CHECK-LABEL: abd_ext_i128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    subs x8, x0, x2
+; CHECK-NEXT:    sbcs x9, x1, x3
+; CHECK-NEXT:    ngcs xzr, xzr
+; CHECK-NEXT:    ngc x10, xzr
+; CHECK-NEXT:    eor x8, x8, x10
+; CHECK-NEXT:    eor x9, x9, x10
+; CHECK-NEXT:    subs x0, x8, x10
+; CHECK-NEXT:    sbc x1, x9, x10
+; CHECK-NEXT:    ret
+  %aext = zext i128 %a to i256
+  %bext = zext i128 %b to i256
+  %sub = sub i256 %aext, %bext
+  %abs = call i256 @llvm.abs.i256(i256 %sub, i1 false)
+  %trunc = trunc i256 %abs to i128
+  ret i128 %trunc
+}
+
+define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind {
+; CHECK-LABEL: abd_ext_i128_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    subs x8, x0, x2
+; CHECK-NEXT:    sbcs x9, x1, x3
+; CHECK-NEXT:    ngcs xzr, xzr
+; CHECK-NEXT:    ngc x10, xzr
+; CHECK-NEXT:    eor x8, x8, x10
+; CHECK-NEXT:    eor x9, x9, x10
+; CHECK-NEXT:    subs x0, x8, x10
+; CHECK-NEXT:    sbc x1, x9, x10
+; CHECK-NEXT:    ret
+  %aext = zext i128 %a to i256
+  %bext = zext i128 %b to i256
+  %sub = sub i256 %aext, %bext
+  %abs = call i256 @llvm.abs.i256(i256 %sub, i1 true)
+  %trunc = trunc i256 %abs to i128
+  ret i128 %trunc
+}
+
+;
+; sub(umax(a,b),umin(a,b)) -> abdu(a,b)
+;
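+; A worked instance with illustrative values (not part of the generated
+; checks): for a = 1, b = 3 (i8), umin = 1 and umax = 3, so
+; sub(umax, umin) = 2 = |a - b|, matching abdu(a, b).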
+
+define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: abd_minmax_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w8, w1, #0xff
+; CHECK-NEXT:    and w9, w0, #0xff
+; CHECK-NEXT:    cmp w9, w8
+; CHECK-NEXT:    csel w10, w9, w8, lo
+; CHECK-NEXT:    csel w8, w9, w8, hi
+; CHECK-NEXT:    sub w0, w8, w10
+; CHECK-NEXT:    ret
+  %min = call i8 @llvm.umin.i8(i8 %a, i8 %b)
+  %max = call i8 @llvm.umax.i8(i8 %a, i8 %b)
+  %sub = sub i8 %max, %min
+  ret i8 %sub
+}
+
+define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_minmax_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w8, w1, #0xffff
+; CHECK-NEXT:    and w9, w0, #0xffff
+; CHECK-NEXT:    cmp w9, w8
+; CHECK-NEXT:    csel w10, w9, w8, lo
+; CHECK-NEXT:    csel w8, w9, w8, hi
+; CHECK-NEXT:    sub w0, w8, w10
+; CHECK-NEXT:    ret
+  %min = call i16 @llvm.umin.i16(i16 %a, i16 %b)
+  %max = call i16 @llvm.umax.i16(i16 %a, i16 %b)
+  %sub = sub i16 %max, %min
+  ret i16 %sub
+}
+
+define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_minmax_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    csel w8, w0, w1, lo
+; CHECK-NEXT:    csel w9, w0, w1, hi
+; CHECK-NEXT:    sub w0, w9, w8
+; CHECK-NEXT:    ret
+  %min = call i32 @llvm.umin.i32(i32 %a, i32 %b)
+  %max = call i32 @llvm.umax.i32(i32 %a, i32 %b)
+  %sub = sub i32 %max, %min
+  ret i32 %sub
+}
+
+define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: abd_minmax_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x0, x1
+; CHECK-NEXT:    csel x8, x0, x1, lo
+; CHECK-NEXT:    csel x9, x0, x1, hi
+; CHECK-NEXT:    sub x0, x9, x8
+; CHECK-NEXT:    ret
+  %min = call i64 @llvm.umin.i64(i64 %a, i64 %b)
+  %max = call i64 @llvm.umax.i64(i64 %a, i64 %b)
+  %sub = sub i64 %max, %min
+  ret i64 %sub
+}
+
+define i128 @abd_minmax_i128(i128 %a, i128 %b) nounwind {
+; CHECK-LABEL: abd_minmax_i128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x0, x2
+; CHECK-NEXT:    sbcs xzr, x1, x3
+; CHECK-NEXT:    csel x8, x1, x3, lo
+; CHECK-NEXT:    csel x9, x0, x2, lo
+; CHECK-NEXT:    cmp x2, x0
+; CHECK-NEXT:    sbcs xzr, x3, x1
+; CHECK-NEXT:    csel x10, x0, x2, lo
+; CHECK-NEXT:    csel x11, x1, x3, lo
+; CHECK-NEXT:    subs x0, x10, x9
+; CHECK-NEXT:    sbc x1, x11, x8
+; CHECK-NEXT:    ret
+  %min = call i128 @llvm.umin.i128(i128 %a, i128 %b)
+  %max = call i128 @llvm.umax.i128(i128 %a, i128 %b)
+  %sub = sub i128 %max, %min
+  ret i128 %sub
+}
+
+;
+; select(icmp(a,b),sub(a,b),sub(b,a)) -> abdu(a,b)
+;
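+; Illustrative values (not part of the generated checks): for a = 3, b = 1
+; (i8), icmp ugt is true and the select yields sub(a, b) = 2; for a = 1,
+; b = 3 it yields sub(b, a) = 2, so both arms produce |a - b|.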
+
+define i8 @abd_cmp_i8(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: abd_cmp_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w8, w0, #0xff
+; CHECK-NEXT:    sub w9, w0, w1
+; CHECK-NEXT:    sub w10, w1, w0
+; CHECK-NEXT:    cmp w8, w1, uxtb
+; CHECK-NEXT:    csel w0, w9, w10, hi
+; CHECK-NEXT:    ret
+  %cmp = icmp ugt i8 %a, %b
+  %ab = sub i8 %a, %b
+  %ba = sub i8 %b, %a
+  %sel = select i1 %cmp, i8 %ab, i8 %ba
+  ret i8 %sel
+}
+
+define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind {
+; CHECK-LABEL: abd_cmp_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w8, w0, #0xffff
+; CHECK-NEXT:    sub w9, w0, w1
+; CHECK-NEXT:    sub w10, w1, w0
+; CHECK-NEXT:    cmp w8, w1, uxth
+; CHECK-NEXT:    csel w0, w9, w10, hs
+; CHECK-NEXT:    ret
+  %cmp = icmp uge i16 %a, %b
+  %ab = sub i16 %a, %b
+  %ba = sub i16 %b, %a
+  %sel = select i1 %cmp, i16 %ab, i16 %ba
+  ret i16 %sel
+}
+
+define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: abd_cmp_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub w8, w1, w0
+; CHECK-NEXT:    subs w9, w0, w1
+; CHECK-NEXT:    csel w0, w8, w9, lo
+; CHECK-NEXT:    ret
+  %cmp = icmp ult i32 %a, %b
+  %ab = sub i32 %a, %b
+  %ba = sub i32 %b, %a
+  %sel = select i1 %cmp, i32 %ba, i32 %ab
+  ret i32 %sel
+}
+
+define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: abd_cmp_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub x8, x1, x0
+; CHECK-NEXT:    subs x9, x0, x1
+; CHECK-NEXT:    csel x0, x8, x9, hs
+; CHECK-NEXT:    ret
+  %cmp = icmp uge i64 %a, %b
+  %ab = sub i64 %a, %b
+  %ba = sub i64 %b, %a
+  %sel = select i1 %cmp, i64 %ba, i64 %ab
+  ret i64 %sel
+}
+
+define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
+; CHECK-LABEL: abd_cmp_i128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x0, x2
+; CHECK-NEXT:    sbc x8, x1, x3
+; CHECK-NEXT:    subs x9, x2, x0
+; CHECK-NEXT:    sbc x10, x3, x1
+; CHECK-NEXT:    subs x11, x0, x2
+; CHECK-NEXT:    sbcs xzr, x1, x3
+; CHECK-NEXT:    csel x0, x9, x11, hs
+; CHECK-NEXT:    csel x1, x10, x8, hs
+; CHECK-NEXT:    ret
+  %cmp = icmp uge i128 %a, %b
+  %ab = sub i128 %a, %b
+  %ba = sub i128 %b, %a
+  %sel = select i1 %cmp, i128 %ba, i128 %ab
+  ret i128 %sel
+}
+
+declare i8 @llvm.abs.i8(i8, i1)
+declare i16 @llvm.abs.i16(i16, i1)
+declare i32 @llvm.abs.i32(i32, i1)
+declare i64 @llvm.abs.i64(i64, i1)
+declare i128 @llvm.abs.i128(i128, i1)
+declare i256 @llvm.abs.i256(i256, i1)
+
+declare i8 @llvm.umax.i8(i8, i8)
+declare i16 @llvm.umax.i16(i16, i16)
+declare i32 @llvm.umax.i32(i32, i32)
+declare i64 @llvm.umax.i64(i64, i64)
+declare i128 @llvm.umax.i128(i128, i128)
+
+declare i8 @llvm.umin.i8(i8, i8)
+declare i16 @llvm.umin.i16(i16, i16)
+declare i32 @llvm.umin.i32(i32, i32)
+declare i64 @llvm.umin.i64(i64, i64)
+declare i128 @llvm.umin.i128(i128, i128)

diff  --git a/llvm/test/CodeGen/RISCV/abds-neg.ll b/llvm/test/CodeGen/RISCV/abds-neg.ll
new file mode 100644
index 0000000000000..4ebb92991cfc2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/abds-neg.ll
@@ -0,0 +1,2744 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv32 | FileCheck %s --check-prefixes=RV32I
+; RUN: llc < %s -mtriple=riscv64 | FileCheck %s --check-prefixes=RV64I
+; RUN: llc < %s -mtriple=riscv32 -mattr=+zbb | FileCheck %s --check-prefixes=ZBB,RV32ZBB
+; RUN: llc < %s -mtriple=riscv64 -mattr=+zbb | FileCheck %s --check-prefixes=ZBB,RV64ZBB
+
+;
+; trunc(nabs(sub(sext(a),sext(b)))) -> nabds(a,b)
+;
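+; Illustrative values (not part of the generated checks): for a = 1, b = 3
+; (i8), sext gives 1 and 3 in i64, sub = -2, abs = 2, and negating then
+; truncating yields -2, i.e. nabds(a, b) = -|a - b|.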
+
+define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind {
+; RV32I-LABEL: abd_ext_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    srai a0, a0, 24
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    srai a1, a1, 24
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_ext_i8:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    sext.b a0, a0
+; ZBB-NEXT:    sext.b a1, a1
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    neg a1, a0
+; ZBB-NEXT:    min a0, a0, a1
+; ZBB-NEXT:    ret
+  %aext = sext i8 %a to i64
+  %bext = sext i8 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i8
+  ret i8 %trunc
+}
+
+define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_ext_i8_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    srai a0, a0, 24
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srai a1, a1, 16
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i8_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_ext_i8_i16:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    sext.b a0, a0
+; ZBB-NEXT:    sext.h a1, a1
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    neg a1, a0
+; ZBB-NEXT:    min a0, a0, a1
+; ZBB-NEXT:    ret
+  %aext = sext i8 %a to i64
+  %bext = sext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i8
+  ret i8 %trunc
+}
+
+define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind {
+; RV32I-LABEL: abd_ext_i8_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    srai a0, a0, 24
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    srai a1, a1, 24
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i8_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_ext_i8_undef:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    sext.b a0, a0
+; ZBB-NEXT:    sext.b a1, a1
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    neg a1, a0
+; ZBB-NEXT:    min a0, a0, a1
+; ZBB-NEXT:    ret
+  %aext = sext i8 %a to i64
+  %bext = sext i8 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i8
+  ret i8 %trunc
+}
+
+define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_ext_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srai a1, a1, 16
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_ext_i16:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    sext.h a0, a0
+; ZBB-NEXT:    sext.h a1, a1
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    neg a1, a0
+; ZBB-NEXT:    min a0, a0, a1
+; ZBB-NEXT:    ret
+  %aext = sext i16 %a to i64
+  %bext = sext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i16
+  ret i16 %trunc
+}
+
+define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_ext_i16_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srai a2, a0, 31
+; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    srai a3, a1, 31
+; RV32I-NEXT:    sltu a4, a0, a1
+; RV32I-NEXT:    sub a2, a2, a3
+; RV32I-NEXT:    sub a2, a2, a4
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    bgez a2, .LBB4_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:  .LBB4_2:
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i16_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    sext.w a1, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i16_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sext.h a0, a0
+; RV32ZBB-NEXT:    srai a2, a0, 31
+; RV32ZBB-NEXT:    srai a3, a1, 31
+; RV32ZBB-NEXT:    sltu a4, a0, a1
+; RV32ZBB-NEXT:    sub a2, a2, a3
+; RV32ZBB-NEXT:    sub a2, a2, a4
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    bgez a2, .LBB4_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:  .LBB4_2:
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i16_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sext.h a0, a0
+; RV64ZBB-NEXT:    sext.w a1, a1
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    min a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %aext = sext i16 %a to i64
+  %bext = sext i32 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i16
+  ret i16 %trunc
+}
+
+define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_ext_i16_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srai a1, a1, 16
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i16_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_ext_i16_undef:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    sext.h a0, a0
+; ZBB-NEXT:    sext.h a1, a1
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    neg a1, a0
+; ZBB-NEXT:    min a0, a0, a1
+; ZBB-NEXT:    ret
+  %aext = sext i16 %a to i64
+  %bext = sext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i16
+  ret i16 %trunc
+}
+
+define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_ext_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srai a2, a0, 31
+; RV32I-NEXT:    srai a3, a1, 31
+; RV32I-NEXT:    sltu a4, a0, a1
+; RV32I-NEXT:    sub a2, a2, a3
+; RV32I-NEXT:    sub a2, a2, a4
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    bgez a2, .LBB6_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:  .LBB6_2:
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    sext.w a1, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    srai a2, a0, 31
+; RV32ZBB-NEXT:    srai a3, a1, 31
+; RV32ZBB-NEXT:    sltu a4, a0, a1
+; RV32ZBB-NEXT:    sub a2, a2, a3
+; RV32ZBB-NEXT:    sub a2, a2, a4
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    bgez a2, .LBB6_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:  .LBB6_2:
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sext.w a0, a0
+; RV64ZBB-NEXT:    sext.w a1, a1
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    min a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %aext = sext i32 %a to i64
+  %bext = sext i32 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i32
+  ret i32 %trunc
+}
+
+define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_ext_i32_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srai a2, a0, 31
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srai a3, a1, 31
+; RV32I-NEXT:    srai a1, a1, 16
+; RV32I-NEXT:    sltu a4, a0, a1
+; RV32I-NEXT:    sub a2, a2, a3
+; RV32I-NEXT:    sub a2, a2, a4
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    bgez a2, .LBB7_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:  .LBB7_2:
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i32_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i32_i16:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    srai a2, a0, 31
+; RV32ZBB-NEXT:    sext.h a1, a1
+; RV32ZBB-NEXT:    srai a3, a1, 31
+; RV32ZBB-NEXT:    sltu a4, a0, a1
+; RV32ZBB-NEXT:    sub a2, a2, a3
+; RV32ZBB-NEXT:    sub a2, a2, a4
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    bgez a2, .LBB7_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:  .LBB7_2:
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i32_i16:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sext.w a0, a0
+; RV64ZBB-NEXT:    sext.h a1, a1
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    min a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %aext = sext i32 %a to i64
+  %bext = sext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i32
+  ret i32 %trunc
+}
+
+define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_ext_i32_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srai a2, a0, 31
+; RV32I-NEXT:    srai a3, a1, 31
+; RV32I-NEXT:    sltu a4, a0, a1
+; RV32I-NEXT:    sub a2, a2, a3
+; RV32I-NEXT:    sub a2, a2, a4
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    bgez a2, .LBB8_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:  .LBB8_2:
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i32_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    sext.w a1, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i32_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    srai a2, a0, 31
+; RV32ZBB-NEXT:    srai a3, a1, 31
+; RV32ZBB-NEXT:    sltu a4, a0, a1
+; RV32ZBB-NEXT:    sub a2, a2, a3
+; RV32ZBB-NEXT:    sub a2, a2, a4
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    bgez a2, .LBB8_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:  .LBB8_2:
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i32_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sext.w a0, a0
+; RV64ZBB-NEXT:    sext.w a1, a1
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    min a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %aext = sext i32 %a to i64
+  %bext = sext i32 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i32
+  ret i32 %trunc
+}
+
+define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: abd_ext_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srai a5, a1, 31
+; RV32I-NEXT:    sltu a4, a0, a2
+; RV32I-NEXT:    srai a6, a3, 31
+; RV32I-NEXT:    mv a7, a4
+; RV32I-NEXT:    beq a1, a3, .LBB9_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu a7, a1, a3
+; RV32I-NEXT:  .LBB9_2:
+; RV32I-NEXT:    sub t0, a5, a6
+; RV32I-NEXT:    sltu a7, t0, a7
+; RV32I-NEXT:    sltu a5, a5, a6
+; RV32I-NEXT:    sub a5, t0, a5
+; RV32I-NEXT:    sub a5, a5, a7
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sub a1, a1, a4
+; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    bgez a5, .LBB9_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    snez a2, a0
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:  .LBB9_4:
+; RV32I-NEXT:    snez a2, a0
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srai a2, a0, 63
+; RV64I-NEXT:    srai a3, a1, 63
+; RV64I-NEXT:    sltu a4, a0, a1
+; RV64I-NEXT:    sub a2, a2, a3
+; RV64I-NEXT:    sub a2, a2, a4
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    bgez a2, .LBB9_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:  .LBB9_2:
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i64:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    srai a5, a1, 31
+; RV32ZBB-NEXT:    sltu a4, a0, a2
+; RV32ZBB-NEXT:    srai a6, a3, 31
+; RV32ZBB-NEXT:    mv a7, a4
+; RV32ZBB-NEXT:    beq a1, a3, .LBB9_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu a7, a1, a3
+; RV32ZBB-NEXT:  .LBB9_2:
+; RV32ZBB-NEXT:    sub t0, a5, a6
+; RV32ZBB-NEXT:    sltu a7, t0, a7
+; RV32ZBB-NEXT:    sltu a5, a5, a6
+; RV32ZBB-NEXT:    sub a5, t0, a5
+; RV32ZBB-NEXT:    sub a5, a5, a7
+; RV32ZBB-NEXT:    sub a1, a1, a3
+; RV32ZBB-NEXT:    sub a1, a1, a4
+; RV32ZBB-NEXT:    sub a0, a0, a2
+; RV32ZBB-NEXT:    bgez a5, .LBB9_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    snez a2, a0
+; RV32ZBB-NEXT:    add a1, a1, a2
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:  .LBB9_4:
+; RV32ZBB-NEXT:    snez a2, a0
+; RV32ZBB-NEXT:    add a1, a1, a2
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i64:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    srai a2, a0, 63
+; RV64ZBB-NEXT:    srai a3, a1, 63
+; RV64ZBB-NEXT:    sltu a4, a0, a1
+; RV64ZBB-NEXT:    sub a2, a2, a3
+; RV64ZBB-NEXT:    sub a2, a2, a4
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    bgez a2, .LBB9_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    neg a0, a0
+; RV64ZBB-NEXT:  .LBB9_2:
+; RV64ZBB-NEXT:    neg a0, a0
+; RV64ZBB-NEXT:    ret
+  %aext = sext i64 %a to i128
+  %bext = sext i64 %b to i128
+  %sub = sub i128 %aext, %bext
+  %abs = call i128 @llvm.abs.i128(i128 %sub, i1 false)
+  %nabs = sub i128 0, %abs
+  %trunc = trunc i128 %nabs to i64
+  ret i64 %trunc
+}
+
+define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: abd_ext_i64_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srai a5, a1, 31
+; RV32I-NEXT:    sltu a4, a0, a2
+; RV32I-NEXT:    srai a6, a3, 31
+; RV32I-NEXT:    mv a7, a4
+; RV32I-NEXT:    beq a1, a3, .LBB10_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu a7, a1, a3
+; RV32I-NEXT:  .LBB10_2:
+; RV32I-NEXT:    sub t0, a5, a6
+; RV32I-NEXT:    sltu a7, t0, a7
+; RV32I-NEXT:    sltu a5, a5, a6
+; RV32I-NEXT:    sub a5, t0, a5
+; RV32I-NEXT:    sub a5, a5, a7
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sub a1, a1, a4
+; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    bgez a5, .LBB10_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    snez a2, a0
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:  .LBB10_4:
+; RV32I-NEXT:    snez a2, a0
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i64_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srai a2, a0, 63
+; RV64I-NEXT:    srai a3, a1, 63
+; RV64I-NEXT:    sltu a4, a0, a1
+; RV64I-NEXT:    sub a2, a2, a3
+; RV64I-NEXT:    sub a2, a2, a4
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    bgez a2, .LBB10_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:  .LBB10_2:
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i64_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    srai a5, a1, 31
+; RV32ZBB-NEXT:    sltu a4, a0, a2
+; RV32ZBB-NEXT:    srai a6, a3, 31
+; RV32ZBB-NEXT:    mv a7, a4
+; RV32ZBB-NEXT:    beq a1, a3, .LBB10_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu a7, a1, a3
+; RV32ZBB-NEXT:  .LBB10_2:
+; RV32ZBB-NEXT:    sub t0, a5, a6
+; RV32ZBB-NEXT:    sltu a7, t0, a7
+; RV32ZBB-NEXT:    sltu a5, a5, a6
+; RV32ZBB-NEXT:    sub a5, t0, a5
+; RV32ZBB-NEXT:    sub a5, a5, a7
+; RV32ZBB-NEXT:    sub a1, a1, a3
+; RV32ZBB-NEXT:    sub a1, a1, a4
+; RV32ZBB-NEXT:    sub a0, a0, a2
+; RV32ZBB-NEXT:    bgez a5, .LBB10_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    snez a2, a0
+; RV32ZBB-NEXT:    add a1, a1, a2
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:  .LBB10_4:
+; RV32ZBB-NEXT:    snez a2, a0
+; RV32ZBB-NEXT:    add a1, a1, a2
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i64_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    srai a2, a0, 63
+; RV64ZBB-NEXT:    srai a3, a1, 63
+; RV64ZBB-NEXT:    sltu a4, a0, a1
+; RV64ZBB-NEXT:    sub a2, a2, a3
+; RV64ZBB-NEXT:    sub a2, a2, a4
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    bgez a2, .LBB10_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    neg a0, a0
+; RV64ZBB-NEXT:  .LBB10_2:
+; RV64ZBB-NEXT:    neg a0, a0
+; RV64ZBB-NEXT:    ret
+  %aext = sext i64 %a to i128
+  %bext = sext i64 %b to i128
+  %sub = sub i128 %aext, %bext
+  %abs = call i128 @llvm.abs.i128(i128 %sub, i1 true)
+  %nabs = sub i128 0, %abs
+  %trunc = trunc i128 %nabs to i64
+  ret i64 %trunc
+}
+
+define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind {
+; RV32I-LABEL: abd_ext_i128:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lw a3, 0(a2)
+; RV32I-NEXT:    lw a4, 0(a1)
+; RV32I-NEXT:    lw a5, 4(a2)
+; RV32I-NEXT:    lw a6, 8(a2)
+; RV32I-NEXT:    lw a7, 8(a1)
+; RV32I-NEXT:    lw a2, 12(a2)
+; RV32I-NEXT:    lw t0, 12(a1)
+; RV32I-NEXT:    lw a1, 4(a1)
+; RV32I-NEXT:    sltu t1, a7, a6
+; RV32I-NEXT:    mv t4, t1
+; RV32I-NEXT:    beq t0, a2, .LBB11_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu t4, t0, a2
+; RV32I-NEXT:  .LBB11_2:
+; RV32I-NEXT:    sltu t2, a4, a3
+; RV32I-NEXT:    mv t3, t2
+; RV32I-NEXT:    beq a1, a5, .LBB11_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    sltu t3, a1, a5
+; RV32I-NEXT:  .LBB11_4:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    srai t5, t0, 31
+; RV32I-NEXT:    xor t6, t0, a2
+; RV32I-NEXT:    xor s0, a7, a6
+; RV32I-NEXT:    or s1, s0, t6
+; RV32I-NEXT:    srai t6, a2, 31
+; RV32I-NEXT:    mv s0, t3
+; RV32I-NEXT:    beqz s1, .LBB11_6
+; RV32I-NEXT:  # %bb.5:
+; RV32I-NEXT:    mv s0, t4
+; RV32I-NEXT:  .LBB11_6:
+; RV32I-NEXT:    sub t4, t5, t6
+; RV32I-NEXT:    sltu s0, t4, s0
+; RV32I-NEXT:    sltu t5, t5, t6
+; RV32I-NEXT:    sub t6, t4, t5
+; RV32I-NEXT:    seqz s1, t6
+; RV32I-NEXT:    and s0, s1, s0
+; RV32I-NEXT:    sltu s0, t6, s0
+; RV32I-NEXT:    sltu t4, t4, t5
+; RV32I-NEXT:    sub t4, t6, t4
+; RV32I-NEXT:    sub t4, t4, s0
+; RV32I-NEXT:    sub a2, t0, a2
+; RV32I-NEXT:    sub a2, a2, t1
+; RV32I-NEXT:    sub a6, a7, a6
+; RV32I-NEXT:    sltu a7, a6, t3
+; RV32I-NEXT:    sub a2, a2, a7
+; RV32I-NEXT:    sub a6, a6, t3
+; RV32I-NEXT:    sub a1, a1, a5
+; RV32I-NEXT:    sub a1, a1, t2
+; RV32I-NEXT:    sub a4, a4, a3
+; RV32I-NEXT:    bgez t4, .LBB11_8
+; RV32I-NEXT:  # %bb.7:
+; RV32I-NEXT:    snez a3, a1
+; RV32I-NEXT:    snez a5, a4
+; RV32I-NEXT:    or a3, a5, a3
+; RV32I-NEXT:    neg a7, a6
+; RV32I-NEXT:    sltu t0, a7, a3
+; RV32I-NEXT:    snez a6, a6
+; RV32I-NEXT:    add a2, a2, a6
+; RV32I-NEXT:    neg a2, a2
+; RV32I-NEXT:    sub a2, a2, t0
+; RV32I-NEXT:    neg a4, a4
+; RV32I-NEXT:    add a1, a1, a5
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    sub a6, a7, a3
+; RV32I-NEXT:  .LBB11_8:
+; RV32I-NEXT:    snez a3, a6
+; RV32I-NEXT:    add a2, a2, a3
+; RV32I-NEXT:    snez a3, a1
+; RV32I-NEXT:    snez a5, a4
+; RV32I-NEXT:    or a3, a5, a3
+; RV32I-NEXT:    neg a6, a6
+; RV32I-NEXT:    sltu a7, a6, a3
+; RV32I-NEXT:    neg a2, a2
+; RV32I-NEXT:    sub a2, a2, a7
+; RV32I-NEXT:    sub a3, a6, a3
+; RV32I-NEXT:    add a1, a1, a5
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    neg a4, a4
+; RV32I-NEXT:    sw a4, 0(a0)
+; RV32I-NEXT:    sw a1, 4(a0)
+; RV32I-NEXT:    sw a3, 8(a0)
+; RV32I-NEXT:    sw a2, 12(a0)
+; RV32I-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srai a5, a1, 63
+; RV64I-NEXT:    sltu a4, a0, a2
+; RV64I-NEXT:    srai a6, a3, 63
+; RV64I-NEXT:    mv a7, a4
+; RV64I-NEXT:    beq a1, a3, .LBB11_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sltu a7, a1, a3
+; RV64I-NEXT:  .LBB11_2:
+; RV64I-NEXT:    sub t0, a5, a6
+; RV64I-NEXT:    sltu a7, t0, a7
+; RV64I-NEXT:    sltu a5, a5, a6
+; RV64I-NEXT:    sub a5, t0, a5
+; RV64I-NEXT:    sub a5, a5, a7
+; RV64I-NEXT:    sub a1, a1, a3
+; RV64I-NEXT:    sub a1, a1, a4
+; RV64I-NEXT:    sub a0, a0, a2
+; RV64I-NEXT:    bgez a5, .LBB11_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    snez a2, a0
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:  .LBB11_4:
+; RV64I-NEXT:    snez a2, a0
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i128:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lw a3, 0(a2)
+; RV32ZBB-NEXT:    lw a4, 0(a1)
+; RV32ZBB-NEXT:    lw a5, 4(a2)
+; RV32ZBB-NEXT:    lw a6, 8(a2)
+; RV32ZBB-NEXT:    lw a7, 8(a1)
+; RV32ZBB-NEXT:    lw a2, 12(a2)
+; RV32ZBB-NEXT:    lw t0, 12(a1)
+; RV32ZBB-NEXT:    lw a1, 4(a1)
+; RV32ZBB-NEXT:    sltu t1, a7, a6
+; RV32ZBB-NEXT:    mv t4, t1
+; RV32ZBB-NEXT:    beq t0, a2, .LBB11_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu t4, t0, a2
+; RV32ZBB-NEXT:  .LBB11_2:
+; RV32ZBB-NEXT:    sltu t2, a4, a3
+; RV32ZBB-NEXT:    mv t3, t2
+; RV32ZBB-NEXT:    beq a1, a5, .LBB11_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    sltu t3, a1, a5
+; RV32ZBB-NEXT:  .LBB11_4:
+; RV32ZBB-NEXT:    addi sp, sp, -16
+; RV32ZBB-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT:    sw s1, 8(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT:    srai t5, t0, 31
+; RV32ZBB-NEXT:    xor t6, t0, a2
+; RV32ZBB-NEXT:    xor s0, a7, a6
+; RV32ZBB-NEXT:    or s1, s0, t6
+; RV32ZBB-NEXT:    srai t6, a2, 31
+; RV32ZBB-NEXT:    mv s0, t3
+; RV32ZBB-NEXT:    beqz s1, .LBB11_6
+; RV32ZBB-NEXT:  # %bb.5:
+; RV32ZBB-NEXT:    mv s0, t4
+; RV32ZBB-NEXT:  .LBB11_6:
+; RV32ZBB-NEXT:    sub t4, t5, t6
+; RV32ZBB-NEXT:    sltu s0, t4, s0
+; RV32ZBB-NEXT:    sltu t5, t5, t6
+; RV32ZBB-NEXT:    sub t6, t4, t5
+; RV32ZBB-NEXT:    seqz s1, t6
+; RV32ZBB-NEXT:    and s0, s1, s0
+; RV32ZBB-NEXT:    sltu s0, t6, s0
+; RV32ZBB-NEXT:    sltu t4, t4, t5
+; RV32ZBB-NEXT:    sub t4, t6, t4
+; RV32ZBB-NEXT:    sub t4, t4, s0
+; RV32ZBB-NEXT:    sub a2, t0, a2
+; RV32ZBB-NEXT:    sub a2, a2, t1
+; RV32ZBB-NEXT:    sub a6, a7, a6
+; RV32ZBB-NEXT:    sltu a7, a6, t3
+; RV32ZBB-NEXT:    sub a2, a2, a7
+; RV32ZBB-NEXT:    sub a6, a6, t3
+; RV32ZBB-NEXT:    sub a1, a1, a5
+; RV32ZBB-NEXT:    sub a1, a1, t2
+; RV32ZBB-NEXT:    sub a4, a4, a3
+; RV32ZBB-NEXT:    bgez t4, .LBB11_8
+; RV32ZBB-NEXT:  # %bb.7:
+; RV32ZBB-NEXT:    snez a3, a1
+; RV32ZBB-NEXT:    snez a5, a4
+; RV32ZBB-NEXT:    or a3, a5, a3
+; RV32ZBB-NEXT:    neg a7, a6
+; RV32ZBB-NEXT:    sltu t0, a7, a3
+; RV32ZBB-NEXT:    snez a6, a6
+; RV32ZBB-NEXT:    add a2, a2, a6
+; RV32ZBB-NEXT:    neg a2, a2
+; RV32ZBB-NEXT:    sub a2, a2, t0
+; RV32ZBB-NEXT:    neg a4, a4
+; RV32ZBB-NEXT:    add a1, a1, a5
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:    sub a6, a7, a3
+; RV32ZBB-NEXT:  .LBB11_8:
+; RV32ZBB-NEXT:    snez a3, a6
+; RV32ZBB-NEXT:    add a2, a2, a3
+; RV32ZBB-NEXT:    snez a3, a1
+; RV32ZBB-NEXT:    snez a5, a4
+; RV32ZBB-NEXT:    or a3, a5, a3
+; RV32ZBB-NEXT:    neg a6, a6
+; RV32ZBB-NEXT:    sltu a7, a6, a3
+; RV32ZBB-NEXT:    neg a2, a2
+; RV32ZBB-NEXT:    sub a2, a2, a7
+; RV32ZBB-NEXT:    sub a3, a6, a3
+; RV32ZBB-NEXT:    add a1, a1, a5
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:    neg a4, a4
+; RV32ZBB-NEXT:    sw a4, 0(a0)
+; RV32ZBB-NEXT:    sw a1, 4(a0)
+; RV32ZBB-NEXT:    sw a3, 8(a0)
+; RV32ZBB-NEXT:    sw a2, 12(a0)
+; RV32ZBB-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT:    addi sp, sp, 16
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i128:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    srai a5, a1, 63
+; RV64ZBB-NEXT:    sltu a4, a0, a2
+; RV64ZBB-NEXT:    srai a6, a3, 63
+; RV64ZBB-NEXT:    mv a7, a4
+; RV64ZBB-NEXT:    beq a1, a3, .LBB11_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    sltu a7, a1, a3
+; RV64ZBB-NEXT:  .LBB11_2:
+; RV64ZBB-NEXT:    sub t0, a5, a6
+; RV64ZBB-NEXT:    sltu a7, t0, a7
+; RV64ZBB-NEXT:    sltu a5, a5, a6
+; RV64ZBB-NEXT:    sub a5, t0, a5
+; RV64ZBB-NEXT:    sub a5, a5, a7
+; RV64ZBB-NEXT:    sub a1, a1, a3
+; RV64ZBB-NEXT:    sub a1, a1, a4
+; RV64ZBB-NEXT:    sub a0, a0, a2
+; RV64ZBB-NEXT:    bgez a5, .LBB11_4
+; RV64ZBB-NEXT:  # %bb.3:
+; RV64ZBB-NEXT:    snez a2, a0
+; RV64ZBB-NEXT:    add a1, a1, a2
+; RV64ZBB-NEXT:    neg a1, a1
+; RV64ZBB-NEXT:    neg a0, a0
+; RV64ZBB-NEXT:  .LBB11_4:
+; RV64ZBB-NEXT:    snez a2, a0
+; RV64ZBB-NEXT:    add a1, a1, a2
+; RV64ZBB-NEXT:    neg a1, a1
+; RV64ZBB-NEXT:    neg a0, a0
+; RV64ZBB-NEXT:    ret
+  %aext = sext i128 %a to i256
+  %bext = sext i128 %b to i256
+  %sub = sub i256 %aext, %bext
+  %abs = call i256 @llvm.abs.i256(i256 %sub, i1 false)
+  %nabs = sub i256 0, %abs
+  %trunc = trunc i256 %nabs to i128
+  ret i128 %trunc
+}
+
+define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind {
+; RV32I-LABEL: abd_ext_i128_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lw a3, 0(a2)
+; RV32I-NEXT:    lw a4, 0(a1)
+; RV32I-NEXT:    lw a5, 4(a2)
+; RV32I-NEXT:    lw a6, 8(a2)
+; RV32I-NEXT:    lw a7, 8(a1)
+; RV32I-NEXT:    lw a2, 12(a2)
+; RV32I-NEXT:    lw t0, 12(a1)
+; RV32I-NEXT:    lw a1, 4(a1)
+; RV32I-NEXT:    sltu t1, a7, a6
+; RV32I-NEXT:    mv t4, t1
+; RV32I-NEXT:    beq t0, a2, .LBB12_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu t4, t0, a2
+; RV32I-NEXT:  .LBB12_2:
+; RV32I-NEXT:    sltu t2, a4, a3
+; RV32I-NEXT:    mv t3, t2
+; RV32I-NEXT:    beq a1, a5, .LBB12_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    sltu t3, a1, a5
+; RV32I-NEXT:  .LBB12_4:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    srai t5, t0, 31
+; RV32I-NEXT:    xor t6, t0, a2
+; RV32I-NEXT:    xor s0, a7, a6
+; RV32I-NEXT:    or s1, s0, t6
+; RV32I-NEXT:    srai t6, a2, 31
+; RV32I-NEXT:    mv s0, t3
+; RV32I-NEXT:    beqz s1, .LBB12_6
+; RV32I-NEXT:  # %bb.5:
+; RV32I-NEXT:    mv s0, t4
+; RV32I-NEXT:  .LBB12_6:
+; RV32I-NEXT:    sub t4, t5, t6
+; RV32I-NEXT:    sltu s0, t4, s0
+; RV32I-NEXT:    sltu t5, t5, t6
+; RV32I-NEXT:    sub t6, t4, t5
+; RV32I-NEXT:    seqz s1, t6
+; RV32I-NEXT:    and s0, s1, s0
+; RV32I-NEXT:    sltu s0, t6, s0
+; RV32I-NEXT:    sltu t4, t4, t5
+; RV32I-NEXT:    sub t4, t6, t4
+; RV32I-NEXT:    sub t4, t4, s0
+; RV32I-NEXT:    sub a2, t0, a2
+; RV32I-NEXT:    sub a2, a2, t1
+; RV32I-NEXT:    sub a6, a7, a6
+; RV32I-NEXT:    sltu a7, a6, t3
+; RV32I-NEXT:    sub a2, a2, a7
+; RV32I-NEXT:    sub a6, a6, t3
+; RV32I-NEXT:    sub a1, a1, a5
+; RV32I-NEXT:    sub a1, a1, t2
+; RV32I-NEXT:    sub a4, a4, a3
+; RV32I-NEXT:    bgez t4, .LBB12_8
+; RV32I-NEXT:  # %bb.7:
+; RV32I-NEXT:    snez a3, a1
+; RV32I-NEXT:    snez a5, a4
+; RV32I-NEXT:    or a3, a5, a3
+; RV32I-NEXT:    neg a7, a6
+; RV32I-NEXT:    sltu t0, a7, a3
+; RV32I-NEXT:    snez a6, a6
+; RV32I-NEXT:    add a2, a2, a6
+; RV32I-NEXT:    neg a2, a2
+; RV32I-NEXT:    sub a2, a2, t0
+; RV32I-NEXT:    neg a4, a4
+; RV32I-NEXT:    add a1, a1, a5
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    sub a6, a7, a3
+; RV32I-NEXT:  .LBB12_8:
+; RV32I-NEXT:    snez a3, a6
+; RV32I-NEXT:    add a2, a2, a3
+; RV32I-NEXT:    snez a3, a1
+; RV32I-NEXT:    snez a5, a4
+; RV32I-NEXT:    or a3, a5, a3
+; RV32I-NEXT:    neg a6, a6
+; RV32I-NEXT:    sltu a7, a6, a3
+; RV32I-NEXT:    neg a2, a2
+; RV32I-NEXT:    sub a2, a2, a7
+; RV32I-NEXT:    sub a3, a6, a3
+; RV32I-NEXT:    add a1, a1, a5
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    neg a4, a4
+; RV32I-NEXT:    sw a4, 0(a0)
+; RV32I-NEXT:    sw a1, 4(a0)
+; RV32I-NEXT:    sw a3, 8(a0)
+; RV32I-NEXT:    sw a2, 12(a0)
+; RV32I-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i128_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srai a5, a1, 63
+; RV64I-NEXT:    sltu a4, a0, a2
+; RV64I-NEXT:    srai a6, a3, 63
+; RV64I-NEXT:    mv a7, a4
+; RV64I-NEXT:    beq a1, a3, .LBB12_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sltu a7, a1, a3
+; RV64I-NEXT:  .LBB12_2:
+; RV64I-NEXT:    sub t0, a5, a6
+; RV64I-NEXT:    sltu a7, t0, a7
+; RV64I-NEXT:    sltu a5, a5, a6
+; RV64I-NEXT:    sub a5, t0, a5
+; RV64I-NEXT:    sub a5, a5, a7
+; RV64I-NEXT:    sub a1, a1, a3
+; RV64I-NEXT:    sub a1, a1, a4
+; RV64I-NEXT:    sub a0, a0, a2
+; RV64I-NEXT:    bgez a5, .LBB12_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    snez a2, a0
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:  .LBB12_4:
+; RV64I-NEXT:    snez a2, a0
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i128_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lw a3, 0(a2)
+; RV32ZBB-NEXT:    lw a4, 0(a1)
+; RV32ZBB-NEXT:    lw a5, 4(a2)
+; RV32ZBB-NEXT:    lw a6, 8(a2)
+; RV32ZBB-NEXT:    lw a7, 8(a1)
+; RV32ZBB-NEXT:    lw a2, 12(a2)
+; RV32ZBB-NEXT:    lw t0, 12(a1)
+; RV32ZBB-NEXT:    lw a1, 4(a1)
+; RV32ZBB-NEXT:    sltu t1, a7, a6
+; RV32ZBB-NEXT:    mv t4, t1
+; RV32ZBB-NEXT:    beq t0, a2, .LBB12_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu t4, t0, a2
+; RV32ZBB-NEXT:  .LBB12_2:
+; RV32ZBB-NEXT:    sltu t2, a4, a3
+; RV32ZBB-NEXT:    mv t3, t2
+; RV32ZBB-NEXT:    beq a1, a5, .LBB12_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    sltu t3, a1, a5
+; RV32ZBB-NEXT:  .LBB12_4:
+; RV32ZBB-NEXT:    addi sp, sp, -16
+; RV32ZBB-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT:    sw s1, 8(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT:    srai t5, t0, 31
+; RV32ZBB-NEXT:    xor t6, t0, a2
+; RV32ZBB-NEXT:    xor s0, a7, a6
+; RV32ZBB-NEXT:    or s1, s0, t6
+; RV32ZBB-NEXT:    srai t6, a2, 31
+; RV32ZBB-NEXT:    mv s0, t3
+; RV32ZBB-NEXT:    beqz s1, .LBB12_6
+; RV32ZBB-NEXT:  # %bb.5:
+; RV32ZBB-NEXT:    mv s0, t4
+; RV32ZBB-NEXT:  .LBB12_6:
+; RV32ZBB-NEXT:    sub t4, t5, t6
+; RV32ZBB-NEXT:    sltu s0, t4, s0
+; RV32ZBB-NEXT:    sltu t5, t5, t6
+; RV32ZBB-NEXT:    sub t6, t4, t5
+; RV32ZBB-NEXT:    seqz s1, t6
+; RV32ZBB-NEXT:    and s0, s1, s0
+; RV32ZBB-NEXT:    sltu s0, t6, s0
+; RV32ZBB-NEXT:    sltu t4, t4, t5
+; RV32ZBB-NEXT:    sub t4, t6, t4
+; RV32ZBB-NEXT:    sub t4, t4, s0
+; RV32ZBB-NEXT:    sub a2, t0, a2
+; RV32ZBB-NEXT:    sub a2, a2, t1
+; RV32ZBB-NEXT:    sub a6, a7, a6
+; RV32ZBB-NEXT:    sltu a7, a6, t3
+; RV32ZBB-NEXT:    sub a2, a2, a7
+; RV32ZBB-NEXT:    sub a6, a6, t3
+; RV32ZBB-NEXT:    sub a1, a1, a5
+; RV32ZBB-NEXT:    sub a1, a1, t2
+; RV32ZBB-NEXT:    sub a4, a4, a3
+; RV32ZBB-NEXT:    bgez t4, .LBB12_8
+; RV32ZBB-NEXT:  # %bb.7:
+; RV32ZBB-NEXT:    snez a3, a1
+; RV32ZBB-NEXT:    snez a5, a4
+; RV32ZBB-NEXT:    or a3, a5, a3
+; RV32ZBB-NEXT:    neg a7, a6
+; RV32ZBB-NEXT:    sltu t0, a7, a3
+; RV32ZBB-NEXT:    snez a6, a6
+; RV32ZBB-NEXT:    add a2, a2, a6
+; RV32ZBB-NEXT:    neg a2, a2
+; RV32ZBB-NEXT:    sub a2, a2, t0
+; RV32ZBB-NEXT:    neg a4, a4
+; RV32ZBB-NEXT:    add a1, a1, a5
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:    sub a6, a7, a3
+; RV32ZBB-NEXT:  .LBB12_8:
+; RV32ZBB-NEXT:    snez a3, a6
+; RV32ZBB-NEXT:    add a2, a2, a3
+; RV32ZBB-NEXT:    snez a3, a1
+; RV32ZBB-NEXT:    snez a5, a4
+; RV32ZBB-NEXT:    or a3, a5, a3
+; RV32ZBB-NEXT:    neg a6, a6
+; RV32ZBB-NEXT:    sltu a7, a6, a3
+; RV32ZBB-NEXT:    neg a2, a2
+; RV32ZBB-NEXT:    sub a2, a2, a7
+; RV32ZBB-NEXT:    sub a3, a6, a3
+; RV32ZBB-NEXT:    add a1, a1, a5
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:    neg a4, a4
+; RV32ZBB-NEXT:    sw a4, 0(a0)
+; RV32ZBB-NEXT:    sw a1, 4(a0)
+; RV32ZBB-NEXT:    sw a3, 8(a0)
+; RV32ZBB-NEXT:    sw a2, 12(a0)
+; RV32ZBB-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT:    addi sp, sp, 16
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i128_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    srai a5, a1, 63
+; RV64ZBB-NEXT:    sltu a4, a0, a2
+; RV64ZBB-NEXT:    srai a6, a3, 63
+; RV64ZBB-NEXT:    mv a7, a4
+; RV64ZBB-NEXT:    beq a1, a3, .LBB12_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    sltu a7, a1, a3
+; RV64ZBB-NEXT:  .LBB12_2:
+; RV64ZBB-NEXT:    sub t0, a5, a6
+; RV64ZBB-NEXT:    sltu a7, t0, a7
+; RV64ZBB-NEXT:    sltu a5, a5, a6
+; RV64ZBB-NEXT:    sub a5, t0, a5
+; RV64ZBB-NEXT:    sub a5, a5, a7
+; RV64ZBB-NEXT:    sub a1, a1, a3
+; RV64ZBB-NEXT:    sub a1, a1, a4
+; RV64ZBB-NEXT:    sub a0, a0, a2
+; RV64ZBB-NEXT:    bgez a5, .LBB12_4
+; RV64ZBB-NEXT:  # %bb.3:
+; RV64ZBB-NEXT:    snez a2, a0
+; RV64ZBB-NEXT:    add a1, a1, a2
+; RV64ZBB-NEXT:    neg a1, a1
+; RV64ZBB-NEXT:    neg a0, a0
+; RV64ZBB-NEXT:  .LBB12_4:
+; RV64ZBB-NEXT:    snez a2, a0
+; RV64ZBB-NEXT:    add a1, a1, a2
+; RV64ZBB-NEXT:    neg a1, a1
+; RV64ZBB-NEXT:    neg a0, a0
+; RV64ZBB-NEXT:    ret
+  %aext = sext i128 %a to i256
+  %bext = sext i128 %b to i256
+  %sub = sub i256 %aext, %bext
+  %abs = call i256 @llvm.abs.i256(i256 %sub, i1 true)
+  %nabs = sub i256 0, %abs
+  %trunc = trunc i256 %nabs to i128
+  ret i128 %trunc
+}
+
+;
+; sub(smin(a,b),smax(a,b)) -> nabds(a,b)
+;
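+; Illustrative values (not part of the generated checks): for a = -2, b = 3
+; (i8), smin = -2 and smax = 3, so sub(smin, smax) = -5 = -|a - b|,
+; matching nabds(a, b).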
+
+define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind {
+; RV32I-LABEL: abd_minmax_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    srai a1, a1, 24
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    srai a0, a0, 24
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bge a0, a1, .LBB13_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    bge a1, a0, .LBB13_4
+; RV32I-NEXT:  .LBB13_2:
+; RV32I-NEXT:    sub a0, a2, a0
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB13_3:
+; RV32I-NEXT:    mv a2, a1
+; RV32I-NEXT:    blt a1, a0, .LBB13_2
+; RV32I-NEXT:  .LBB13_4:
+; RV32I-NEXT:    sub a0, a2, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_minmax_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge a0, a1, .LBB13_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bge a1, a0, .LBB13_4
+; RV64I-NEXT:  .LBB13_2:
+; RV64I-NEXT:    sub a0, a2, a0
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB13_3:
+; RV64I-NEXT:    mv a2, a1
+; RV64I-NEXT:    blt a1, a0, .LBB13_2
+; RV64I-NEXT:  .LBB13_4:
+; RV64I-NEXT:    sub a0, a2, a1
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_minmax_i8:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    sext.b a1, a1
+; ZBB-NEXT:    sext.b a0, a0
+; ZBB-NEXT:    min a2, a0, a1
+; ZBB-NEXT:    max a0, a0, a1
+; ZBB-NEXT:    sub a0, a2, a0
+; ZBB-NEXT:    ret
+  %min = call i8 @llvm.smin.i8(i8 %a, i8 %b)
+  %max = call i8 @llvm.smax.i8(i8 %a, i8 %b)
+  %sub = sub i8 %min, %max
+  ret i8 %sub
+}
+
+define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_minmax_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srai a1, a1, 16
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bge a0, a1, .LBB14_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    bge a1, a0, .LBB14_4
+; RV32I-NEXT:  .LBB14_2:
+; RV32I-NEXT:    sub a0, a2, a0
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB14_3:
+; RV32I-NEXT:    mv a2, a1
+; RV32I-NEXT:    blt a1, a0, .LBB14_2
+; RV32I-NEXT:  .LBB14_4:
+; RV32I-NEXT:    sub a0, a2, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_minmax_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge a0, a1, .LBB14_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bge a1, a0, .LBB14_4
+; RV64I-NEXT:  .LBB14_2:
+; RV64I-NEXT:    sub a0, a2, a0
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB14_3:
+; RV64I-NEXT:    mv a2, a1
+; RV64I-NEXT:    blt a1, a0, .LBB14_2
+; RV64I-NEXT:  .LBB14_4:
+; RV64I-NEXT:    sub a0, a2, a1
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_minmax_i16:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    sext.h a1, a1
+; ZBB-NEXT:    sext.h a0, a0
+; ZBB-NEXT:    min a2, a0, a1
+; ZBB-NEXT:    max a0, a0, a1
+; ZBB-NEXT:    sub a0, a2, a0
+; ZBB-NEXT:    ret
+  %min = call i16 @llvm.smin.i16(i16 %a, i16 %b)
+  %max = call i16 @llvm.smax.i16(i16 %a, i16 %b)
+  %sub = sub i16 %min, %max
+  ret i16 %sub
+}
+
+define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_minmax_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bge a0, a1, .LBB15_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    bge a1, a0, .LBB15_4
+; RV32I-NEXT:  .LBB15_2:
+; RV32I-NEXT:    sub a0, a2, a0
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB15_3:
+; RV32I-NEXT:    mv a2, a1
+; RV32I-NEXT:    blt a1, a0, .LBB15_2
+; RV32I-NEXT:  .LBB15_4:
+; RV32I-NEXT:    sub a0, a2, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_minmax_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a1, a1
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge a0, a1, .LBB15_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bge a1, a0, .LBB15_4
+; RV64I-NEXT:  .LBB15_2:
+; RV64I-NEXT:    subw a0, a2, a0
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB15_3:
+; RV64I-NEXT:    mv a2, a1
+; RV64I-NEXT:    blt a1, a0, .LBB15_2
+; RV64I-NEXT:  .LBB15_4:
+; RV64I-NEXT:    subw a0, a2, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_minmax_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    min a2, a0, a1
+; RV32ZBB-NEXT:    max a0, a0, a1
+; RV32ZBB-NEXT:    sub a0, a2, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_minmax_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sext.w a1, a1
+; RV64ZBB-NEXT:    sext.w a0, a0
+; RV64ZBB-NEXT:    min a2, a0, a1
+; RV64ZBB-NEXT:    max a0, a0, a1
+; RV64ZBB-NEXT:    subw a0, a2, a0
+; RV64ZBB-NEXT:    ret
+  %min = call i32 @llvm.smin.i32(i32 %a, i32 %b)
+  %max = call i32 @llvm.smax.i32(i32 %a, i32 %b)
+  %sub = sub i32 %min, %max
+  ret i32 %sub
+}
+
+define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: abd_minmax_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    beq a1, a3, .LBB16_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    slt a6, a1, a3
+; RV32I-NEXT:    j .LBB16_3
+; RV32I-NEXT:  .LBB16_2:
+; RV32I-NEXT:    sltu a6, a0, a2
+; RV32I-NEXT:  .LBB16_3:
+; RV32I-NEXT:    mv a4, a1
+; RV32I-NEXT:    mv a5, a0
+; RV32I-NEXT:    bnez a6, .LBB16_5
+; RV32I-NEXT:  # %bb.4:
+; RV32I-NEXT:    mv a4, a3
+; RV32I-NEXT:    mv a5, a2
+; RV32I-NEXT:  .LBB16_5:
+; RV32I-NEXT:    beq a1, a3, .LBB16_7
+; RV32I-NEXT:  # %bb.6:
+; RV32I-NEXT:    slt a6, a3, a1
+; RV32I-NEXT:    beqz a6, .LBB16_8
+; RV32I-NEXT:    j .LBB16_9
+; RV32I-NEXT:  .LBB16_7:
+; RV32I-NEXT:    sltu a6, a2, a0
+; RV32I-NEXT:    bnez a6, .LBB16_9
+; RV32I-NEXT:  .LBB16_8:
+; RV32I-NEXT:    mv a1, a3
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:  .LBB16_9:
+; RV32I-NEXT:    sltu a2, a5, a0
+; RV32I-NEXT:    sub a1, a4, a1
+; RV32I-NEXT:    sub a1, a1, a2
+; RV32I-NEXT:    sub a0, a5, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_minmax_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge a0, a1, .LBB16_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bge a1, a0, .LBB16_4
+; RV64I-NEXT:  .LBB16_2:
+; RV64I-NEXT:    sub a0, a2, a0
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB16_3:
+; RV64I-NEXT:    mv a2, a1
+; RV64I-NEXT:    blt a1, a0, .LBB16_2
+; RV64I-NEXT:  .LBB16_4:
+; RV64I-NEXT:    sub a0, a2, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_minmax_i64:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    beq a1, a3, .LBB16_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    slt a6, a1, a3
+; RV32ZBB-NEXT:    j .LBB16_3
+; RV32ZBB-NEXT:  .LBB16_2:
+; RV32ZBB-NEXT:    sltu a6, a0, a2
+; RV32ZBB-NEXT:  .LBB16_3:
+; RV32ZBB-NEXT:    mv a4, a1
+; RV32ZBB-NEXT:    mv a5, a0
+; RV32ZBB-NEXT:    bnez a6, .LBB16_5
+; RV32ZBB-NEXT:  # %bb.4:
+; RV32ZBB-NEXT:    mv a4, a3
+; RV32ZBB-NEXT:    mv a5, a2
+; RV32ZBB-NEXT:  .LBB16_5:
+; RV32ZBB-NEXT:    beq a1, a3, .LBB16_7
+; RV32ZBB-NEXT:  # %bb.6:
+; RV32ZBB-NEXT:    slt a6, a3, a1
+; RV32ZBB-NEXT:    beqz a6, .LBB16_8
+; RV32ZBB-NEXT:    j .LBB16_9
+; RV32ZBB-NEXT:  .LBB16_7:
+; RV32ZBB-NEXT:    sltu a6, a2, a0
+; RV32ZBB-NEXT:    bnez a6, .LBB16_9
+; RV32ZBB-NEXT:  .LBB16_8:
+; RV32ZBB-NEXT:    mv a1, a3
+; RV32ZBB-NEXT:    mv a0, a2
+; RV32ZBB-NEXT:  .LBB16_9:
+; RV32ZBB-NEXT:    sltu a2, a5, a0
+; RV32ZBB-NEXT:    sub a1, a4, a1
+; RV32ZBB-NEXT:    sub a1, a1, a2
+; RV32ZBB-NEXT:    sub a0, a5, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_minmax_i64:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    min a2, a0, a1
+; RV64ZBB-NEXT:    max a0, a0, a1
+; RV64ZBB-NEXT:    sub a0, a2, a0
+; RV64ZBB-NEXT:    ret
+  %min = call i64 @llvm.smin.i64(i64 %a, i64 %b)
+  %max = call i64 @llvm.smax.i64(i64 %a, i64 %b)
+  %sub = sub i64 %min, %max
+  ret i64 %sub
+}
+
+define i128 @abd_minmax_i128(i128 %a, i128 %b) nounwind {
+; RV32I-LABEL: abd_minmax_i128:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lw a6, 4(a2)
+; RV32I-NEXT:    lw a3, 4(a1)
+; RV32I-NEXT:    lw a7, 8(a2)
+; RV32I-NEXT:    lw t0, 12(a2)
+; RV32I-NEXT:    lw a5, 12(a1)
+; RV32I-NEXT:    lw a4, 8(a1)
+; RV32I-NEXT:    beq a5, t0, .LBB17_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    slt t1, a5, t0
+; RV32I-NEXT:    j .LBB17_3
+; RV32I-NEXT:  .LBB17_2:
+; RV32I-NEXT:    sltu t1, a4, a7
+; RV32I-NEXT:  .LBB17_3:
+; RV32I-NEXT:    lw t2, 0(a2)
+; RV32I-NEXT:    lw a1, 0(a1)
+; RV32I-NEXT:    beq a3, a6, .LBB17_5
+; RV32I-NEXT:  # %bb.4:
+; RV32I-NEXT:    sltu t6, a3, a6
+; RV32I-NEXT:    j .LBB17_6
+; RV32I-NEXT:  .LBB17_5:
+; RV32I-NEXT:    sltu t6, a1, t2
+; RV32I-NEXT:  .LBB17_6:
+; RV32I-NEXT:    xor a2, a5, t0
+; RV32I-NEXT:    xor t3, a4, a7
+; RV32I-NEXT:    or t5, t3, a2
+; RV32I-NEXT:    beqz t5, .LBB17_8
+; RV32I-NEXT:  # %bb.7:
+; RV32I-NEXT:    mv t6, t1
+; RV32I-NEXT:  .LBB17_8:
+; RV32I-NEXT:    mv a2, a1
+; RV32I-NEXT:    mv t1, a3
+; RV32I-NEXT:    mv t4, a5
+; RV32I-NEXT:    mv t3, a4
+; RV32I-NEXT:    bnez t6, .LBB17_10
+; RV32I-NEXT:  # %bb.9:
+; RV32I-NEXT:    mv a2, t2
+; RV32I-NEXT:    mv t1, a6
+; RV32I-NEXT:    mv t4, t0
+; RV32I-NEXT:    mv t3, a7
+; RV32I-NEXT:  .LBB17_10:
+; RV32I-NEXT:    beq a5, t0, .LBB17_12
+; RV32I-NEXT:  # %bb.11:
+; RV32I-NEXT:    slt t6, t0, a5
+; RV32I-NEXT:    j .LBB17_13
+; RV32I-NEXT:  .LBB17_12:
+; RV32I-NEXT:    sltu t6, a7, a4
+; RV32I-NEXT:  .LBB17_13:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    beq a3, a6, .LBB17_15
+; RV32I-NEXT:  # %bb.14:
+; RV32I-NEXT:    sltu s0, a6, a3
+; RV32I-NEXT:    bnez t5, .LBB17_16
+; RV32I-NEXT:    j .LBB17_17
+; RV32I-NEXT:  .LBB17_15:
+; RV32I-NEXT:    sltu s0, t2, a1
+; RV32I-NEXT:    beqz t5, .LBB17_17
+; RV32I-NEXT:  .LBB17_16:
+; RV32I-NEXT:    mv s0, t6
+; RV32I-NEXT:  .LBB17_17:
+; RV32I-NEXT:    bnez s0, .LBB17_19
+; RV32I-NEXT:  # %bb.18:
+; RV32I-NEXT:    mv a1, t2
+; RV32I-NEXT:    mv a3, a6
+; RV32I-NEXT:    mv a5, t0
+; RV32I-NEXT:    mv a4, a7
+; RV32I-NEXT:  .LBB17_19:
+; RV32I-NEXT:    sltu a6, t3, a4
+; RV32I-NEXT:    sub a7, t4, a5
+; RV32I-NEXT:    sltu a5, a2, a1
+; RV32I-NEXT:    sub a6, a7, a6
+; RV32I-NEXT:    mv a7, a5
+; RV32I-NEXT:    beq t1, a3, .LBB17_21
+; RV32I-NEXT:  # %bb.20:
+; RV32I-NEXT:    sltu a7, t1, a3
+; RV32I-NEXT:  .LBB17_21:
+; RV32I-NEXT:    sub a4, t3, a4
+; RV32I-NEXT:    sltu t0, a4, a7
+; RV32I-NEXT:    sub a6, a6, t0
+; RV32I-NEXT:    sub a4, a4, a7
+; RV32I-NEXT:    sub a3, t1, a3
+; RV32I-NEXT:    sub a3, a3, a5
+; RV32I-NEXT:    sub a2, a2, a1
+; RV32I-NEXT:    sw a2, 0(a0)
+; RV32I-NEXT:    sw a3, 4(a0)
+; RV32I-NEXT:    sw a4, 8(a0)
+; RV32I-NEXT:    sw a6, 12(a0)
+; RV32I-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_minmax_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    beq a1, a3, .LBB17_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    slt a6, a1, a3
+; RV64I-NEXT:    j .LBB17_3
+; RV64I-NEXT:  .LBB17_2:
+; RV64I-NEXT:    sltu a6, a0, a2
+; RV64I-NEXT:  .LBB17_3:
+; RV64I-NEXT:    mv a4, a1
+; RV64I-NEXT:    mv a5, a0
+; RV64I-NEXT:    bnez a6, .LBB17_5
+; RV64I-NEXT:  # %bb.4:
+; RV64I-NEXT:    mv a4, a3
+; RV64I-NEXT:    mv a5, a2
+; RV64I-NEXT:  .LBB17_5:
+; RV64I-NEXT:    beq a1, a3, .LBB17_7
+; RV64I-NEXT:  # %bb.6:
+; RV64I-NEXT:    slt a6, a3, a1
+; RV64I-NEXT:    beqz a6, .LBB17_8
+; RV64I-NEXT:    j .LBB17_9
+; RV64I-NEXT:  .LBB17_7:
+; RV64I-NEXT:    sltu a6, a2, a0
+; RV64I-NEXT:    bnez a6, .LBB17_9
+; RV64I-NEXT:  .LBB17_8:
+; RV64I-NEXT:    mv a1, a3
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:  .LBB17_9:
+; RV64I-NEXT:    sltu a2, a5, a0
+; RV64I-NEXT:    sub a1, a4, a1
+; RV64I-NEXT:    sub a1, a1, a2
+; RV64I-NEXT:    sub a0, a5, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_minmax_i128:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lw a6, 4(a2)
+; RV32ZBB-NEXT:    lw a3, 4(a1)
+; RV32ZBB-NEXT:    lw a7, 8(a2)
+; RV32ZBB-NEXT:    lw t0, 12(a2)
+; RV32ZBB-NEXT:    lw a5, 12(a1)
+; RV32ZBB-NEXT:    lw a4, 8(a1)
+; RV32ZBB-NEXT:    beq a5, t0, .LBB17_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    slt t1, a5, t0
+; RV32ZBB-NEXT:    j .LBB17_3
+; RV32ZBB-NEXT:  .LBB17_2:
+; RV32ZBB-NEXT:    sltu t1, a4, a7
+; RV32ZBB-NEXT:  .LBB17_3:
+; RV32ZBB-NEXT:    lw t2, 0(a2)
+; RV32ZBB-NEXT:    lw a1, 0(a1)
+; RV32ZBB-NEXT:    beq a3, a6, .LBB17_5
+; RV32ZBB-NEXT:  # %bb.4:
+; RV32ZBB-NEXT:    sltu t6, a3, a6
+; RV32ZBB-NEXT:    j .LBB17_6
+; RV32ZBB-NEXT:  .LBB17_5:
+; RV32ZBB-NEXT:    sltu t6, a1, t2
+; RV32ZBB-NEXT:  .LBB17_6:
+; RV32ZBB-NEXT:    xor a2, a5, t0
+; RV32ZBB-NEXT:    xor t3, a4, a7
+; RV32ZBB-NEXT:    or t5, t3, a2
+; RV32ZBB-NEXT:    beqz t5, .LBB17_8
+; RV32ZBB-NEXT:  # %bb.7:
+; RV32ZBB-NEXT:    mv t6, t1
+; RV32ZBB-NEXT:  .LBB17_8:
+; RV32ZBB-NEXT:    mv a2, a1
+; RV32ZBB-NEXT:    mv t1, a3
+; RV32ZBB-NEXT:    mv t4, a5
+; RV32ZBB-NEXT:    mv t3, a4
+; RV32ZBB-NEXT:    bnez t6, .LBB17_10
+; RV32ZBB-NEXT:  # %bb.9:
+; RV32ZBB-NEXT:    mv a2, t2
+; RV32ZBB-NEXT:    mv t1, a6
+; RV32ZBB-NEXT:    mv t4, t0
+; RV32ZBB-NEXT:    mv t3, a7
+; RV32ZBB-NEXT:  .LBB17_10:
+; RV32ZBB-NEXT:    beq a5, t0, .LBB17_12
+; RV32ZBB-NEXT:  # %bb.11:
+; RV32ZBB-NEXT:    slt t6, t0, a5
+; RV32ZBB-NEXT:    j .LBB17_13
+; RV32ZBB-NEXT:  .LBB17_12:
+; RV32ZBB-NEXT:    sltu t6, a7, a4
+; RV32ZBB-NEXT:  .LBB17_13:
+; RV32ZBB-NEXT:    addi sp, sp, -16
+; RV32ZBB-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT:    beq a3, a6, .LBB17_15
+; RV32ZBB-NEXT:  # %bb.14:
+; RV32ZBB-NEXT:    sltu s0, a6, a3
+; RV32ZBB-NEXT:    bnez t5, .LBB17_16
+; RV32ZBB-NEXT:    j .LBB17_17
+; RV32ZBB-NEXT:  .LBB17_15:
+; RV32ZBB-NEXT:    sltu s0, t2, a1
+; RV32ZBB-NEXT:    beqz t5, .LBB17_17
+; RV32ZBB-NEXT:  .LBB17_16:
+; RV32ZBB-NEXT:    mv s0, t6
+; RV32ZBB-NEXT:  .LBB17_17:
+; RV32ZBB-NEXT:    bnez s0, .LBB17_19
+; RV32ZBB-NEXT:  # %bb.18:
+; RV32ZBB-NEXT:    mv a1, t2
+; RV32ZBB-NEXT:    mv a3, a6
+; RV32ZBB-NEXT:    mv a5, t0
+; RV32ZBB-NEXT:    mv a4, a7
+; RV32ZBB-NEXT:  .LBB17_19:
+; RV32ZBB-NEXT:    sltu a6, t3, a4
+; RV32ZBB-NEXT:    sub a7, t4, a5
+; RV32ZBB-NEXT:    sltu a5, a2, a1
+; RV32ZBB-NEXT:    sub a6, a7, a6
+; RV32ZBB-NEXT:    mv a7, a5
+; RV32ZBB-NEXT:    beq t1, a3, .LBB17_21
+; RV32ZBB-NEXT:  # %bb.20:
+; RV32ZBB-NEXT:    sltu a7, t1, a3
+; RV32ZBB-NEXT:  .LBB17_21:
+; RV32ZBB-NEXT:    sub a4, t3, a4
+; RV32ZBB-NEXT:    sltu t0, a4, a7
+; RV32ZBB-NEXT:    sub a6, a6, t0
+; RV32ZBB-NEXT:    sub a4, a4, a7
+; RV32ZBB-NEXT:    sub a3, t1, a3
+; RV32ZBB-NEXT:    sub a3, a3, a5
+; RV32ZBB-NEXT:    sub a2, a2, a1
+; RV32ZBB-NEXT:    sw a2, 0(a0)
+; RV32ZBB-NEXT:    sw a3, 4(a0)
+; RV32ZBB-NEXT:    sw a4, 8(a0)
+; RV32ZBB-NEXT:    sw a6, 12(a0)
+; RV32ZBB-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT:    addi sp, sp, 16
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_minmax_i128:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    beq a1, a3, .LBB17_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    slt a6, a1, a3
+; RV64ZBB-NEXT:    j .LBB17_3
+; RV64ZBB-NEXT:  .LBB17_2:
+; RV64ZBB-NEXT:    sltu a6, a0, a2
+; RV64ZBB-NEXT:  .LBB17_3:
+; RV64ZBB-NEXT:    mv a4, a1
+; RV64ZBB-NEXT:    mv a5, a0
+; RV64ZBB-NEXT:    bnez a6, .LBB17_5
+; RV64ZBB-NEXT:  # %bb.4:
+; RV64ZBB-NEXT:    mv a4, a3
+; RV64ZBB-NEXT:    mv a5, a2
+; RV64ZBB-NEXT:  .LBB17_5:
+; RV64ZBB-NEXT:    beq a1, a3, .LBB17_7
+; RV64ZBB-NEXT:  # %bb.6:
+; RV64ZBB-NEXT:    slt a6, a3, a1
+; RV64ZBB-NEXT:    beqz a6, .LBB17_8
+; RV64ZBB-NEXT:    j .LBB17_9
+; RV64ZBB-NEXT:  .LBB17_7:
+; RV64ZBB-NEXT:    sltu a6, a2, a0
+; RV64ZBB-NEXT:    bnez a6, .LBB17_9
+; RV64ZBB-NEXT:  .LBB17_8:
+; RV64ZBB-NEXT:    mv a1, a3
+; RV64ZBB-NEXT:    mv a0, a2
+; RV64ZBB-NEXT:  .LBB17_9:
+; RV64ZBB-NEXT:    sltu a2, a5, a0
+; RV64ZBB-NEXT:    sub a1, a4, a1
+; RV64ZBB-NEXT:    sub a1, a1, a2
+; RV64ZBB-NEXT:    sub a0, a5, a0
+; RV64ZBB-NEXT:    ret
+  %min = call i128 @llvm.smin.i128(i128 %a, i128 %b)
+  %max = call i128 @llvm.smax.i128(i128 %a, i128 %b)
+  %sub = sub i128 %min, %max
+  ret i128 %sub
+}
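+; Illustrative: smin(a,b) - smax(a,b) is always the negated absolute
+; difference, e.g. a = 3, b = 7 gives 3 - 7 = -4 = -|a - b|, so this
+; min/max form is another spelling of nabds(a,b).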
+
+;
+; select(icmp(a,b),sub(a,b),sub(b,a)) -> nabds(a,b)
+;
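+; Illustrative: for i8 a = 10, b = 20 the compare picks a - b = -10, and
+; for a = 20, b = 10 it picks b - a = -10; either way the result is
+; -|a - b|, the negated absolute difference.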
+
+define i8 @abd_cmp_i8(i8 %a, i8 %b) nounwind {
+; RV32I-LABEL: abd_cmp_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a2, a0, 24
+; RV32I-NEXT:    srai a2, a2, 24
+; RV32I-NEXT:    slli a3, a1, 24
+; RV32I-NEXT:    srai a3, a3, 24
+; RV32I-NEXT:    bge a3, a2, .LBB18_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB18_2:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_cmp_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a2, a0, 56
+; RV64I-NEXT:    srai a2, a2, 56
+; RV64I-NEXT:    slli a3, a1, 56
+; RV64I-NEXT:    srai a3, a3, 56
+; RV64I-NEXT:    bge a3, a2, .LBB18_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB18_2:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_cmp_i8:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    sext.b a2, a0
+; ZBB-NEXT:    sext.b a3, a1
+; ZBB-NEXT:    bge a3, a2, .LBB18_2
+; ZBB-NEXT:  # %bb.1:
+; ZBB-NEXT:    sub a0, a1, a0
+; ZBB-NEXT:    ret
+; ZBB-NEXT:  .LBB18_2:
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    ret
+  %cmp = icmp sle i8 %a, %b
+  %ab = sub i8 %a, %b
+  %ba = sub i8 %b, %a
+  %sel = select i1 %cmp, i8 %ab, i8 %ba
+  ret i8 %sel
+}
+
+define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_cmp_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a2, a1, 16
+; RV32I-NEXT:    srai a2, a2, 16
+; RV32I-NEXT:    slli a3, a0, 16
+; RV32I-NEXT:    srai a3, a3, 16
+; RV32I-NEXT:    blt a3, a2, .LBB19_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB19_2:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_cmp_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a2, a1, 48
+; RV64I-NEXT:    srai a2, a2, 48
+; RV64I-NEXT:    slli a3, a0, 48
+; RV64I-NEXT:    srai a3, a3, 48
+; RV64I-NEXT:    blt a3, a2, .LBB19_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB19_2:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_cmp_i16:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    sext.h a2, a1
+; ZBB-NEXT:    sext.h a3, a0
+; ZBB-NEXT:    blt a3, a2, .LBB19_2
+; ZBB-NEXT:  # %bb.1:
+; ZBB-NEXT:    sub a0, a1, a0
+; ZBB-NEXT:    ret
+; ZBB-NEXT:  .LBB19_2:
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    ret
+  %cmp = icmp slt i16 %a, %b
+  %ab = sub i16 %a, %b
+  %ba = sub i16 %b, %a
+  %sel = select i1 %cmp, i16 %ab, i16 %ba
+  ret i16 %sel
+}
+
+define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_cmp_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    bge a0, a1, .LBB20_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB20_2:
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_cmp_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a2, a1
+; RV64I-NEXT:    sext.w a3, a0
+; RV64I-NEXT:    bge a3, a2, .LBB20_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB20_2:
+; RV64I-NEXT:    subw a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_cmp_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    bge a0, a1, .LBB20_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    ret
+; RV32ZBB-NEXT:  .LBB20_2:
+; RV32ZBB-NEXT:    sub a0, a1, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_cmp_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sext.w a2, a1
+; RV64ZBB-NEXT:    sext.w a3, a0
+; RV64ZBB-NEXT:    bge a3, a2, .LBB20_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    subw a0, a0, a1
+; RV64ZBB-NEXT:    ret
+; RV64ZBB-NEXT:  .LBB20_2:
+; RV64ZBB-NEXT:    subw a0, a1, a0
+; RV64ZBB-NEXT:    ret
+  %cmp = icmp sge i32 %a, %b
+  %ab = sub i32 %a, %b
+  %ba = sub i32 %b, %a
+  %sel = select i1 %cmp, i32 %ba, i32 %ab
+  ret i32 %sel
+}
+
+define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: abd_cmp_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltu a4, a0, a2
+; RV32I-NEXT:    mv a5, a4
+; RV32I-NEXT:    beq a1, a3, .LBB21_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    slt a5, a1, a3
+; RV32I-NEXT:  .LBB21_2:
+; RV32I-NEXT:    bnez a5, .LBB21_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sub a1, a1, a4
+; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB21_4:
+; RV32I-NEXT:    sltu a4, a2, a0
+; RV32I-NEXT:    sub a1, a3, a1
+; RV32I-NEXT:    sub a1, a1, a4
+; RV32I-NEXT:    sub a0, a2, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_cmp_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    blt a0, a1, .LBB21_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB21_2:
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_cmp_i64:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sltu a4, a0, a2
+; RV32ZBB-NEXT:    mv a5, a4
+; RV32ZBB-NEXT:    beq a1, a3, .LBB21_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    slt a5, a1, a3
+; RV32ZBB-NEXT:  .LBB21_2:
+; RV32ZBB-NEXT:    bnez a5, .LBB21_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    sub a1, a1, a3
+; RV32ZBB-NEXT:    sub a1, a1, a4
+; RV32ZBB-NEXT:    sub a0, a0, a2
+; RV32ZBB-NEXT:    ret
+; RV32ZBB-NEXT:  .LBB21_4:
+; RV32ZBB-NEXT:    sltu a4, a2, a0
+; RV32ZBB-NEXT:    sub a1, a3, a1
+; RV32ZBB-NEXT:    sub a1, a1, a4
+; RV32ZBB-NEXT:    sub a0, a2, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_cmp_i64:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    blt a0, a1, .LBB21_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    ret
+; RV64ZBB-NEXT:  .LBB21_2:
+; RV64ZBB-NEXT:    sub a0, a1, a0
+; RV64ZBB-NEXT:    ret
+  %cmp = icmp slt i64 %a, %b
+  %ab = sub i64 %a, %b
+  %ba = sub i64 %b, %a
+  %sel = select i1 %cmp, i64 %ba, i64 %ab
+  ret i64 %sel
+}
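+; Illustrative: on rv32 the i64 signed compare is stitched from word
+; halves (slt on the high words, sltu on the low words when the highs
+; are equal), and the low-word sltu also supplies the borrow for the
+; double-word subtract.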
+
+define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
+; RV32I-LABEL: abd_cmp_i128:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lw a3, 0(a2)
+; RV32I-NEXT:    lw a4, 0(a1)
+; RV32I-NEXT:    lw a5, 4(a2)
+; RV32I-NEXT:    lw a6, 8(a2)
+; RV32I-NEXT:    lw a7, 8(a1)
+; RV32I-NEXT:    lw a2, 12(a2)
+; RV32I-NEXT:    lw t0, 12(a1)
+; RV32I-NEXT:    lw a1, 4(a1)
+; RV32I-NEXT:    sltu t1, a7, a6
+; RV32I-NEXT:    mv t4, t1
+; RV32I-NEXT:    beq t0, a2, .LBB22_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    slt t4, t0, a2
+; RV32I-NEXT:  .LBB22_2:
+; RV32I-NEXT:    sltu t2, a4, a3
+; RV32I-NEXT:    mv t3, t2
+; RV32I-NEXT:    beq a1, a5, .LBB22_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    sltu t3, a1, a5
+; RV32I-NEXT:  .LBB22_4:
+; RV32I-NEXT:    xor t5, t0, a2
+; RV32I-NEXT:    xor t6, a7, a6
+; RV32I-NEXT:    or t5, t6, t5
+; RV32I-NEXT:    mv t6, t3
+; RV32I-NEXT:    beqz t5, .LBB22_6
+; RV32I-NEXT:  # %bb.5:
+; RV32I-NEXT:    mv t6, t4
+; RV32I-NEXT:  .LBB22_6:
+; RV32I-NEXT:    sltu t4, a3, a4
+; RV32I-NEXT:    mv t5, t4
+; RV32I-NEXT:    beq a1, a5, .LBB22_8
+; RV32I-NEXT:  # %bb.7:
+; RV32I-NEXT:    sltu t5, a5, a1
+; RV32I-NEXT:  .LBB22_8:
+; RV32I-NEXT:    bnez t6, .LBB22_10
+; RV32I-NEXT:  # %bb.9:
+; RV32I-NEXT:    sub a2, t0, a2
+; RV32I-NEXT:    sub a6, a7, a6
+; RV32I-NEXT:    sub a2, a2, t1
+; RV32I-NEXT:    sltu a7, a6, t3
+; RV32I-NEXT:    sub a1, a1, a5
+; RV32I-NEXT:    sub a2, a2, a7
+; RV32I-NEXT:    sub a6, a6, t3
+; RV32I-NEXT:    sub a1, a1, t2
+; RV32I-NEXT:    sub a3, a4, a3
+; RV32I-NEXT:    j .LBB22_11
+; RV32I-NEXT:  .LBB22_10:
+; RV32I-NEXT:    sltu t1, a6, a7
+; RV32I-NEXT:    sub a2, a2, t0
+; RV32I-NEXT:    sub a2, a2, t1
+; RV32I-NEXT:    sub a6, a6, a7
+; RV32I-NEXT:    sltu a7, a6, t5
+; RV32I-NEXT:    sub a2, a2, a7
+; RV32I-NEXT:    sub a6, a6, t5
+; RV32I-NEXT:    sub a5, a5, a1
+; RV32I-NEXT:    sub a1, a5, t4
+; RV32I-NEXT:    sub a3, a3, a4
+; RV32I-NEXT:  .LBB22_11:
+; RV32I-NEXT:    sw a6, 8(a0)
+; RV32I-NEXT:    sw a1, 4(a0)
+; RV32I-NEXT:    sw a3, 0(a0)
+; RV32I-NEXT:    sw a2, 12(a0)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_cmp_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sltu a4, a0, a2
+; RV64I-NEXT:    mv a5, a4
+; RV64I-NEXT:    beq a1, a3, .LBB22_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    slt a5, a1, a3
+; RV64I-NEXT:  .LBB22_2:
+; RV64I-NEXT:    bnez a5, .LBB22_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    sub a1, a1, a3
+; RV64I-NEXT:    sub a1, a1, a4
+; RV64I-NEXT:    sub a0, a0, a2
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB22_4:
+; RV64I-NEXT:    sltu a4, a2, a0
+; RV64I-NEXT:    sub a1, a3, a1
+; RV64I-NEXT:    sub a1, a1, a4
+; RV64I-NEXT:    sub a0, a2, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_cmp_i128:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lw a3, 0(a2)
+; RV32ZBB-NEXT:    lw a4, 0(a1)
+; RV32ZBB-NEXT:    lw a5, 4(a2)
+; RV32ZBB-NEXT:    lw a6, 8(a2)
+; RV32ZBB-NEXT:    lw a7, 8(a1)
+; RV32ZBB-NEXT:    lw a2, 12(a2)
+; RV32ZBB-NEXT:    lw t0, 12(a1)
+; RV32ZBB-NEXT:    lw a1, 4(a1)
+; RV32ZBB-NEXT:    sltu t1, a7, a6
+; RV32ZBB-NEXT:    mv t4, t1
+; RV32ZBB-NEXT:    beq t0, a2, .LBB22_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    slt t4, t0, a2
+; RV32ZBB-NEXT:  .LBB22_2:
+; RV32ZBB-NEXT:    sltu t2, a4, a3
+; RV32ZBB-NEXT:    mv t3, t2
+; RV32ZBB-NEXT:    beq a1, a5, .LBB22_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    sltu t3, a1, a5
+; RV32ZBB-NEXT:  .LBB22_4:
+; RV32ZBB-NEXT:    xor t5, t0, a2
+; RV32ZBB-NEXT:    xor t6, a7, a6
+; RV32ZBB-NEXT:    or t5, t6, t5
+; RV32ZBB-NEXT:    mv t6, t3
+; RV32ZBB-NEXT:    beqz t5, .LBB22_6
+; RV32ZBB-NEXT:  # %bb.5:
+; RV32ZBB-NEXT:    mv t6, t4
+; RV32ZBB-NEXT:  .LBB22_6:
+; RV32ZBB-NEXT:    sltu t4, a3, a4
+; RV32ZBB-NEXT:    mv t5, t4
+; RV32ZBB-NEXT:    beq a1, a5, .LBB22_8
+; RV32ZBB-NEXT:  # %bb.7:
+; RV32ZBB-NEXT:    sltu t5, a5, a1
+; RV32ZBB-NEXT:  .LBB22_8:
+; RV32ZBB-NEXT:    bnez t6, .LBB22_10
+; RV32ZBB-NEXT:  # %bb.9:
+; RV32ZBB-NEXT:    sub a2, t0, a2
+; RV32ZBB-NEXT:    sub a6, a7, a6
+; RV32ZBB-NEXT:    sub a2, a2, t1
+; RV32ZBB-NEXT:    sltu a7, a6, t3
+; RV32ZBB-NEXT:    sub a1, a1, a5
+; RV32ZBB-NEXT:    sub a2, a2, a7
+; RV32ZBB-NEXT:    sub a6, a6, t3
+; RV32ZBB-NEXT:    sub a1, a1, t2
+; RV32ZBB-NEXT:    sub a3, a4, a3
+; RV32ZBB-NEXT:    j .LBB22_11
+; RV32ZBB-NEXT:  .LBB22_10:
+; RV32ZBB-NEXT:    sltu t1, a6, a7
+; RV32ZBB-NEXT:    sub a2, a2, t0
+; RV32ZBB-NEXT:    sub a2, a2, t1
+; RV32ZBB-NEXT:    sub a6, a6, a7
+; RV32ZBB-NEXT:    sltu a7, a6, t5
+; RV32ZBB-NEXT:    sub a2, a2, a7
+; RV32ZBB-NEXT:    sub a6, a6, t5
+; RV32ZBB-NEXT:    sub a5, a5, a1
+; RV32ZBB-NEXT:    sub a1, a5, t4
+; RV32ZBB-NEXT:    sub a3, a3, a4
+; RV32ZBB-NEXT:  .LBB22_11:
+; RV32ZBB-NEXT:    sw a6, 8(a0)
+; RV32ZBB-NEXT:    sw a1, 4(a0)
+; RV32ZBB-NEXT:    sw a3, 0(a0)
+; RV32ZBB-NEXT:    sw a2, 12(a0)
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_cmp_i128:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sltu a4, a0, a2
+; RV64ZBB-NEXT:    mv a5, a4
+; RV64ZBB-NEXT:    beq a1, a3, .LBB22_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    slt a5, a1, a3
+; RV64ZBB-NEXT:  .LBB22_2:
+; RV64ZBB-NEXT:    bnez a5, .LBB22_4
+; RV64ZBB-NEXT:  # %bb.3:
+; RV64ZBB-NEXT:    sub a1, a1, a3
+; RV64ZBB-NEXT:    sub a1, a1, a4
+; RV64ZBB-NEXT:    sub a0, a0, a2
+; RV64ZBB-NEXT:    ret
+; RV64ZBB-NEXT:  .LBB22_4:
+; RV64ZBB-NEXT:    sltu a4, a2, a0
+; RV64ZBB-NEXT:    sub a1, a3, a1
+; RV64ZBB-NEXT:    sub a1, a1, a4
+; RV64ZBB-NEXT:    sub a0, a2, a0
+; RV64ZBB-NEXT:    ret
+  %cmp = icmp slt i128 %a, %b
+  %ab = sub i128 %a, %b
+  %ba = sub i128 %b, %a
+  %sel = select i1 %cmp, i128 %ba, i128 %ab
+  ret i128 %sel
+}
+
+;
+; nabs(sub_nsw(a,b)) -> nabds(a,b)
+;
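+; Illustrative: the nsw flag guarantees the subtract cannot wrap, e.g.
+; for i8 a = -100, b = 27 the difference is exactly -127 and
+; nabs(-127) = -127 = -|a - b|; without nsw a wrapped difference would
+; break this identity.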
+
+define i8 @abd_subnsw_i8(i8 %a, i8 %b) nounwind {
+; RV32I-LABEL: abd_subnsw_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 24
+; RV32I-NEXT:    srai a1, a1, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_subnsw_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 56
+; RV64I-NEXT:    srai a1, a1, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_subnsw_i8:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    slli a1, a0, 24
+; RV32ZBB-NEXT:    srai a1, a1, 31
+; RV32ZBB-NEXT:    xor a0, a0, a1
+; RV32ZBB-NEXT:    sub a0, a1, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_subnsw_i8:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    slli a1, a0, 56
+; RV64ZBB-NEXT:    srai a1, a1, 63
+; RV64ZBB-NEXT:    xor a0, a0, a1
+; RV64ZBB-NEXT:    sub a0, a1, a0
+; RV64ZBB-NEXT:    ret
+  %sub = sub nsw i8 %a, %b
+  %abs = call i8 @llvm.abs.i8(i8 %sub, i1 false)
+  %nabs = sub i8 0, %abs
+  ret i8 %nabs
+}
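+; Illustrative: the base-ISA lowering above is the branchless sign-mask
+; trick: slli/srai broadcast the sign of the i8 difference d into a mask
+; m, and m - (d ^ m) equals -|d|.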
+
+define i8 @abd_subnsw_i8_undef(i8 %a, i8 %b) nounwind {
+; RV32I-LABEL: abd_subnsw_i8_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 24
+; RV32I-NEXT:    srai a1, a1, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_subnsw_i8_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 56
+; RV64I-NEXT:    srai a1, a1, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_subnsw_i8_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    slli a1, a0, 24
+; RV32ZBB-NEXT:    srai a1, a1, 31
+; RV32ZBB-NEXT:    xor a0, a0, a1
+; RV32ZBB-NEXT:    sub a0, a1, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_subnsw_i8_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    slli a1, a0, 56
+; RV64ZBB-NEXT:    srai a1, a1, 63
+; RV64ZBB-NEXT:    xor a0, a0, a1
+; RV64ZBB-NEXT:    sub a0, a1, a0
+; RV64ZBB-NEXT:    ret
+  %sub = sub nsw i8 %a, %b
+  %abs = call i8 @llvm.abs.i8(i8 %sub, i1 true)
+  %nabs = sub i8 0, %abs
+  ret i8 %nabs
+}
+
+define i16 @abd_subnsw_i16(i16 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_subnsw_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 16
+; RV32I-NEXT:    srai a1, a1, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_subnsw_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srai a1, a1, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_subnsw_i16:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    slli a1, a0, 16
+; RV32ZBB-NEXT:    srai a1, a1, 31
+; RV32ZBB-NEXT:    xor a0, a0, a1
+; RV32ZBB-NEXT:    sub a0, a1, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_subnsw_i16:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    slli a1, a0, 48
+; RV64ZBB-NEXT:    srai a1, a1, 63
+; RV64ZBB-NEXT:    xor a0, a0, a1
+; RV64ZBB-NEXT:    sub a0, a1, a0
+; RV64ZBB-NEXT:    ret
+  %sub = sub nsw i16 %a, %b
+  %abs = call i16 @llvm.abs.i16(i16 %sub, i1 false)
+  %nabs = sub i16 0, %abs
+  ret i16 %nabs
+}
+
+define i16 @abd_subnsw_i16_undef(i16 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_subnsw_i16_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 16
+; RV32I-NEXT:    srai a1, a1, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_subnsw_i16_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srai a1, a1, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_subnsw_i16_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    slli a1, a0, 16
+; RV32ZBB-NEXT:    srai a1, a1, 31
+; RV32ZBB-NEXT:    xor a0, a0, a1
+; RV32ZBB-NEXT:    sub a0, a1, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_subnsw_i16_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    slli a1, a0, 48
+; RV64ZBB-NEXT:    srai a1, a1, 63
+; RV64ZBB-NEXT:    xor a0, a0, a1
+; RV64ZBB-NEXT:    sub a0, a1, a0
+; RV64ZBB-NEXT:    ret
+  %sub = sub nsw i16 %a, %b
+  %abs = call i16 @llvm.abs.i16(i16 %sub, i1 true)
+  %nabs = sub i16 0, %abs
+  ret i16 %nabs
+}
+
+define i32 @abd_subnsw_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_subnsw_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_subnsw_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    sraiw a1, a0, 31
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    subw a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_subnsw_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    neg a1, a0
+; RV32ZBB-NEXT:    min a0, a0, a1
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_subnsw_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    subw a0, a0, a1
+; RV64ZBB-NEXT:    sraiw a1, a0, 31
+; RV64ZBB-NEXT:    xor a0, a0, a1
+; RV64ZBB-NEXT:    subw a0, a1, a0
+; RV64ZBB-NEXT:    ret
+  %sub = sub nsw i32 %a, %b
+  %abs = call i32 @llvm.abs.i32(i32 %sub, i1 false)
+  %nabs = sub i32 0, %abs
+  ret i32 %nabs
+}
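+; Illustrative: with Zbb the negated abs is just two instructions, since
+; min(d, -d) = -|d|; it mirrors the max(d, -d) = |d| form used for the
+; plain abs tests in abds.ll.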
+
+define i32 @abd_subnsw_i32_undef(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_subnsw_i32_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_subnsw_i32_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    sraiw a1, a0, 31
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    subw a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_subnsw_i32_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    neg a1, a0
+; RV32ZBB-NEXT:    min a0, a0, a1
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_subnsw_i32_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    subw a0, a0, a1
+; RV64ZBB-NEXT:    sraiw a1, a0, 31
+; RV64ZBB-NEXT:    xor a0, a0, a1
+; RV64ZBB-NEXT:    subw a0, a1, a0
+; RV64ZBB-NEXT:    ret
+  %sub = sub nsw i32 %a, %b
+  %abs = call i32 @llvm.abs.i32(i32 %sub, i1 true)
+  %nabs = sub i32 0, %abs
+  ret i32 %nabs
+}
+
+define i64 @abd_subnsw_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: abd_subnsw_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltu a4, a0, a2
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sub a1, a1, a4
+; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    srai a2, a1, 31
+; RV32I-NEXT:    xor a0, a0, a2
+; RV32I-NEXT:    sltu a3, a2, a0
+; RV32I-NEXT:    xor a1, a1, a2
+; RV32I-NEXT:    sub a1, a2, a1
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sub a0, a2, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_subnsw_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_subnsw_i64:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sltu a4, a0, a2
+; RV32ZBB-NEXT:    sub a1, a1, a3
+; RV32ZBB-NEXT:    sub a1, a1, a4
+; RV32ZBB-NEXT:    sub a0, a0, a2
+; RV32ZBB-NEXT:    srai a2, a1, 31
+; RV32ZBB-NEXT:    xor a0, a0, a2
+; RV32ZBB-NEXT:    sltu a3, a2, a0
+; RV32ZBB-NEXT:    xor a1, a1, a2
+; RV32ZBB-NEXT:    sub a1, a2, a1
+; RV32ZBB-NEXT:    sub a1, a1, a3
+; RV32ZBB-NEXT:    sub a0, a2, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_subnsw_i64:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    min a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %sub = sub nsw i64 %a, %b
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %nabs = sub i64 0, %abs
+  ret i64 %nabs
+}
+
+define i64 @abd_subnsw_i64_undef(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: abd_subnsw_i64_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltu a4, a0, a2
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sub a1, a1, a4
+; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    srai a2, a1, 31
+; RV32I-NEXT:    xor a0, a0, a2
+; RV32I-NEXT:    sltu a3, a2, a0
+; RV32I-NEXT:    xor a1, a1, a2
+; RV32I-NEXT:    sub a1, a2, a1
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sub a0, a2, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_subnsw_i64_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_subnsw_i64_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sltu a4, a0, a2
+; RV32ZBB-NEXT:    sub a1, a1, a3
+; RV32ZBB-NEXT:    sub a1, a1, a4
+; RV32ZBB-NEXT:    sub a0, a0, a2
+; RV32ZBB-NEXT:    srai a2, a1, 31
+; RV32ZBB-NEXT:    xor a0, a0, a2
+; RV32ZBB-NEXT:    sltu a3, a2, a0
+; RV32ZBB-NEXT:    xor a1, a1, a2
+; RV32ZBB-NEXT:    sub a1, a2, a1
+; RV32ZBB-NEXT:    sub a1, a1, a3
+; RV32ZBB-NEXT:    sub a0, a2, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_subnsw_i64_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    min a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %sub = sub nsw i64 %a, %b
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  %nabs = sub i64 0, %abs
+  ret i64 %nabs
+}
+
+define i128 @abd_subnsw_i128(i128 %a, i128 %b) nounwind {
+; RV32I-LABEL: abd_subnsw_i128:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lw a3, 0(a2)
+; RV32I-NEXT:    lw a4, 0(a1)
+; RV32I-NEXT:    lw a7, 12(a2)
+; RV32I-NEXT:    lw a5, 8(a2)
+; RV32I-NEXT:    lw a6, 8(a1)
+; RV32I-NEXT:    lw t0, 12(a1)
+; RV32I-NEXT:    lw a2, 4(a2)
+; RV32I-NEXT:    lw a1, 4(a1)
+; RV32I-NEXT:    sltu t1, a6, a5
+; RV32I-NEXT:    sub t0, t0, a7
+; RV32I-NEXT:    sltu a7, a4, a3
+; RV32I-NEXT:    sub t1, t0, t1
+; RV32I-NEXT:    mv t0, a7
+; RV32I-NEXT:    beq a1, a2, .LBB31_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu t0, a1, a2
+; RV32I-NEXT:  .LBB31_2:
+; RV32I-NEXT:    sub a5, a6, a5
+; RV32I-NEXT:    sltu a6, a5, t0
+; RV32I-NEXT:    sub a6, t1, a6
+; RV32I-NEXT:    sub a1, a1, a2
+; RV32I-NEXT:    sub t1, a1, a7
+; RV32I-NEXT:    sub a2, a5, t0
+; RV32I-NEXT:    sub a3, a4, a3
+; RV32I-NEXT:    srai a1, a6, 31
+; RV32I-NEXT:    xor a2, a2, a1
+; RV32I-NEXT:    sltu a4, a1, a2
+; RV32I-NEXT:    xor a5, a6, a1
+; RV32I-NEXT:    sub a5, a1, a5
+; RV32I-NEXT:    sub a4, a5, a4
+; RV32I-NEXT:    xor a3, a3, a1
+; RV32I-NEXT:    sltu a5, a1, a3
+; RV32I-NEXT:    xor a6, t1, a1
+; RV32I-NEXT:    mv a7, a5
+; RV32I-NEXT:    beqz t1, .LBB31_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    sltu a7, a1, a6
+; RV32I-NEXT:  .LBB31_4:
+; RV32I-NEXT:    sub a2, a1, a2
+; RV32I-NEXT:    sltu t0, a2, a7
+; RV32I-NEXT:    sub a4, a4, t0
+; RV32I-NEXT:    sub a2, a2, a7
+; RV32I-NEXT:    sub a6, a1, a6
+; RV32I-NEXT:    sub a5, a6, a5
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sw a1, 0(a0)
+; RV32I-NEXT:    sw a5, 4(a0)
+; RV32I-NEXT:    sw a2, 8(a0)
+; RV32I-NEXT:    sw a4, 12(a0)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_subnsw_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sltu a4, a0, a2
+; RV64I-NEXT:    sub a1, a1, a3
+; RV64I-NEXT:    sub a1, a1, a4
+; RV64I-NEXT:    sub a0, a0, a2
+; RV64I-NEXT:    srai a2, a1, 63
+; RV64I-NEXT:    xor a0, a0, a2
+; RV64I-NEXT:    sltu a3, a2, a0
+; RV64I-NEXT:    xor a1, a1, a2
+; RV64I-NEXT:    sub a1, a2, a1
+; RV64I-NEXT:    sub a1, a1, a3
+; RV64I-NEXT:    sub a0, a2, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_subnsw_i128:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lw a3, 0(a2)
+; RV32ZBB-NEXT:    lw a4, 0(a1)
+; RV32ZBB-NEXT:    lw a7, 12(a2)
+; RV32ZBB-NEXT:    lw a5, 8(a2)
+; RV32ZBB-NEXT:    lw a6, 8(a1)
+; RV32ZBB-NEXT:    lw t0, 12(a1)
+; RV32ZBB-NEXT:    lw a2, 4(a2)
+; RV32ZBB-NEXT:    lw a1, 4(a1)
+; RV32ZBB-NEXT:    sltu t1, a6, a5
+; RV32ZBB-NEXT:    sub t0, t0, a7
+; RV32ZBB-NEXT:    sltu a7, a4, a3
+; RV32ZBB-NEXT:    sub t1, t0, t1
+; RV32ZBB-NEXT:    mv t0, a7
+; RV32ZBB-NEXT:    beq a1, a2, .LBB31_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu t0, a1, a2
+; RV32ZBB-NEXT:  .LBB31_2:
+; RV32ZBB-NEXT:    sub a5, a6, a5
+; RV32ZBB-NEXT:    sltu a6, a5, t0
+; RV32ZBB-NEXT:    sub a6, t1, a6
+; RV32ZBB-NEXT:    sub a1, a1, a2
+; RV32ZBB-NEXT:    sub t1, a1, a7
+; RV32ZBB-NEXT:    sub a2, a5, t0
+; RV32ZBB-NEXT:    sub a3, a4, a3
+; RV32ZBB-NEXT:    srai a1, a6, 31
+; RV32ZBB-NEXT:    xor a2, a2, a1
+; RV32ZBB-NEXT:    sltu a4, a1, a2
+; RV32ZBB-NEXT:    xor a5, a6, a1
+; RV32ZBB-NEXT:    sub a5, a1, a5
+; RV32ZBB-NEXT:    sub a4, a5, a4
+; RV32ZBB-NEXT:    xor a3, a3, a1
+; RV32ZBB-NEXT:    sltu a5, a1, a3
+; RV32ZBB-NEXT:    xor a6, t1, a1
+; RV32ZBB-NEXT:    mv a7, a5
+; RV32ZBB-NEXT:    beqz t1, .LBB31_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    sltu a7, a1, a6
+; RV32ZBB-NEXT:  .LBB31_4:
+; RV32ZBB-NEXT:    sub a2, a1, a2
+; RV32ZBB-NEXT:    sltu t0, a2, a7
+; RV32ZBB-NEXT:    sub a4, a4, t0
+; RV32ZBB-NEXT:    sub a2, a2, a7
+; RV32ZBB-NEXT:    sub a6, a1, a6
+; RV32ZBB-NEXT:    sub a5, a6, a5
+; RV32ZBB-NEXT:    sub a1, a1, a3
+; RV32ZBB-NEXT:    sw a1, 0(a0)
+; RV32ZBB-NEXT:    sw a5, 4(a0)
+; RV32ZBB-NEXT:    sw a2, 8(a0)
+; RV32ZBB-NEXT:    sw a4, 12(a0)
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_subnsw_i128:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sltu a4, a0, a2
+; RV64ZBB-NEXT:    sub a1, a1, a3
+; RV64ZBB-NEXT:    sub a1, a1, a4
+; RV64ZBB-NEXT:    sub a0, a0, a2
+; RV64ZBB-NEXT:    srai a2, a1, 63
+; RV64ZBB-NEXT:    xor a0, a0, a2
+; RV64ZBB-NEXT:    sltu a3, a2, a0
+; RV64ZBB-NEXT:    xor a1, a1, a2
+; RV64ZBB-NEXT:    sub a1, a2, a1
+; RV64ZBB-NEXT:    sub a1, a1, a3
+; RV64ZBB-NEXT:    sub a0, a2, a0
+; RV64ZBB-NEXT:    ret
+  %sub = sub nsw i128 %a, %b
+  %abs = call i128 @llvm.abs.i128(i128 %sub, i1 false)
+  %nabs = sub i128 0, %abs
+  ret i128 %nabs
+}
+
+define i128 @abd_subnsw_i128_undef(i128 %a, i128 %b) nounwind {
+; RV32I-LABEL: abd_subnsw_i128_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lw a3, 0(a2)
+; RV32I-NEXT:    lw a4, 0(a1)
+; RV32I-NEXT:    lw a7, 12(a2)
+; RV32I-NEXT:    lw a5, 8(a2)
+; RV32I-NEXT:    lw a6, 8(a1)
+; RV32I-NEXT:    lw t0, 12(a1)
+; RV32I-NEXT:    lw a2, 4(a2)
+; RV32I-NEXT:    lw a1, 4(a1)
+; RV32I-NEXT:    sltu t1, a6, a5
+; RV32I-NEXT:    sub t0, t0, a7
+; RV32I-NEXT:    sltu a7, a4, a3
+; RV32I-NEXT:    sub t1, t0, t1
+; RV32I-NEXT:    mv t0, a7
+; RV32I-NEXT:    beq a1, a2, .LBB32_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu t0, a1, a2
+; RV32I-NEXT:  .LBB32_2:
+; RV32I-NEXT:    sub a5, a6, a5
+; RV32I-NEXT:    sltu a6, a5, t0
+; RV32I-NEXT:    sub a6, t1, a6
+; RV32I-NEXT:    sub a1, a1, a2
+; RV32I-NEXT:    sub t1, a1, a7
+; RV32I-NEXT:    sub a2, a5, t0
+; RV32I-NEXT:    sub a3, a4, a3
+; RV32I-NEXT:    srai a1, a6, 31
+; RV32I-NEXT:    xor a2, a2, a1
+; RV32I-NEXT:    sltu a4, a1, a2
+; RV32I-NEXT:    xor a5, a6, a1
+; RV32I-NEXT:    sub a5, a1, a5
+; RV32I-NEXT:    sub a4, a5, a4
+; RV32I-NEXT:    xor a3, a3, a1
+; RV32I-NEXT:    sltu a5, a1, a3
+; RV32I-NEXT:    xor a6, t1, a1
+; RV32I-NEXT:    mv a7, a5
+; RV32I-NEXT:    beqz t1, .LBB32_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    sltu a7, a1, a6
+; RV32I-NEXT:  .LBB32_4:
+; RV32I-NEXT:    sub a2, a1, a2
+; RV32I-NEXT:    sltu t0, a2, a7
+; RV32I-NEXT:    sub a4, a4, t0
+; RV32I-NEXT:    sub a2, a2, a7
+; RV32I-NEXT:    sub a6, a1, a6
+; RV32I-NEXT:    sub a5, a6, a5
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sw a1, 0(a0)
+; RV32I-NEXT:    sw a5, 4(a0)
+; RV32I-NEXT:    sw a2, 8(a0)
+; RV32I-NEXT:    sw a4, 12(a0)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_subnsw_i128_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sltu a4, a0, a2
+; RV64I-NEXT:    sub a1, a1, a3
+; RV64I-NEXT:    sub a1, a1, a4
+; RV64I-NEXT:    sub a0, a0, a2
+; RV64I-NEXT:    srai a2, a1, 63
+; RV64I-NEXT:    xor a0, a0, a2
+; RV64I-NEXT:    sltu a3, a2, a0
+; RV64I-NEXT:    xor a1, a1, a2
+; RV64I-NEXT:    sub a1, a2, a1
+; RV64I-NEXT:    sub a1, a1, a3
+; RV64I-NEXT:    sub a0, a2, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_subnsw_i128_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lw a3, 0(a2)
+; RV32ZBB-NEXT:    lw a4, 0(a1)
+; RV32ZBB-NEXT:    lw a7, 12(a2)
+; RV32ZBB-NEXT:    lw a5, 8(a2)
+; RV32ZBB-NEXT:    lw a6, 8(a1)
+; RV32ZBB-NEXT:    lw t0, 12(a1)
+; RV32ZBB-NEXT:    lw a2, 4(a2)
+; RV32ZBB-NEXT:    lw a1, 4(a1)
+; RV32ZBB-NEXT:    sltu t1, a6, a5
+; RV32ZBB-NEXT:    sub t0, t0, a7
+; RV32ZBB-NEXT:    sltu a7, a4, a3
+; RV32ZBB-NEXT:    sub t1, t0, t1
+; RV32ZBB-NEXT:    mv t0, a7
+; RV32ZBB-NEXT:    beq a1, a2, .LBB32_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu t0, a1, a2
+; RV32ZBB-NEXT:  .LBB32_2:
+; RV32ZBB-NEXT:    sub a5, a6, a5
+; RV32ZBB-NEXT:    sltu a6, a5, t0
+; RV32ZBB-NEXT:    sub a6, t1, a6
+; RV32ZBB-NEXT:    sub a1, a1, a2
+; RV32ZBB-NEXT:    sub t1, a1, a7
+; RV32ZBB-NEXT:    sub a2, a5, t0
+; RV32ZBB-NEXT:    sub a3, a4, a3
+; RV32ZBB-NEXT:    srai a1, a6, 31
+; RV32ZBB-NEXT:    xor a2, a2, a1
+; RV32ZBB-NEXT:    sltu a4, a1, a2
+; RV32ZBB-NEXT:    xor a5, a6, a1
+; RV32ZBB-NEXT:    sub a5, a1, a5
+; RV32ZBB-NEXT:    sub a4, a5, a4
+; RV32ZBB-NEXT:    xor a3, a3, a1
+; RV32ZBB-NEXT:    sltu a5, a1, a3
+; RV32ZBB-NEXT:    xor a6, t1, a1
+; RV32ZBB-NEXT:    mv a7, a5
+; RV32ZBB-NEXT:    beqz t1, .LBB32_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    sltu a7, a1, a6
+; RV32ZBB-NEXT:  .LBB32_4:
+; RV32ZBB-NEXT:    sub a2, a1, a2
+; RV32ZBB-NEXT:    sltu t0, a2, a7
+; RV32ZBB-NEXT:    sub a4, a4, t0
+; RV32ZBB-NEXT:    sub a2, a2, a7
+; RV32ZBB-NEXT:    sub a6, a1, a6
+; RV32ZBB-NEXT:    sub a5, a6, a5
+; RV32ZBB-NEXT:    sub a1, a1, a3
+; RV32ZBB-NEXT:    sw a1, 0(a0)
+; RV32ZBB-NEXT:    sw a5, 4(a0)
+; RV32ZBB-NEXT:    sw a2, 8(a0)
+; RV32ZBB-NEXT:    sw a4, 12(a0)
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_subnsw_i128_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sltu a4, a0, a2
+; RV64ZBB-NEXT:    sub a1, a1, a3
+; RV64ZBB-NEXT:    sub a1, a1, a4
+; RV64ZBB-NEXT:    sub a0, a0, a2
+; RV64ZBB-NEXT:    srai a2, a1, 63
+; RV64ZBB-NEXT:    xor a0, a0, a2
+; RV64ZBB-NEXT:    sltu a3, a2, a0
+; RV64ZBB-NEXT:    xor a1, a1, a2
+; RV64ZBB-NEXT:    sub a1, a2, a1
+; RV64ZBB-NEXT:    sub a1, a1, a3
+; RV64ZBB-NEXT:    sub a0, a2, a0
+; RV64ZBB-NEXT:    ret
+  %sub = sub nsw i128 %a, %b
+  %abs = call i128 @llvm.abs.i128(i128 %sub, i1 true)
+  %nabs = sub i128 0, %abs
+  ret i128 %nabs
+}
+
+declare i8 @llvm.abs.i8(i8, i1)
+declare i16 @llvm.abs.i16(i16, i1)
+declare i32 @llvm.abs.i32(i32, i1)
+declare i64 @llvm.abs.i64(i64, i1)
+declare i128 @llvm.abs.i128(i128, i1)
+
+declare i8 @llvm.smax.i8(i8, i8)
+declare i16 @llvm.smax.i16(i16, i16)
+declare i32 @llvm.smax.i32(i32, i32)
+declare i64 @llvm.smax.i64(i64, i64)
+
+declare i8 @llvm.smin.i8(i8, i8)
+declare i16 @llvm.smin.i16(i16, i16)
+declare i32 @llvm.smin.i32(i32, i32)
+declare i64 @llvm.smin.i64(i64, i64)

diff  --git a/llvm/test/CodeGen/RISCV/abds.ll b/llvm/test/CodeGen/RISCV/abds.ll
new file mode 100644
index 0000000000000..053a060f3862a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/abds.ll
@@ -0,0 +1,2636 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv32 | FileCheck %s --check-prefixes=RV32I
+; RUN: llc < %s -mtriple=riscv64 | FileCheck %s --check-prefixes=RV64I
+; RUN: llc < %s -mtriple=riscv32 -mattr=+zbb | FileCheck %s --check-prefixes=ZBB,RV32ZBB
+; RUN: llc < %s -mtriple=riscv64 -mattr=+zbb | FileCheck %s --check-prefixes=ZBB,RV64ZBB
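+; The shared ZBB prefix covers output that is identical for the rv32 and
+; rv64 Zbb runs; RV32ZBB/RV64ZBB capture the cases where they diverge.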
+
+;
+; trunc(abs(sub(sext(a),sext(b)))) -> abds(a,b)
+;
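+; Illustrative: for i8 a = -100, b = 27 the sign-extended i64 subtract
+; yields -127 with no possibility of wrap in the wide type, abs gives
+; 127, and truncating back to i8 is lossless, matching abds(a,b).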
+
+define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind {
+; RV32I-LABEL: abd_ext_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    srai a0, a0, 24
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    srai a1, a1, 24
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_ext_i8:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    sext.b a0, a0
+; ZBB-NEXT:    sext.b a1, a1
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    neg a1, a0
+; ZBB-NEXT:    max a0, a0, a1
+; ZBB-NEXT:    ret
+  %aext = sext i8 %a to i64
+  %bext = sext i8 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %trunc = trunc i64 %abs to i8
+  ret i8 %trunc
+}
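+; Illustrative: the scalar abs above is branchless: with the sign mask
+; m = d >> 63 (or 31 on rv32), (d ^ m) - m computes |d|, while the Zbb
+; form relies on max(d, -d) = |d|.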
+
+define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_ext_i8_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    srai a0, a0, 24
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srai a1, a1, 16
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i8_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_ext_i8_i16:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    sext.b a0, a0
+; ZBB-NEXT:    sext.h a1, a1
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    neg a1, a0
+; ZBB-NEXT:    max a0, a0, a1
+; ZBB-NEXT:    ret
+  %aext = sext i8 %a to i64
+  %bext = sext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %trunc = trunc i64 %abs to i8
+  ret i8 %trunc
+}
+
+define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind {
+; RV32I-LABEL: abd_ext_i8_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    srai a0, a0, 24
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    srai a1, a1, 24
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i8_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_ext_i8_undef:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    sext.b a0, a0
+; ZBB-NEXT:    sext.b a1, a1
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    neg a1, a0
+; ZBB-NEXT:    max a0, a0, a1
+; ZBB-NEXT:    ret
+  %aext = sext i8 %a to i64
+  %bext = sext i8 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  %trunc = trunc i64 %abs to i8
+  ret i8 %trunc
+}
+
+define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_ext_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srai a1, a1, 16
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_ext_i16:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    sext.h a0, a0
+; ZBB-NEXT:    sext.h a1, a1
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    neg a1, a0
+; ZBB-NEXT:    max a0, a0, a1
+; ZBB-NEXT:    ret
+  %aext = sext i16 %a to i64
+  %bext = sext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %trunc = trunc i64 %abs to i16
+  ret i16 %trunc
+}
+
+define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_ext_i16_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srai a2, a0, 31
+; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    srai a3, a1, 31
+; RV32I-NEXT:    sltu a4, a0, a1
+; RV32I-NEXT:    sub a2, a2, a3
+; RV32I-NEXT:    sub a2, a2, a4
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    bgez a2, .LBB4_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:  .LBB4_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i16_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    sext.w a1, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i16_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sext.h a0, a0
+; RV32ZBB-NEXT:    srai a2, a0, 31
+; RV32ZBB-NEXT:    srai a3, a1, 31
+; RV32ZBB-NEXT:    sltu a4, a0, a1
+; RV32ZBB-NEXT:    sub a2, a2, a3
+; RV32ZBB-NEXT:    sub a2, a2, a4
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    bgez a2, .LBB4_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:  .LBB4_2:
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i16_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sext.h a0, a0
+; RV64ZBB-NEXT:    sext.w a1, a1
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    max a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %aext = sext i16 %a to i64
+  %bext = sext i32 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %trunc = trunc i64 %abs to i16
+  ret i16 %trunc
+}
+
+define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_ext_i16_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srai a1, a1, 16
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i16_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_ext_i16_undef:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    sext.h a0, a0
+; ZBB-NEXT:    sext.h a1, a1
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    neg a1, a0
+; ZBB-NEXT:    max a0, a0, a1
+; ZBB-NEXT:    ret
+  %aext = sext i16 %a to i64
+  %bext = sext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  %trunc = trunc i64 %abs to i16
+  ret i16 %trunc
+}
+
+define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_ext_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srai a2, a0, 31
+; RV32I-NEXT:    srai a3, a1, 31
+; RV32I-NEXT:    sltu a4, a0, a1
+; RV32I-NEXT:    sub a2, a2, a3
+; RV32I-NEXT:    sub a2, a2, a4
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    bgez a2, .LBB6_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:  .LBB6_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    sext.w a1, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    srai a2, a0, 31
+; RV32ZBB-NEXT:    srai a3, a1, 31
+; RV32ZBB-NEXT:    sltu a4, a0, a1
+; RV32ZBB-NEXT:    sub a2, a2, a3
+; RV32ZBB-NEXT:    sub a2, a2, a4
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    bgez a2, .LBB6_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:  .LBB6_2:
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sext.w a0, a0
+; RV64ZBB-NEXT:    sext.w a1, a1
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    max a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %aext = sext i32 %a to i64
+  %bext = sext i32 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %trunc = trunc i64 %abs to i32
+  ret i32 %trunc
+}
+
+define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_ext_i32_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srai a2, a0, 31
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srai a3, a1, 31
+; RV32I-NEXT:    srai a1, a1, 16
+; RV32I-NEXT:    sltu a4, a0, a1
+; RV32I-NEXT:    sub a2, a2, a3
+; RV32I-NEXT:    sub a2, a2, a4
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    bgez a2, .LBB7_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:  .LBB7_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i32_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i32_i16:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    srai a2, a0, 31
+; RV32ZBB-NEXT:    sext.h a1, a1
+; RV32ZBB-NEXT:    srai a3, a1, 31
+; RV32ZBB-NEXT:    sltu a4, a0, a1
+; RV32ZBB-NEXT:    sub a2, a2, a3
+; RV32ZBB-NEXT:    sub a2, a2, a4
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    bgez a2, .LBB7_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:  .LBB7_2:
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i32_i16:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sext.w a0, a0
+; RV64ZBB-NEXT:    sext.h a1, a1
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    max a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %aext = sext i32 %a to i64
+  %bext = sext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %trunc = trunc i64 %abs to i32
+  ret i32 %trunc
+}
+
+define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_ext_i32_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srai a2, a0, 31
+; RV32I-NEXT:    srai a3, a1, 31
+; RV32I-NEXT:    sltu a4, a0, a1
+; RV32I-NEXT:    sub a2, a2, a3
+; RV32I-NEXT:    sub a2, a2, a4
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    bgez a2, .LBB8_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:  .LBB8_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i32_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    sext.w a1, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i32_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    srai a2, a0, 31
+; RV32ZBB-NEXT:    srai a3, a1, 31
+; RV32ZBB-NEXT:    sltu a4, a0, a1
+; RV32ZBB-NEXT:    sub a2, a2, a3
+; RV32ZBB-NEXT:    sub a2, a2, a4
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    bgez a2, .LBB8_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:  .LBB8_2:
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i32_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sext.w a0, a0
+; RV64ZBB-NEXT:    sext.w a1, a1
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    max a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %aext = sext i32 %a to i64
+  %bext = sext i32 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  %trunc = trunc i64 %abs to i32
+  ret i32 %trunc
+}
+
+define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: abd_ext_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srai a5, a1, 31
+; RV32I-NEXT:    sltu a4, a0, a2
+; RV32I-NEXT:    srai a6, a3, 31
+; RV32I-NEXT:    mv a7, a4
+; RV32I-NEXT:    beq a1, a3, .LBB9_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu a7, a1, a3
+; RV32I-NEXT:  .LBB9_2:
+; RV32I-NEXT:    sub t0, a5, a6
+; RV32I-NEXT:    sltu a7, t0, a7
+; RV32I-NEXT:    sltu a5, a5, a6
+; RV32I-NEXT:    sub a5, t0, a5
+; RV32I-NEXT:    sub a5, a5, a7
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sub a1, a1, a4
+; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    bgez a5, .LBB9_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    snez a2, a0
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:  .LBB9_4:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srai a2, a0, 63
+; RV64I-NEXT:    srai a3, a1, 63
+; RV64I-NEXT:    sltu a4, a0, a1
+; RV64I-NEXT:    sub a2, a2, a3
+; RV64I-NEXT:    sub a2, a2, a4
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    bgez a2, .LBB9_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:  .LBB9_2:
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i64:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    srai a5, a1, 31
+; RV32ZBB-NEXT:    sltu a4, a0, a2
+; RV32ZBB-NEXT:    srai a6, a3, 31
+; RV32ZBB-NEXT:    mv a7, a4
+; RV32ZBB-NEXT:    beq a1, a3, .LBB9_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu a7, a1, a3
+; RV32ZBB-NEXT:  .LBB9_2:
+; RV32ZBB-NEXT:    sub t0, a5, a6
+; RV32ZBB-NEXT:    sltu a7, t0, a7
+; RV32ZBB-NEXT:    sltu a5, a5, a6
+; RV32ZBB-NEXT:    sub a5, t0, a5
+; RV32ZBB-NEXT:    sub a5, a5, a7
+; RV32ZBB-NEXT:    sub a1, a1, a3
+; RV32ZBB-NEXT:    sub a1, a1, a4
+; RV32ZBB-NEXT:    sub a0, a0, a2
+; RV32ZBB-NEXT:    bgez a5, .LBB9_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    snez a2, a0
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:    add a1, a1, a2
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:  .LBB9_4:
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i64:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    srai a2, a0, 63
+; RV64ZBB-NEXT:    srai a3, a1, 63
+; RV64ZBB-NEXT:    sltu a4, a0, a1
+; RV64ZBB-NEXT:    sub a2, a2, a3
+; RV64ZBB-NEXT:    sub a2, a2, a4
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    bgez a2, .LBB9_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    neg a0, a0
+; RV64ZBB-NEXT:  .LBB9_2:
+; RV64ZBB-NEXT:    ret
+  %aext = sext i64 %a to i128
+  %bext = sext i64 %b to i128
+  %sub = sub i128 %aext, %bext
+  %abs = call i128 @llvm.abs.i128(i128 %sub, i1 false)
+  %trunc = trunc i128 %abs to i64
+  ret i64 %trunc
+}
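+; Illustrative: only the sign of the widened difference's high half is
+; needed, so the rv32 code computes just enough of the i128 subtract to
+; branch on bgez before conditionally negating the two-word result.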
+
+define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: abd_ext_i64_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srai a5, a1, 31
+; RV32I-NEXT:    sltu a4, a0, a2
+; RV32I-NEXT:    srai a6, a3, 31
+; RV32I-NEXT:    mv a7, a4
+; RV32I-NEXT:    beq a1, a3, .LBB10_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu a7, a1, a3
+; RV32I-NEXT:  .LBB10_2:
+; RV32I-NEXT:    sub t0, a5, a6
+; RV32I-NEXT:    sltu a7, t0, a7
+; RV32I-NEXT:    sltu a5, a5, a6
+; RV32I-NEXT:    sub a5, t0, a5
+; RV32I-NEXT:    sub a5, a5, a7
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sub a1, a1, a4
+; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    bgez a5, .LBB10_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    snez a2, a0
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:  .LBB10_4:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i64_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srai a2, a0, 63
+; RV64I-NEXT:    srai a3, a1, 63
+; RV64I-NEXT:    sltu a4, a0, a1
+; RV64I-NEXT:    sub a2, a2, a3
+; RV64I-NEXT:    sub a2, a2, a4
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    bgez a2, .LBB10_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:  .LBB10_2:
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i64_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    srai a5, a1, 31
+; RV32ZBB-NEXT:    sltu a4, a0, a2
+; RV32ZBB-NEXT:    srai a6, a3, 31
+; RV32ZBB-NEXT:    mv a7, a4
+; RV32ZBB-NEXT:    beq a1, a3, .LBB10_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu a7, a1, a3
+; RV32ZBB-NEXT:  .LBB10_2:
+; RV32ZBB-NEXT:    sub t0, a5, a6
+; RV32ZBB-NEXT:    sltu a7, t0, a7
+; RV32ZBB-NEXT:    sltu a5, a5, a6
+; RV32ZBB-NEXT:    sub a5, t0, a5
+; RV32ZBB-NEXT:    sub a5, a5, a7
+; RV32ZBB-NEXT:    sub a1, a1, a3
+; RV32ZBB-NEXT:    sub a1, a1, a4
+; RV32ZBB-NEXT:    sub a0, a0, a2
+; RV32ZBB-NEXT:    bgez a5, .LBB10_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    snez a2, a0
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:    add a1, a1, a2
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:  .LBB10_4:
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i64_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    srai a2, a0, 63
+; RV64ZBB-NEXT:    srai a3, a1, 63
+; RV64ZBB-NEXT:    sltu a4, a0, a1
+; RV64ZBB-NEXT:    sub a2, a2, a3
+; RV64ZBB-NEXT:    sub a2, a2, a4
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    bgez a2, .LBB10_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    neg a0, a0
+; RV64ZBB-NEXT:  .LBB10_2:
+; RV64ZBB-NEXT:    ret
+  %aext = sext i64 %a to i128
+  %bext = sext i64 %b to i128
+  %sub = sub i128 %aext, %bext
+  %abs = call i128 @llvm.abs.i128(i128 %sub, i1 true)
+  %trunc = trunc i128 %abs to i64
+  ret i64 %trunc
+}
+
+define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind {
+; RV32I-LABEL: abd_ext_i128:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lw a3, 0(a2)
+; RV32I-NEXT:    lw a4, 0(a1)
+; RV32I-NEXT:    lw a5, 4(a2)
+; RV32I-NEXT:    lw a6, 8(a2)
+; RV32I-NEXT:    lw a7, 8(a1)
+; RV32I-NEXT:    lw a2, 12(a2)
+; RV32I-NEXT:    lw t0, 12(a1)
+; RV32I-NEXT:    lw a1, 4(a1)
+; RV32I-NEXT:    sltu t1, a7, a6
+; RV32I-NEXT:    mv t4, t1
+; RV32I-NEXT:    beq t0, a2, .LBB11_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu t4, t0, a2
+; RV32I-NEXT:  .LBB11_2:
+; RV32I-NEXT:    sltu t2, a4, a3
+; RV32I-NEXT:    mv t3, t2
+; RV32I-NEXT:    beq a1, a5, .LBB11_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    sltu t3, a1, a5
+; RV32I-NEXT:  .LBB11_4:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    srai t5, t0, 31
+; RV32I-NEXT:    xor t6, t0, a2
+; RV32I-NEXT:    xor s0, a7, a6
+; RV32I-NEXT:    or s1, s0, t6
+; RV32I-NEXT:    srai t6, a2, 31
+; RV32I-NEXT:    mv s0, t3
+; RV32I-NEXT:    beqz s1, .LBB11_6
+; RV32I-NEXT:  # %bb.5:
+; RV32I-NEXT:    mv s0, t4
+; RV32I-NEXT:  .LBB11_6:
+; RV32I-NEXT:    sub t4, t5, t6
+; RV32I-NEXT:    sltu s0, t4, s0
+; RV32I-NEXT:    sltu t5, t5, t6
+; RV32I-NEXT:    sub t6, t4, t5
+; RV32I-NEXT:    seqz s1, t6
+; RV32I-NEXT:    and s0, s1, s0
+; RV32I-NEXT:    sltu s0, t6, s0
+; RV32I-NEXT:    sltu t4, t4, t5
+; RV32I-NEXT:    sub t4, t6, t4
+; RV32I-NEXT:    sub t4, t4, s0
+; RV32I-NEXT:    sub a2, t0, a2
+; RV32I-NEXT:    sub a2, a2, t1
+; RV32I-NEXT:    sub a6, a7, a6
+; RV32I-NEXT:    sltu a7, a6, t3
+; RV32I-NEXT:    sub a2, a2, a7
+; RV32I-NEXT:    sub a6, a6, t3
+; RV32I-NEXT:    sub a1, a1, a5
+; RV32I-NEXT:    sub a1, a1, t2
+; RV32I-NEXT:    sub a4, a4, a3
+; RV32I-NEXT:    bgez t4, .LBB11_8
+; RV32I-NEXT:  # %bb.7:
+; RV32I-NEXT:    snez a3, a1
+; RV32I-NEXT:    snez a5, a4
+; RV32I-NEXT:    or a3, a5, a3
+; RV32I-NEXT:    neg a7, a6
+; RV32I-NEXT:    sltu t0, a7, a3
+; RV32I-NEXT:    snez a6, a6
+; RV32I-NEXT:    add a2, a2, a6
+; RV32I-NEXT:    neg a2, a2
+; RV32I-NEXT:    sub a2, a2, t0
+; RV32I-NEXT:    neg a4, a4
+; RV32I-NEXT:    add a1, a1, a5
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    sub a6, a7, a3
+; RV32I-NEXT:  .LBB11_8:
+; RV32I-NEXT:    sw a2, 12(a0)
+; RV32I-NEXT:    sw a6, 8(a0)
+; RV32I-NEXT:    sw a1, 4(a0)
+; RV32I-NEXT:    sw a4, 0(a0)
+; RV32I-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srai a5, a1, 63
+; RV64I-NEXT:    sltu a4, a0, a2
+; RV64I-NEXT:    srai a6, a3, 63
+; RV64I-NEXT:    mv a7, a4
+; RV64I-NEXT:    beq a1, a3, .LBB11_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sltu a7, a1, a3
+; RV64I-NEXT:  .LBB11_2:
+; RV64I-NEXT:    sub t0, a5, a6
+; RV64I-NEXT:    sltu a7, t0, a7
+; RV64I-NEXT:    sltu a5, a5, a6
+; RV64I-NEXT:    sub a5, t0, a5
+; RV64I-NEXT:    sub a5, a5, a7
+; RV64I-NEXT:    sub a1, a1, a3
+; RV64I-NEXT:    sub a1, a1, a4
+; RV64I-NEXT:    sub a0, a0, a2
+; RV64I-NEXT:    bgez a5, .LBB11_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    snez a2, a0
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:  .LBB11_4:
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i128:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lw a3, 0(a2)
+; RV32ZBB-NEXT:    lw a4, 0(a1)
+; RV32ZBB-NEXT:    lw a5, 4(a2)
+; RV32ZBB-NEXT:    lw a6, 8(a2)
+; RV32ZBB-NEXT:    lw a7, 8(a1)
+; RV32ZBB-NEXT:    lw a2, 12(a2)
+; RV32ZBB-NEXT:    lw t0, 12(a1)
+; RV32ZBB-NEXT:    lw a1, 4(a1)
+; RV32ZBB-NEXT:    sltu t1, a7, a6
+; RV32ZBB-NEXT:    mv t4, t1
+; RV32ZBB-NEXT:    beq t0, a2, .LBB11_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu t4, t0, a2
+; RV32ZBB-NEXT:  .LBB11_2:
+; RV32ZBB-NEXT:    sltu t2, a4, a3
+; RV32ZBB-NEXT:    mv t3, t2
+; RV32ZBB-NEXT:    beq a1, a5, .LBB11_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    sltu t3, a1, a5
+; RV32ZBB-NEXT:  .LBB11_4:
+; RV32ZBB-NEXT:    addi sp, sp, -16
+; RV32ZBB-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT:    sw s1, 8(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT:    srai t5, t0, 31
+; RV32ZBB-NEXT:    xor t6, t0, a2
+; RV32ZBB-NEXT:    xor s0, a7, a6
+; RV32ZBB-NEXT:    or s1, s0, t6
+; RV32ZBB-NEXT:    srai t6, a2, 31
+; RV32ZBB-NEXT:    mv s0, t3
+; RV32ZBB-NEXT:    beqz s1, .LBB11_6
+; RV32ZBB-NEXT:  # %bb.5:
+; RV32ZBB-NEXT:    mv s0, t4
+; RV32ZBB-NEXT:  .LBB11_6:
+; RV32ZBB-NEXT:    sub t4, t5, t6
+; RV32ZBB-NEXT:    sltu s0, t4, s0
+; RV32ZBB-NEXT:    sltu t5, t5, t6
+; RV32ZBB-NEXT:    sub t6, t4, t5
+; RV32ZBB-NEXT:    seqz s1, t6
+; RV32ZBB-NEXT:    and s0, s1, s0
+; RV32ZBB-NEXT:    sltu s0, t6, s0
+; RV32ZBB-NEXT:    sltu t4, t4, t5
+; RV32ZBB-NEXT:    sub t4, t6, t4
+; RV32ZBB-NEXT:    sub t4, t4, s0
+; RV32ZBB-NEXT:    sub a2, t0, a2
+; RV32ZBB-NEXT:    sub a2, a2, t1
+; RV32ZBB-NEXT:    sub a6, a7, a6
+; RV32ZBB-NEXT:    sltu a7, a6, t3
+; RV32ZBB-NEXT:    sub a2, a2, a7
+; RV32ZBB-NEXT:    sub a6, a6, t3
+; RV32ZBB-NEXT:    sub a1, a1, a5
+; RV32ZBB-NEXT:    sub a1, a1, t2
+; RV32ZBB-NEXT:    sub a4, a4, a3
+; RV32ZBB-NEXT:    bgez t4, .LBB11_8
+; RV32ZBB-NEXT:  # %bb.7:
+; RV32ZBB-NEXT:    snez a3, a1
+; RV32ZBB-NEXT:    snez a5, a4
+; RV32ZBB-NEXT:    or a3, a5, a3
+; RV32ZBB-NEXT:    neg a7, a6
+; RV32ZBB-NEXT:    sltu t0, a7, a3
+; RV32ZBB-NEXT:    snez a6, a6
+; RV32ZBB-NEXT:    add a2, a2, a6
+; RV32ZBB-NEXT:    neg a2, a2
+; RV32ZBB-NEXT:    sub a2, a2, t0
+; RV32ZBB-NEXT:    neg a4, a4
+; RV32ZBB-NEXT:    add a1, a1, a5
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:    sub a6, a7, a3
+; RV32ZBB-NEXT:  .LBB11_8:
+; RV32ZBB-NEXT:    sw a2, 12(a0)
+; RV32ZBB-NEXT:    sw a6, 8(a0)
+; RV32ZBB-NEXT:    sw a1, 4(a0)
+; RV32ZBB-NEXT:    sw a4, 0(a0)
+; RV32ZBB-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT:    addi sp, sp, 16
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i128:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    srai a5, a1, 63
+; RV64ZBB-NEXT:    sltu a4, a0, a2
+; RV64ZBB-NEXT:    srai a6, a3, 63
+; RV64ZBB-NEXT:    mv a7, a4
+; RV64ZBB-NEXT:    beq a1, a3, .LBB11_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    sltu a7, a1, a3
+; RV64ZBB-NEXT:  .LBB11_2:
+; RV64ZBB-NEXT:    sub t0, a5, a6
+; RV64ZBB-NEXT:    sltu a7, t0, a7
+; RV64ZBB-NEXT:    sltu a5, a5, a6
+; RV64ZBB-NEXT:    sub a5, t0, a5
+; RV64ZBB-NEXT:    sub a5, a5, a7
+; RV64ZBB-NEXT:    sub a1, a1, a3
+; RV64ZBB-NEXT:    sub a1, a1, a4
+; RV64ZBB-NEXT:    sub a0, a0, a2
+; RV64ZBB-NEXT:    bgez a5, .LBB11_4
+; RV64ZBB-NEXT:  # %bb.3:
+; RV64ZBB-NEXT:    snez a2, a0
+; RV64ZBB-NEXT:    neg a0, a0
+; RV64ZBB-NEXT:    add a1, a1, a2
+; RV64ZBB-NEXT:    neg a1, a1
+; RV64ZBB-NEXT:  .LBB11_4:
+; RV64ZBB-NEXT:    ret
+  %aext = sext i128 %a to i256
+  %bext = sext i128 %b to i256
+  %sub = sub i256 %aext, %bext
+  %abs = call i256 @llvm.abs.i256(i256 %sub, i1 false)
+  %trunc = trunc i256 %abs to i128
+  ret i128 %trunc
+}
+
+define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind {
+; RV32I-LABEL: abd_ext_i128_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lw a3, 0(a2)
+; RV32I-NEXT:    lw a4, 0(a1)
+; RV32I-NEXT:    lw a5, 4(a2)
+; RV32I-NEXT:    lw a6, 8(a2)
+; RV32I-NEXT:    lw a7, 8(a1)
+; RV32I-NEXT:    lw a2, 12(a2)
+; RV32I-NEXT:    lw t0, 12(a1)
+; RV32I-NEXT:    lw a1, 4(a1)
+; RV32I-NEXT:    sltu t1, a7, a6
+; RV32I-NEXT:    mv t4, t1
+; RV32I-NEXT:    beq t0, a2, .LBB12_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu t4, t0, a2
+; RV32I-NEXT:  .LBB12_2:
+; RV32I-NEXT:    sltu t2, a4, a3
+; RV32I-NEXT:    mv t3, t2
+; RV32I-NEXT:    beq a1, a5, .LBB12_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    sltu t3, a1, a5
+; RV32I-NEXT:  .LBB12_4:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    srai t5, t0, 31
+; RV32I-NEXT:    xor t6, t0, a2
+; RV32I-NEXT:    xor s0, a7, a6
+; RV32I-NEXT:    or s1, s0, t6
+; RV32I-NEXT:    srai t6, a2, 31
+; RV32I-NEXT:    mv s0, t3
+; RV32I-NEXT:    beqz s1, .LBB12_6
+; RV32I-NEXT:  # %bb.5:
+; RV32I-NEXT:    mv s0, t4
+; RV32I-NEXT:  .LBB12_6:
+; RV32I-NEXT:    sub t4, t5, t6
+; RV32I-NEXT:    sltu s0, t4, s0
+; RV32I-NEXT:    sltu t5, t5, t6
+; RV32I-NEXT:    sub t6, t4, t5
+; RV32I-NEXT:    seqz s1, t6
+; RV32I-NEXT:    and s0, s1, s0
+; RV32I-NEXT:    sltu s0, t6, s0
+; RV32I-NEXT:    sltu t4, t4, t5
+; RV32I-NEXT:    sub t4, t6, t4
+; RV32I-NEXT:    sub t4, t4, s0
+; RV32I-NEXT:    sub a2, t0, a2
+; RV32I-NEXT:    sub a2, a2, t1
+; RV32I-NEXT:    sub a6, a7, a6
+; RV32I-NEXT:    sltu a7, a6, t3
+; RV32I-NEXT:    sub a2, a2, a7
+; RV32I-NEXT:    sub a6, a6, t3
+; RV32I-NEXT:    sub a1, a1, a5
+; RV32I-NEXT:    sub a1, a1, t2
+; RV32I-NEXT:    sub a4, a4, a3
+; RV32I-NEXT:    bgez t4, .LBB12_8
+; RV32I-NEXT:  # %bb.7:
+; RV32I-NEXT:    snez a3, a1
+; RV32I-NEXT:    snez a5, a4
+; RV32I-NEXT:    or a3, a5, a3
+; RV32I-NEXT:    neg a7, a6
+; RV32I-NEXT:    sltu t0, a7, a3
+; RV32I-NEXT:    snez a6, a6
+; RV32I-NEXT:    add a2, a2, a6
+; RV32I-NEXT:    neg a2, a2
+; RV32I-NEXT:    sub a2, a2, t0
+; RV32I-NEXT:    neg a4, a4
+; RV32I-NEXT:    add a1, a1, a5
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    sub a6, a7, a3
+; RV32I-NEXT:  .LBB12_8:
+; RV32I-NEXT:    sw a2, 12(a0)
+; RV32I-NEXT:    sw a6, 8(a0)
+; RV32I-NEXT:    sw a1, 4(a0)
+; RV32I-NEXT:    sw a4, 0(a0)
+; RV32I-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i128_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srai a5, a1, 63
+; RV64I-NEXT:    sltu a4, a0, a2
+; RV64I-NEXT:    srai a6, a3, 63
+; RV64I-NEXT:    mv a7, a4
+; RV64I-NEXT:    beq a1, a3, .LBB12_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sltu a7, a1, a3
+; RV64I-NEXT:  .LBB12_2:
+; RV64I-NEXT:    sub t0, a5, a6
+; RV64I-NEXT:    sltu a7, t0, a7
+; RV64I-NEXT:    sltu a5, a5, a6
+; RV64I-NEXT:    sub a5, t0, a5
+; RV64I-NEXT:    sub a5, a5, a7
+; RV64I-NEXT:    sub a1, a1, a3
+; RV64I-NEXT:    sub a1, a1, a4
+; RV64I-NEXT:    sub a0, a0, a2
+; RV64I-NEXT:    bgez a5, .LBB12_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    snez a2, a0
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:  .LBB12_4:
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i128_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lw a3, 0(a2)
+; RV32ZBB-NEXT:    lw a4, 0(a1)
+; RV32ZBB-NEXT:    lw a5, 4(a2)
+; RV32ZBB-NEXT:    lw a6, 8(a2)
+; RV32ZBB-NEXT:    lw a7, 8(a1)
+; RV32ZBB-NEXT:    lw a2, 12(a2)
+; RV32ZBB-NEXT:    lw t0, 12(a1)
+; RV32ZBB-NEXT:    lw a1, 4(a1)
+; RV32ZBB-NEXT:    sltu t1, a7, a6
+; RV32ZBB-NEXT:    mv t4, t1
+; RV32ZBB-NEXT:    beq t0, a2, .LBB12_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu t4, t0, a2
+; RV32ZBB-NEXT:  .LBB12_2:
+; RV32ZBB-NEXT:    sltu t2, a4, a3
+; RV32ZBB-NEXT:    mv t3, t2
+; RV32ZBB-NEXT:    beq a1, a5, .LBB12_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    sltu t3, a1, a5
+; RV32ZBB-NEXT:  .LBB12_4:
+; RV32ZBB-NEXT:    addi sp, sp, -16
+; RV32ZBB-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT:    sw s1, 8(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT:    srai t5, t0, 31
+; RV32ZBB-NEXT:    xor t6, t0, a2
+; RV32ZBB-NEXT:    xor s0, a7, a6
+; RV32ZBB-NEXT:    or s1, s0, t6
+; RV32ZBB-NEXT:    srai t6, a2, 31
+; RV32ZBB-NEXT:    mv s0, t3
+; RV32ZBB-NEXT:    beqz s1, .LBB12_6
+; RV32ZBB-NEXT:  # %bb.5:
+; RV32ZBB-NEXT:    mv s0, t4
+; RV32ZBB-NEXT:  .LBB12_6:
+; RV32ZBB-NEXT:    sub t4, t5, t6
+; RV32ZBB-NEXT:    sltu s0, t4, s0
+; RV32ZBB-NEXT:    sltu t5, t5, t6
+; RV32ZBB-NEXT:    sub t6, t4, t5
+; RV32ZBB-NEXT:    seqz s1, t6
+; RV32ZBB-NEXT:    and s0, s1, s0
+; RV32ZBB-NEXT:    sltu s0, t6, s0
+; RV32ZBB-NEXT:    sltu t4, t4, t5
+; RV32ZBB-NEXT:    sub t4, t6, t4
+; RV32ZBB-NEXT:    sub t4, t4, s0
+; RV32ZBB-NEXT:    sub a2, t0, a2
+; RV32ZBB-NEXT:    sub a2, a2, t1
+; RV32ZBB-NEXT:    sub a6, a7, a6
+; RV32ZBB-NEXT:    sltu a7, a6, t3
+; RV32ZBB-NEXT:    sub a2, a2, a7
+; RV32ZBB-NEXT:    sub a6, a6, t3
+; RV32ZBB-NEXT:    sub a1, a1, a5
+; RV32ZBB-NEXT:    sub a1, a1, t2
+; RV32ZBB-NEXT:    sub a4, a4, a3
+; RV32ZBB-NEXT:    bgez t4, .LBB12_8
+; RV32ZBB-NEXT:  # %bb.7:
+; RV32ZBB-NEXT:    snez a3, a1
+; RV32ZBB-NEXT:    snez a5, a4
+; RV32ZBB-NEXT:    or a3, a5, a3
+; RV32ZBB-NEXT:    neg a7, a6
+; RV32ZBB-NEXT:    sltu t0, a7, a3
+; RV32ZBB-NEXT:    snez a6, a6
+; RV32ZBB-NEXT:    add a2, a2, a6
+; RV32ZBB-NEXT:    neg a2, a2
+; RV32ZBB-NEXT:    sub a2, a2, t0
+; RV32ZBB-NEXT:    neg a4, a4
+; RV32ZBB-NEXT:    add a1, a1, a5
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:    sub a6, a7, a3
+; RV32ZBB-NEXT:  .LBB12_8:
+; RV32ZBB-NEXT:    sw a2, 12(a0)
+; RV32ZBB-NEXT:    sw a6, 8(a0)
+; RV32ZBB-NEXT:    sw a1, 4(a0)
+; RV32ZBB-NEXT:    sw a4, 0(a0)
+; RV32ZBB-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT:    addi sp, sp, 16
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i128_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    srai a5, a1, 63
+; RV64ZBB-NEXT:    sltu a4, a0, a2
+; RV64ZBB-NEXT:    srai a6, a3, 63
+; RV64ZBB-NEXT:    mv a7, a4
+; RV64ZBB-NEXT:    beq a1, a3, .LBB12_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    sltu a7, a1, a3
+; RV64ZBB-NEXT:  .LBB12_2:
+; RV64ZBB-NEXT:    sub t0, a5, a6
+; RV64ZBB-NEXT:    sltu a7, t0, a7
+; RV64ZBB-NEXT:    sltu a5, a5, a6
+; RV64ZBB-NEXT:    sub a5, t0, a5
+; RV64ZBB-NEXT:    sub a5, a5, a7
+; RV64ZBB-NEXT:    sub a1, a1, a3
+; RV64ZBB-NEXT:    sub a1, a1, a4
+; RV64ZBB-NEXT:    sub a0, a0, a2
+; RV64ZBB-NEXT:    bgez a5, .LBB12_4
+; RV64ZBB-NEXT:  # %bb.3:
+; RV64ZBB-NEXT:    snez a2, a0
+; RV64ZBB-NEXT:    neg a0, a0
+; RV64ZBB-NEXT:    add a1, a1, a2
+; RV64ZBB-NEXT:    neg a1, a1
+; RV64ZBB-NEXT:  .LBB12_4:
+; RV64ZBB-NEXT:    ret
+  %aext = sext i128 %a to i256
+  %bext = sext i128 %b to i256
+  %sub = sub i256 %aext, %bext
+  %abs = call i256 @llvm.abs.i256(i256 %sub, i1 true)
+  %trunc = trunc i256 %abs to i128
+  ret i128 %trunc
+}
+
+;
+; sub(smax(a,b),smin(a,b)) -> abds(a,b)
+;
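+; smax selects the larger and smin the smaller of the same two operands, so
+; max - min is always the non-negative difference |a - b| (modulo 2^n), which
+; matches the semantics of an abds node; e.g. for i8 a = 7, b = -3:
+; smax = 7, smin = -3, and 7 - (-3) = 10 = |7 - (-3)|.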
+
+define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind {
+; RV32I-LABEL: abd_minmax_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    srai a1, a1, 24
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    srai a0, a0, 24
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bge a0, a1, .LBB13_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    bge a1, a0, .LBB13_4
+; RV32I-NEXT:  .LBB13_2:
+; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB13_3:
+; RV32I-NEXT:    mv a2, a1
+; RV32I-NEXT:    blt a1, a0, .LBB13_2
+; RV32I-NEXT:  .LBB13_4:
+; RV32I-NEXT:    sub a0, a1, a2
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_minmax_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge a0, a1, .LBB13_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bge a1, a0, .LBB13_4
+; RV64I-NEXT:  .LBB13_2:
+; RV64I-NEXT:    sub a0, a0, a2
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB13_3:
+; RV64I-NEXT:    mv a2, a1
+; RV64I-NEXT:    blt a1, a0, .LBB13_2
+; RV64I-NEXT:  .LBB13_4:
+; RV64I-NEXT:    sub a0, a1, a2
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_minmax_i8:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    sext.b a1, a1
+; ZBB-NEXT:    sext.b a0, a0
+; ZBB-NEXT:    min a2, a0, a1
+; ZBB-NEXT:    max a0, a0, a1
+; ZBB-NEXT:    sub a0, a0, a2
+; ZBB-NEXT:    ret
+  %min = call i8 @llvm.smin.i8(i8 %a, i8 %b)
+  %max = call i8 @llvm.smax.i8(i8 %a, i8 %b)
+  %sub = sub i8 %max, %min
+  ret i8 %sub
+}
+
+define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_minmax_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srai a1, a1, 16
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bge a0, a1, .LBB14_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    bge a1, a0, .LBB14_4
+; RV32I-NEXT:  .LBB14_2:
+; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB14_3:
+; RV32I-NEXT:    mv a2, a1
+; RV32I-NEXT:    blt a1, a0, .LBB14_2
+; RV32I-NEXT:  .LBB14_4:
+; RV32I-NEXT:    sub a0, a1, a2
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_minmax_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge a0, a1, .LBB14_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bge a1, a0, .LBB14_4
+; RV64I-NEXT:  .LBB14_2:
+; RV64I-NEXT:    sub a0, a0, a2
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB14_3:
+; RV64I-NEXT:    mv a2, a1
+; RV64I-NEXT:    blt a1, a0, .LBB14_2
+; RV64I-NEXT:  .LBB14_4:
+; RV64I-NEXT:    sub a0, a1, a2
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_minmax_i16:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    sext.h a1, a1
+; ZBB-NEXT:    sext.h a0, a0
+; ZBB-NEXT:    min a2, a0, a1
+; ZBB-NEXT:    max a0, a0, a1
+; ZBB-NEXT:    sub a0, a0, a2
+; ZBB-NEXT:    ret
+  %min = call i16 @llvm.smin.i16(i16 %a, i16 %b)
+  %max = call i16 @llvm.smax.i16(i16 %a, i16 %b)
+  %sub = sub i16 %max, %min
+  ret i16 %sub
+}
+
+define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_minmax_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bge a0, a1, .LBB15_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    bge a1, a0, .LBB15_4
+; RV32I-NEXT:  .LBB15_2:
+; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB15_3:
+; RV32I-NEXT:    mv a2, a1
+; RV32I-NEXT:    blt a1, a0, .LBB15_2
+; RV32I-NEXT:  .LBB15_4:
+; RV32I-NEXT:    sub a0, a1, a2
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_minmax_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a1, a1
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge a0, a1, .LBB15_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bge a1, a0, .LBB15_4
+; RV64I-NEXT:  .LBB15_2:
+; RV64I-NEXT:    subw a0, a0, a2
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB15_3:
+; RV64I-NEXT:    mv a2, a1
+; RV64I-NEXT:    blt a1, a0, .LBB15_2
+; RV64I-NEXT:  .LBB15_4:
+; RV64I-NEXT:    subw a0, a1, a2
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_minmax_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    min a2, a0, a1
+; RV32ZBB-NEXT:    max a0, a0, a1
+; RV32ZBB-NEXT:    sub a0, a0, a2
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_minmax_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sext.w a1, a1
+; RV64ZBB-NEXT:    sext.w a0, a0
+; RV64ZBB-NEXT:    min a2, a0, a1
+; RV64ZBB-NEXT:    max a0, a0, a1
+; RV64ZBB-NEXT:    subw a0, a0, a2
+; RV64ZBB-NEXT:    ret
+  %min = call i32 @llvm.smin.i32(i32 %a, i32 %b)
+  %max = call i32 @llvm.smax.i32(i32 %a, i32 %b)
+  %sub = sub i32 %max, %min
+  ret i32 %sub
+}
+
+define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: abd_minmax_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    beq a1, a3, .LBB16_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    slt a6, a1, a3
+; RV32I-NEXT:    j .LBB16_3
+; RV32I-NEXT:  .LBB16_2:
+; RV32I-NEXT:    sltu a6, a0, a2
+; RV32I-NEXT:  .LBB16_3:
+; RV32I-NEXT:    mv a4, a1
+; RV32I-NEXT:    mv a5, a0
+; RV32I-NEXT:    bnez a6, .LBB16_5
+; RV32I-NEXT:  # %bb.4:
+; RV32I-NEXT:    mv a4, a3
+; RV32I-NEXT:    mv a5, a2
+; RV32I-NEXT:  .LBB16_5:
+; RV32I-NEXT:    beq a1, a3, .LBB16_7
+; RV32I-NEXT:  # %bb.6:
+; RV32I-NEXT:    slt a6, a3, a1
+; RV32I-NEXT:    beqz a6, .LBB16_8
+; RV32I-NEXT:    j .LBB16_9
+; RV32I-NEXT:  .LBB16_7:
+; RV32I-NEXT:    sltu a6, a2, a0
+; RV32I-NEXT:    bnez a6, .LBB16_9
+; RV32I-NEXT:  .LBB16_8:
+; RV32I-NEXT:    mv a1, a3
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:  .LBB16_9:
+; RV32I-NEXT:    sltu a2, a0, a5
+; RV32I-NEXT:    sub a1, a1, a4
+; RV32I-NEXT:    sub a1, a1, a2
+; RV32I-NEXT:    sub a0, a0, a5
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_minmax_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bge a0, a1, .LBB16_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bge a1, a0, .LBB16_4
+; RV64I-NEXT:  .LBB16_2:
+; RV64I-NEXT:    sub a0, a0, a2
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB16_3:
+; RV64I-NEXT:    mv a2, a1
+; RV64I-NEXT:    blt a1, a0, .LBB16_2
+; RV64I-NEXT:  .LBB16_4:
+; RV64I-NEXT:    sub a0, a1, a2
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_minmax_i64:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    beq a1, a3, .LBB16_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    slt a6, a1, a3
+; RV32ZBB-NEXT:    j .LBB16_3
+; RV32ZBB-NEXT:  .LBB16_2:
+; RV32ZBB-NEXT:    sltu a6, a0, a2
+; RV32ZBB-NEXT:  .LBB16_3:
+; RV32ZBB-NEXT:    mv a4, a1
+; RV32ZBB-NEXT:    mv a5, a0
+; RV32ZBB-NEXT:    bnez a6, .LBB16_5
+; RV32ZBB-NEXT:  # %bb.4:
+; RV32ZBB-NEXT:    mv a4, a3
+; RV32ZBB-NEXT:    mv a5, a2
+; RV32ZBB-NEXT:  .LBB16_5:
+; RV32ZBB-NEXT:    beq a1, a3, .LBB16_7
+; RV32ZBB-NEXT:  # %bb.6:
+; RV32ZBB-NEXT:    slt a6, a3, a1
+; RV32ZBB-NEXT:    beqz a6, .LBB16_8
+; RV32ZBB-NEXT:    j .LBB16_9
+; RV32ZBB-NEXT:  .LBB16_7:
+; RV32ZBB-NEXT:    sltu a6, a2, a0
+; RV32ZBB-NEXT:    bnez a6, .LBB16_9
+; RV32ZBB-NEXT:  .LBB16_8:
+; RV32ZBB-NEXT:    mv a1, a3
+; RV32ZBB-NEXT:    mv a0, a2
+; RV32ZBB-NEXT:  .LBB16_9:
+; RV32ZBB-NEXT:    sltu a2, a0, a5
+; RV32ZBB-NEXT:    sub a1, a1, a4
+; RV32ZBB-NEXT:    sub a1, a1, a2
+; RV32ZBB-NEXT:    sub a0, a0, a5
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_minmax_i64:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    min a2, a0, a1
+; RV64ZBB-NEXT:    max a0, a0, a1
+; RV64ZBB-NEXT:    sub a0, a0, a2
+; RV64ZBB-NEXT:    ret
+  %min = call i64 @llvm.smin.i64(i64 %a, i64 %b)
+  %max = call i64 @llvm.smax.i64(i64 %a, i64 %b)
+  %sub = sub i64 %max, %min
+  ret i64 %sub
+}
+
+define i128 @abd_minmax_i128(i128 %a, i128 %b) nounwind {
+; RV32I-LABEL: abd_minmax_i128:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lw a6, 4(a2)
+; RV32I-NEXT:    lw a3, 4(a1)
+; RV32I-NEXT:    lw a7, 8(a2)
+; RV32I-NEXT:    lw t0, 12(a2)
+; RV32I-NEXT:    lw a5, 12(a1)
+; RV32I-NEXT:    lw a4, 8(a1)
+; RV32I-NEXT:    beq a5, t0, .LBB17_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    slt t1, a5, t0
+; RV32I-NEXT:    j .LBB17_3
+; RV32I-NEXT:  .LBB17_2:
+; RV32I-NEXT:    sltu t1, a4, a7
+; RV32I-NEXT:  .LBB17_3:
+; RV32I-NEXT:    lw t2, 0(a2)
+; RV32I-NEXT:    lw a1, 0(a1)
+; RV32I-NEXT:    beq a3, a6, .LBB17_5
+; RV32I-NEXT:  # %bb.4:
+; RV32I-NEXT:    sltu t6, a3, a6
+; RV32I-NEXT:    j .LBB17_6
+; RV32I-NEXT:  .LBB17_5:
+; RV32I-NEXT:    sltu t6, a1, t2
+; RV32I-NEXT:  .LBB17_6:
+; RV32I-NEXT:    xor a2, a5, t0
+; RV32I-NEXT:    xor t3, a4, a7
+; RV32I-NEXT:    or t5, t3, a2
+; RV32I-NEXT:    beqz t5, .LBB17_8
+; RV32I-NEXT:  # %bb.7:
+; RV32I-NEXT:    mv t6, t1
+; RV32I-NEXT:  .LBB17_8:
+; RV32I-NEXT:    mv a2, a1
+; RV32I-NEXT:    mv t1, a3
+; RV32I-NEXT:    mv t4, a5
+; RV32I-NEXT:    mv t3, a4
+; RV32I-NEXT:    bnez t6, .LBB17_10
+; RV32I-NEXT:  # %bb.9:
+; RV32I-NEXT:    mv a2, t2
+; RV32I-NEXT:    mv t1, a6
+; RV32I-NEXT:    mv t4, t0
+; RV32I-NEXT:    mv t3, a7
+; RV32I-NEXT:  .LBB17_10:
+; RV32I-NEXT:    beq a5, t0, .LBB17_12
+; RV32I-NEXT:  # %bb.11:
+; RV32I-NEXT:    slt t6, t0, a5
+; RV32I-NEXT:    j .LBB17_13
+; RV32I-NEXT:  .LBB17_12:
+; RV32I-NEXT:    sltu t6, a7, a4
+; RV32I-NEXT:  .LBB17_13:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    beq a3, a6, .LBB17_15
+; RV32I-NEXT:  # %bb.14:
+; RV32I-NEXT:    sltu s0, a6, a3
+; RV32I-NEXT:    bnez t5, .LBB17_16
+; RV32I-NEXT:    j .LBB17_17
+; RV32I-NEXT:  .LBB17_15:
+; RV32I-NEXT:    sltu s0, t2, a1
+; RV32I-NEXT:    beqz t5, .LBB17_17
+; RV32I-NEXT:  .LBB17_16:
+; RV32I-NEXT:    mv s0, t6
+; RV32I-NEXT:  .LBB17_17:
+; RV32I-NEXT:    bnez s0, .LBB17_19
+; RV32I-NEXT:  # %bb.18:
+; RV32I-NEXT:    mv a1, t2
+; RV32I-NEXT:    mv a3, a6
+; RV32I-NEXT:    mv a5, t0
+; RV32I-NEXT:    mv a4, a7
+; RV32I-NEXT:  .LBB17_19:
+; RV32I-NEXT:    sltu a6, a4, t3
+; RV32I-NEXT:    sub a7, a5, t4
+; RV32I-NEXT:    sltu a5, a1, a2
+; RV32I-NEXT:    sub a6, a7, a6
+; RV32I-NEXT:    mv a7, a5
+; RV32I-NEXT:    beq a3, t1, .LBB17_21
+; RV32I-NEXT:  # %bb.20:
+; RV32I-NEXT:    sltu a7, a3, t1
+; RV32I-NEXT:  .LBB17_21:
+; RV32I-NEXT:    sub a4, a4, t3
+; RV32I-NEXT:    sltu t0, a4, a7
+; RV32I-NEXT:    sub a6, a6, t0
+; RV32I-NEXT:    sub a4, a4, a7
+; RV32I-NEXT:    sub a3, a3, t1
+; RV32I-NEXT:    sub a3, a3, a5
+; RV32I-NEXT:    sub a1, a1, a2
+; RV32I-NEXT:    sw a1, 0(a0)
+; RV32I-NEXT:    sw a3, 4(a0)
+; RV32I-NEXT:    sw a4, 8(a0)
+; RV32I-NEXT:    sw a6, 12(a0)
+; RV32I-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_minmax_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    beq a1, a3, .LBB17_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    slt a6, a1, a3
+; RV64I-NEXT:    j .LBB17_3
+; RV64I-NEXT:  .LBB17_2:
+; RV64I-NEXT:    sltu a6, a0, a2
+; RV64I-NEXT:  .LBB17_3:
+; RV64I-NEXT:    mv a4, a1
+; RV64I-NEXT:    mv a5, a0
+; RV64I-NEXT:    bnez a6, .LBB17_5
+; RV64I-NEXT:  # %bb.4:
+; RV64I-NEXT:    mv a4, a3
+; RV64I-NEXT:    mv a5, a2
+; RV64I-NEXT:  .LBB17_5:
+; RV64I-NEXT:    beq a1, a3, .LBB17_7
+; RV64I-NEXT:  # %bb.6:
+; RV64I-NEXT:    slt a6, a3, a1
+; RV64I-NEXT:    beqz a6, .LBB17_8
+; RV64I-NEXT:    j .LBB17_9
+; RV64I-NEXT:  .LBB17_7:
+; RV64I-NEXT:    sltu a6, a2, a0
+; RV64I-NEXT:    bnez a6, .LBB17_9
+; RV64I-NEXT:  .LBB17_8:
+; RV64I-NEXT:    mv a1, a3
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:  .LBB17_9:
+; RV64I-NEXT:    sltu a2, a0, a5
+; RV64I-NEXT:    sub a1, a1, a4
+; RV64I-NEXT:    sub a1, a1, a2
+; RV64I-NEXT:    sub a0, a0, a5
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_minmax_i128:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lw a6, 4(a2)
+; RV32ZBB-NEXT:    lw a3, 4(a1)
+; RV32ZBB-NEXT:    lw a7, 8(a2)
+; RV32ZBB-NEXT:    lw t0, 12(a2)
+; RV32ZBB-NEXT:    lw a5, 12(a1)
+; RV32ZBB-NEXT:    lw a4, 8(a1)
+; RV32ZBB-NEXT:    beq a5, t0, .LBB17_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    slt t1, a5, t0
+; RV32ZBB-NEXT:    j .LBB17_3
+; RV32ZBB-NEXT:  .LBB17_2:
+; RV32ZBB-NEXT:    sltu t1, a4, a7
+; RV32ZBB-NEXT:  .LBB17_3:
+; RV32ZBB-NEXT:    lw t2, 0(a2)
+; RV32ZBB-NEXT:    lw a1, 0(a1)
+; RV32ZBB-NEXT:    beq a3, a6, .LBB17_5
+; RV32ZBB-NEXT:  # %bb.4:
+; RV32ZBB-NEXT:    sltu t6, a3, a6
+; RV32ZBB-NEXT:    j .LBB17_6
+; RV32ZBB-NEXT:  .LBB17_5:
+; RV32ZBB-NEXT:    sltu t6, a1, t2
+; RV32ZBB-NEXT:  .LBB17_6:
+; RV32ZBB-NEXT:    xor a2, a5, t0
+; RV32ZBB-NEXT:    xor t3, a4, a7
+; RV32ZBB-NEXT:    or t5, t3, a2
+; RV32ZBB-NEXT:    beqz t5, .LBB17_8
+; RV32ZBB-NEXT:  # %bb.7:
+; RV32ZBB-NEXT:    mv t6, t1
+; RV32ZBB-NEXT:  .LBB17_8:
+; RV32ZBB-NEXT:    mv a2, a1
+; RV32ZBB-NEXT:    mv t1, a3
+; RV32ZBB-NEXT:    mv t4, a5
+; RV32ZBB-NEXT:    mv t3, a4
+; RV32ZBB-NEXT:    bnez t6, .LBB17_10
+; RV32ZBB-NEXT:  # %bb.9:
+; RV32ZBB-NEXT:    mv a2, t2
+; RV32ZBB-NEXT:    mv t1, a6
+; RV32ZBB-NEXT:    mv t4, t0
+; RV32ZBB-NEXT:    mv t3, a7
+; RV32ZBB-NEXT:  .LBB17_10:
+; RV32ZBB-NEXT:    beq a5, t0, .LBB17_12
+; RV32ZBB-NEXT:  # %bb.11:
+; RV32ZBB-NEXT:    slt t6, t0, a5
+; RV32ZBB-NEXT:    j .LBB17_13
+; RV32ZBB-NEXT:  .LBB17_12:
+; RV32ZBB-NEXT:    sltu t6, a7, a4
+; RV32ZBB-NEXT:  .LBB17_13:
+; RV32ZBB-NEXT:    addi sp, sp, -16
+; RV32ZBB-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT:    beq a3, a6, .LBB17_15
+; RV32ZBB-NEXT:  # %bb.14:
+; RV32ZBB-NEXT:    sltu s0, a6, a3
+; RV32ZBB-NEXT:    bnez t5, .LBB17_16
+; RV32ZBB-NEXT:    j .LBB17_17
+; RV32ZBB-NEXT:  .LBB17_15:
+; RV32ZBB-NEXT:    sltu s0, t2, a1
+; RV32ZBB-NEXT:    beqz t5, .LBB17_17
+; RV32ZBB-NEXT:  .LBB17_16:
+; RV32ZBB-NEXT:    mv s0, t6
+; RV32ZBB-NEXT:  .LBB17_17:
+; RV32ZBB-NEXT:    bnez s0, .LBB17_19
+; RV32ZBB-NEXT:  # %bb.18:
+; RV32ZBB-NEXT:    mv a1, t2
+; RV32ZBB-NEXT:    mv a3, a6
+; RV32ZBB-NEXT:    mv a5, t0
+; RV32ZBB-NEXT:    mv a4, a7
+; RV32ZBB-NEXT:  .LBB17_19:
+; RV32ZBB-NEXT:    sltu a6, a4, t3
+; RV32ZBB-NEXT:    sub a7, a5, t4
+; RV32ZBB-NEXT:    sltu a5, a1, a2
+; RV32ZBB-NEXT:    sub a6, a7, a6
+; RV32ZBB-NEXT:    mv a7, a5
+; RV32ZBB-NEXT:    beq a3, t1, .LBB17_21
+; RV32ZBB-NEXT:  # %bb.20:
+; RV32ZBB-NEXT:    sltu a7, a3, t1
+; RV32ZBB-NEXT:  .LBB17_21:
+; RV32ZBB-NEXT:    sub a4, a4, t3
+; RV32ZBB-NEXT:    sltu t0, a4, a7
+; RV32ZBB-NEXT:    sub a6, a6, t0
+; RV32ZBB-NEXT:    sub a4, a4, a7
+; RV32ZBB-NEXT:    sub a3, a3, t1
+; RV32ZBB-NEXT:    sub a3, a3, a5
+; RV32ZBB-NEXT:    sub a1, a1, a2
+; RV32ZBB-NEXT:    sw a1, 0(a0)
+; RV32ZBB-NEXT:    sw a3, 4(a0)
+; RV32ZBB-NEXT:    sw a4, 8(a0)
+; RV32ZBB-NEXT:    sw a6, 12(a0)
+; RV32ZBB-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT:    addi sp, sp, 16
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_minmax_i128:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    beq a1, a3, .LBB17_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    slt a6, a1, a3
+; RV64ZBB-NEXT:    j .LBB17_3
+; RV64ZBB-NEXT:  .LBB17_2:
+; RV64ZBB-NEXT:    sltu a6, a0, a2
+; RV64ZBB-NEXT:  .LBB17_3:
+; RV64ZBB-NEXT:    mv a4, a1
+; RV64ZBB-NEXT:    mv a5, a0
+; RV64ZBB-NEXT:    bnez a6, .LBB17_5
+; RV64ZBB-NEXT:  # %bb.4:
+; RV64ZBB-NEXT:    mv a4, a3
+; RV64ZBB-NEXT:    mv a5, a2
+; RV64ZBB-NEXT:  .LBB17_5:
+; RV64ZBB-NEXT:    beq a1, a3, .LBB17_7
+; RV64ZBB-NEXT:  # %bb.6:
+; RV64ZBB-NEXT:    slt a6, a3, a1
+; RV64ZBB-NEXT:    beqz a6, .LBB17_8
+; RV64ZBB-NEXT:    j .LBB17_9
+; RV64ZBB-NEXT:  .LBB17_7:
+; RV64ZBB-NEXT:    sltu a6, a2, a0
+; RV64ZBB-NEXT:    bnez a6, .LBB17_9
+; RV64ZBB-NEXT:  .LBB17_8:
+; RV64ZBB-NEXT:    mv a1, a3
+; RV64ZBB-NEXT:    mv a0, a2
+; RV64ZBB-NEXT:  .LBB17_9:
+; RV64ZBB-NEXT:    sltu a2, a0, a5
+; RV64ZBB-NEXT:    sub a1, a1, a4
+; RV64ZBB-NEXT:    sub a1, a1, a2
+; RV64ZBB-NEXT:    sub a0, a0, a5
+; RV64ZBB-NEXT:    ret
+  %min = call i128 @llvm.smin.i128(i128 %a, i128 %b)
+  %max = call i128 @llvm.smax.i128(i128 %a, i128 %b)
+  %sub = sub i128 %max, %min
+  ret i128 %sub
+}
+
+;
+; select(icmp(a,b),sub(a,b),sub(b,a)) -> abds(a,b)
+;
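+; Whichever side of the compare is taken, the selected subtraction is the
+; larger operand minus the smaller one, so the result is |a - b| (modulo 2^n)
+; for both strict and non-strict signed predicates; e.g. for i8 a = 5, b = 9
+; the sgt compare is false and b - a = 4 = |5 - 9| is selected.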
+
+define i8 @abd_cmp_i8(i8 %a, i8 %b) nounwind {
+; RV32I-LABEL: abd_cmp_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a2, a0, 24
+; RV32I-NEXT:    srai a2, a2, 24
+; RV32I-NEXT:    slli a3, a1, 24
+; RV32I-NEXT:    srai a3, a3, 24
+; RV32I-NEXT:    blt a3, a2, .LBB18_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB18_2:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_cmp_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a2, a0, 56
+; RV64I-NEXT:    srai a2, a2, 56
+; RV64I-NEXT:    slli a3, a1, 56
+; RV64I-NEXT:    srai a3, a3, 56
+; RV64I-NEXT:    blt a3, a2, .LBB18_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB18_2:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_cmp_i8:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    sext.b a2, a0
+; ZBB-NEXT:    sext.b a3, a1
+; ZBB-NEXT:    blt a3, a2, .LBB18_2
+; ZBB-NEXT:  # %bb.1:
+; ZBB-NEXT:    sub a0, a1, a0
+; ZBB-NEXT:    ret
+; ZBB-NEXT:  .LBB18_2:
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    ret
+  %cmp = icmp sgt i8 %a, %b
+  %ab = sub i8 %a, %b
+  %ba = sub i8 %b, %a
+  %sel = select i1 %cmp, i8 %ab, i8 %ba
+  ret i8 %sel
+}
+
+define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_cmp_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a2, a1, 16
+; RV32I-NEXT:    srai a2, a2, 16
+; RV32I-NEXT:    slli a3, a0, 16
+; RV32I-NEXT:    srai a3, a3, 16
+; RV32I-NEXT:    bge a3, a2, .LBB19_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB19_2:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_cmp_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a2, a1, 48
+; RV64I-NEXT:    srai a2, a2, 48
+; RV64I-NEXT:    slli a3, a0, 48
+; RV64I-NEXT:    srai a3, a3, 48
+; RV64I-NEXT:    bge a3, a2, .LBB19_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB19_2:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_cmp_i16:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    sext.h a2, a1
+; ZBB-NEXT:    sext.h a3, a0
+; ZBB-NEXT:    bge a3, a2, .LBB19_2
+; ZBB-NEXT:  # %bb.1:
+; ZBB-NEXT:    sub a0, a1, a0
+; ZBB-NEXT:    ret
+; ZBB-NEXT:  .LBB19_2:
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    ret
+  %cmp = icmp sge i16 %a, %b
+  %ab = sub i16 %a, %b
+  %ba = sub i16 %b, %a
+  %sel = select i1 %cmp, i16 %ab, i16 %ba
+  ret i16 %sel
+}
+
+define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_cmp_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    blt a0, a1, .LBB20_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB20_2:
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_cmp_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a2, a1
+; RV64I-NEXT:    sext.w a3, a0
+; RV64I-NEXT:    blt a3, a2, .LBB20_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB20_2:
+; RV64I-NEXT:    subw a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_cmp_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    blt a0, a1, .LBB20_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    ret
+; RV32ZBB-NEXT:  .LBB20_2:
+; RV32ZBB-NEXT:    sub a0, a1, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_cmp_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sext.w a2, a1
+; RV64ZBB-NEXT:    sext.w a3, a0
+; RV64ZBB-NEXT:    blt a3, a2, .LBB20_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    subw a0, a0, a1
+; RV64ZBB-NEXT:    ret
+; RV64ZBB-NEXT:  .LBB20_2:
+; RV64ZBB-NEXT:    subw a0, a1, a0
+; RV64ZBB-NEXT:    ret
+  %cmp = icmp slt i32 %a, %b
+  %ab = sub i32 %a, %b
+  %ba = sub i32 %b, %a
+  %sel = select i1 %cmp, i32 %ba, i32 %ab
+  ret i32 %sel
+}
+
+define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: abd_cmp_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltu a4, a0, a2
+; RV32I-NEXT:    mv a5, a4
+; RV32I-NEXT:    beq a1, a3, .LBB21_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    slt a5, a1, a3
+; RV32I-NEXT:  .LBB21_2:
+; RV32I-NEXT:    beqz a5, .LBB21_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sub a1, a1, a4
+; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB21_4:
+; RV32I-NEXT:    sltu a4, a2, a0
+; RV32I-NEXT:    sub a1, a3, a1
+; RV32I-NEXT:    sub a1, a1, a4
+; RV32I-NEXT:    sub a0, a2, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_cmp_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    bge a0, a1, .LBB21_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB21_2:
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_cmp_i64:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sltu a4, a0, a2
+; RV32ZBB-NEXT:    mv a5, a4
+; RV32ZBB-NEXT:    beq a1, a3, .LBB21_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    slt a5, a1, a3
+; RV32ZBB-NEXT:  .LBB21_2:
+; RV32ZBB-NEXT:    beqz a5, .LBB21_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    sub a1, a1, a3
+; RV32ZBB-NEXT:    sub a1, a1, a4
+; RV32ZBB-NEXT:    sub a0, a0, a2
+; RV32ZBB-NEXT:    ret
+; RV32ZBB-NEXT:  .LBB21_4:
+; RV32ZBB-NEXT:    sltu a4, a2, a0
+; RV32ZBB-NEXT:    sub a1, a3, a1
+; RV32ZBB-NEXT:    sub a1, a1, a4
+; RV32ZBB-NEXT:    sub a0, a2, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_cmp_i64:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    bge a0, a1, .LBB21_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    ret
+; RV64ZBB-NEXT:  .LBB21_2:
+; RV64ZBB-NEXT:    sub a0, a1, a0
+; RV64ZBB-NEXT:    ret
+  %cmp = icmp sge i64 %a, %b
+  %ab = sub i64 %a, %b
+  %ba = sub i64 %b, %a
+  %sel = select i1 %cmp, i64 %ba, i64 %ab
+  ret i64 %sel
+}
+
+define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
+; RV32I-LABEL: abd_cmp_i128:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lw a3, 0(a2)
+; RV32I-NEXT:    lw a4, 0(a1)
+; RV32I-NEXT:    lw a5, 4(a2)
+; RV32I-NEXT:    lw a6, 8(a2)
+; RV32I-NEXT:    lw a7, 8(a1)
+; RV32I-NEXT:    lw a2, 12(a2)
+; RV32I-NEXT:    lw t0, 12(a1)
+; RV32I-NEXT:    lw a1, 4(a1)
+; RV32I-NEXT:    sltu t1, a7, a6
+; RV32I-NEXT:    mv t4, t1
+; RV32I-NEXT:    beq t0, a2, .LBB22_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    slt t4, t0, a2
+; RV32I-NEXT:  .LBB22_2:
+; RV32I-NEXT:    xor t3, t0, a2
+; RV32I-NEXT:    xor t5, a7, a6
+; RV32I-NEXT:    sltu t2, a4, a3
+; RV32I-NEXT:    or t5, t5, t3
+; RV32I-NEXT:    mv t3, t2
+; RV32I-NEXT:    beq a1, a5, .LBB22_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    sltu t3, a1, a5
+; RV32I-NEXT:  .LBB22_4:
+; RV32I-NEXT:    mv t6, t3
+; RV32I-NEXT:    beqz t5, .LBB22_6
+; RV32I-NEXT:  # %bb.5:
+; RV32I-NEXT:    mv t6, t4
+; RV32I-NEXT:  .LBB22_6:
+; RV32I-NEXT:    sltu t4, a3, a4
+; RV32I-NEXT:    mv t5, t4
+; RV32I-NEXT:    beq a1, a5, .LBB22_8
+; RV32I-NEXT:  # %bb.7:
+; RV32I-NEXT:    sltu t5, a5, a1
+; RV32I-NEXT:  .LBB22_8:
+; RV32I-NEXT:    beqz t6, .LBB22_10
+; RV32I-NEXT:  # %bb.9:
+; RV32I-NEXT:    sub a2, t0, a2
+; RV32I-NEXT:    sub a6, a7, a6
+; RV32I-NEXT:    sub a2, a2, t1
+; RV32I-NEXT:    sltu a7, a6, t3
+; RV32I-NEXT:    sub a1, a1, a5
+; RV32I-NEXT:    sub a2, a2, a7
+; RV32I-NEXT:    sub a6, a6, t3
+; RV32I-NEXT:    sub a1, a1, t2
+; RV32I-NEXT:    sub a3, a4, a3
+; RV32I-NEXT:    j .LBB22_11
+; RV32I-NEXT:  .LBB22_10:
+; RV32I-NEXT:    sltu t1, a6, a7
+; RV32I-NEXT:    sub a2, a2, t0
+; RV32I-NEXT:    sub a2, a2, t1
+; RV32I-NEXT:    sub a6, a6, a7
+; RV32I-NEXT:    sltu a7, a6, t5
+; RV32I-NEXT:    sub a2, a2, a7
+; RV32I-NEXT:    sub a6, a6, t5
+; RV32I-NEXT:    sub a5, a5, a1
+; RV32I-NEXT:    sub a1, a5, t4
+; RV32I-NEXT:    sub a3, a3, a4
+; RV32I-NEXT:  .LBB22_11:
+; RV32I-NEXT:    sw a6, 8(a0)
+; RV32I-NEXT:    sw a1, 4(a0)
+; RV32I-NEXT:    sw a3, 0(a0)
+; RV32I-NEXT:    sw a2, 12(a0)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_cmp_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sltu a4, a0, a2
+; RV64I-NEXT:    mv a5, a4
+; RV64I-NEXT:    beq a1, a3, .LBB22_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    slt a5, a1, a3
+; RV64I-NEXT:  .LBB22_2:
+; RV64I-NEXT:    beqz a5, .LBB22_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    sub a1, a1, a3
+; RV64I-NEXT:    sub a1, a1, a4
+; RV64I-NEXT:    sub a0, a0, a2
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB22_4:
+; RV64I-NEXT:    sltu a4, a2, a0
+; RV64I-NEXT:    sub a1, a3, a1
+; RV64I-NEXT:    sub a1, a1, a4
+; RV64I-NEXT:    sub a0, a2, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_cmp_i128:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lw a3, 0(a2)
+; RV32ZBB-NEXT:    lw a4, 0(a1)
+; RV32ZBB-NEXT:    lw a5, 4(a2)
+; RV32ZBB-NEXT:    lw a6, 8(a2)
+; RV32ZBB-NEXT:    lw a7, 8(a1)
+; RV32ZBB-NEXT:    lw a2, 12(a2)
+; RV32ZBB-NEXT:    lw t0, 12(a1)
+; RV32ZBB-NEXT:    lw a1, 4(a1)
+; RV32ZBB-NEXT:    sltu t1, a7, a6
+; RV32ZBB-NEXT:    mv t4, t1
+; RV32ZBB-NEXT:    beq t0, a2, .LBB22_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    slt t4, t0, a2
+; RV32ZBB-NEXT:  .LBB22_2:
+; RV32ZBB-NEXT:    xor t3, t0, a2
+; RV32ZBB-NEXT:    xor t5, a7, a6
+; RV32ZBB-NEXT:    sltu t2, a4, a3
+; RV32ZBB-NEXT:    or t5, t5, t3
+; RV32ZBB-NEXT:    mv t3, t2
+; RV32ZBB-NEXT:    beq a1, a5, .LBB22_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    sltu t3, a1, a5
+; RV32ZBB-NEXT:  .LBB22_4:
+; RV32ZBB-NEXT:    mv t6, t3
+; RV32ZBB-NEXT:    beqz t5, .LBB22_6
+; RV32ZBB-NEXT:  # %bb.5:
+; RV32ZBB-NEXT:    mv t6, t4
+; RV32ZBB-NEXT:  .LBB22_6:
+; RV32ZBB-NEXT:    sltu t4, a3, a4
+; RV32ZBB-NEXT:    mv t5, t4
+; RV32ZBB-NEXT:    beq a1, a5, .LBB22_8
+; RV32ZBB-NEXT:  # %bb.7:
+; RV32ZBB-NEXT:    sltu t5, a5, a1
+; RV32ZBB-NEXT:  .LBB22_8:
+; RV32ZBB-NEXT:    beqz t6, .LBB22_10
+; RV32ZBB-NEXT:  # %bb.9:
+; RV32ZBB-NEXT:    sub a2, t0, a2
+; RV32ZBB-NEXT:    sub a6, a7, a6
+; RV32ZBB-NEXT:    sub a2, a2, t1
+; RV32ZBB-NEXT:    sltu a7, a6, t3
+; RV32ZBB-NEXT:    sub a1, a1, a5
+; RV32ZBB-NEXT:    sub a2, a2, a7
+; RV32ZBB-NEXT:    sub a6, a6, t3
+; RV32ZBB-NEXT:    sub a1, a1, t2
+; RV32ZBB-NEXT:    sub a3, a4, a3
+; RV32ZBB-NEXT:    j .LBB22_11
+; RV32ZBB-NEXT:  .LBB22_10:
+; RV32ZBB-NEXT:    sltu t1, a6, a7
+; RV32ZBB-NEXT:    sub a2, a2, t0
+; RV32ZBB-NEXT:    sub a2, a2, t1
+; RV32ZBB-NEXT:    sub a6, a6, a7
+; RV32ZBB-NEXT:    sltu a7, a6, t5
+; RV32ZBB-NEXT:    sub a2, a2, a7
+; RV32ZBB-NEXT:    sub a6, a6, t5
+; RV32ZBB-NEXT:    sub a5, a5, a1
+; RV32ZBB-NEXT:    sub a1, a5, t4
+; RV32ZBB-NEXT:    sub a3, a3, a4
+; RV32ZBB-NEXT:  .LBB22_11:
+; RV32ZBB-NEXT:    sw a6, 8(a0)
+; RV32ZBB-NEXT:    sw a1, 4(a0)
+; RV32ZBB-NEXT:    sw a3, 0(a0)
+; RV32ZBB-NEXT:    sw a2, 12(a0)
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_cmp_i128:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sltu a4, a0, a2
+; RV64ZBB-NEXT:    mv a5, a4
+; RV64ZBB-NEXT:    beq a1, a3, .LBB22_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    slt a5, a1, a3
+; RV64ZBB-NEXT:  .LBB22_2:
+; RV64ZBB-NEXT:    beqz a5, .LBB22_4
+; RV64ZBB-NEXT:  # %bb.3:
+; RV64ZBB-NEXT:    sub a1, a1, a3
+; RV64ZBB-NEXT:    sub a1, a1, a4
+; RV64ZBB-NEXT:    sub a0, a0, a2
+; RV64ZBB-NEXT:    ret
+; RV64ZBB-NEXT:  .LBB22_4:
+; RV64ZBB-NEXT:    sltu a4, a2, a0
+; RV64ZBB-NEXT:    sub a1, a3, a1
+; RV64ZBB-NEXT:    sub a1, a1, a4
+; RV64ZBB-NEXT:    sub a0, a2, a0
+; RV64ZBB-NEXT:    ret
+  %cmp = icmp sge i128 %a, %b
+  %ab = sub i128 %a, %b
+  %ba = sub i128 %b, %a
+  %sel = select i1 %cmp, i128 %ba, i128 %ab
+  ret i128 %sel
+}
+
+;
+; abs(sub_nsw(a,b)) -> abds(a,b)
+;
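+; The nsw flag guarantees that a - b cannot wrap, so llvm.abs of the
+; subtraction is the true absolute difference and may be folded to abds(a,b).
+; Without nsw the fold would be unsound, which is what the negative tests at
+; the end of this file check.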
+
+define i8 @abd_subnsw_i8(i8 %a, i8 %b) nounwind {
+; RV32I-LABEL: abd_subnsw_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 24
+; RV32I-NEXT:    srai a1, a1, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_subnsw_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 56
+; RV64I-NEXT:    srai a1, a1, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_subnsw_i8:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    sext.b a0, a0
+; RV32ZBB-NEXT:    neg a1, a0
+; RV32ZBB-NEXT:    max a0, a0, a1
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_subnsw_i8:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    subw a0, a0, a1
+; RV64ZBB-NEXT:    sext.b a0, a0
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    max a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %sub = sub nsw i8 %a, %b
+  %abs = call i8 @llvm.abs.i8(i8 %sub, i1 false)
+  ret i8 %abs
+}
+
+define i8 @abd_subnsw_i8_undef(i8 %a, i8 %b) nounwind {
+; RV32I-LABEL: abd_subnsw_i8_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 24
+; RV32I-NEXT:    srai a1, a1, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_subnsw_i8_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 56
+; RV64I-NEXT:    srai a1, a1, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_subnsw_i8_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    sext.b a0, a0
+; RV32ZBB-NEXT:    neg a1, a0
+; RV32ZBB-NEXT:    max a0, a0, a1
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_subnsw_i8_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    subw a0, a0, a1
+; RV64ZBB-NEXT:    sext.b a0, a0
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    max a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %sub = sub nsw i8 %a, %b
+  %abs = call i8 @llvm.abs.i8(i8 %sub, i1 true)
+  ret i8 %abs
+}
+
+define i16 @abd_subnsw_i16(i16 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_subnsw_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 16
+; RV32I-NEXT:    srai a1, a1, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_subnsw_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srai a1, a1, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_subnsw_i16:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    sext.h a0, a0
+; RV32ZBB-NEXT:    neg a1, a0
+; RV32ZBB-NEXT:    max a0, a0, a1
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_subnsw_i16:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    subw a0, a0, a1
+; RV64ZBB-NEXT:    sext.h a0, a0
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    max a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %sub = sub nsw i16 %a, %b
+  %abs = call i16 @llvm.abs.i16(i16 %sub, i1 false)
+  ret i16 %abs
+}
+
+define i16 @abd_subnsw_i16_undef(i16 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_subnsw_i16_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 16
+; RV32I-NEXT:    srai a1, a1, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_subnsw_i16_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srai a1, a1, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_subnsw_i16_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    sext.h a0, a0
+; RV32ZBB-NEXT:    neg a1, a0
+; RV32ZBB-NEXT:    max a0, a0, a1
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_subnsw_i16_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    subw a0, a0, a1
+; RV64ZBB-NEXT:    sext.h a0, a0
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    max a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %sub = sub nsw i16 %a, %b
+  %abs = call i16 @llvm.abs.i16(i16 %sub, i1 true)
+  ret i16 %abs
+}
+
+define i32 @abd_subnsw_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_subnsw_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_subnsw_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    sraiw a1, a0, 31
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_subnsw_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    neg a1, a0
+; RV32ZBB-NEXT:    max a0, a0, a1
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_subnsw_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    subw a0, a0, a1
+; RV64ZBB-NEXT:    negw a1, a0
+; RV64ZBB-NEXT:    max a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %sub = sub nsw i32 %a, %b
+  %abs = call i32 @llvm.abs.i32(i32 %sub, i1 false)
+  ret i32 %abs
+}
+
+define i32 @abd_subnsw_i32_undef(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_subnsw_i32_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_subnsw_i32_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    sraiw a1, a0, 31
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_subnsw_i32_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    neg a1, a0
+; RV32ZBB-NEXT:    max a0, a0, a1
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_subnsw_i32_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    subw a0, a0, a1
+; RV64ZBB-NEXT:    negw a1, a0
+; RV64ZBB-NEXT:    max a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %sub = sub nsw i32 %a, %b
+  %abs = call i32 @llvm.abs.i32(i32 %sub, i1 true)
+  ret i32 %abs
+}
+
+define i64 @abd_subnsw_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: abd_subnsw_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltu a4, a0, a2
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sub a1, a1, a4
+; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    bgez a1, .LBB29_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    snez a2, a0
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:  .LBB29_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_subnsw_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_subnsw_i64:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sltu a4, a0, a2
+; RV32ZBB-NEXT:    sub a1, a1, a3
+; RV32ZBB-NEXT:    sub a1, a1, a4
+; RV32ZBB-NEXT:    sub a0, a0, a2
+; RV32ZBB-NEXT:    bgez a1, .LBB29_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    snez a2, a0
+; RV32ZBB-NEXT:    add a1, a1, a2
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:  .LBB29_2:
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_subnsw_i64:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    max a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %sub = sub nsw i64 %a, %b
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  ret i64 %abs
+}
+
+define i64 @abd_subnsw_i64_undef(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: abd_subnsw_i64_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltu a4, a0, a2
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sub a1, a1, a4
+; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    bgez a1, .LBB30_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    snez a2, a0
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:  .LBB30_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_subnsw_i64_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_subnsw_i64_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sltu a4, a0, a2
+; RV32ZBB-NEXT:    sub a1, a1, a3
+; RV32ZBB-NEXT:    sub a1, a1, a4
+; RV32ZBB-NEXT:    sub a0, a0, a2
+; RV32ZBB-NEXT:    bgez a1, .LBB30_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    snez a2, a0
+; RV32ZBB-NEXT:    add a1, a1, a2
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:  .LBB30_2:
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_subnsw_i64_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    max a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %sub = sub nsw i64 %a, %b
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  ret i64 %abs
+}
+
+define i128 @abd_subnsw_i128(i128 %a, i128 %b) nounwind {
+; RV32I-LABEL: abd_subnsw_i128:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lw a3, 0(a2)
+; RV32I-NEXT:    lw a5, 0(a1)
+; RV32I-NEXT:    lw t1, 12(a2)
+; RV32I-NEXT:    lw a7, 8(a2)
+; RV32I-NEXT:    lw t0, 8(a1)
+; RV32I-NEXT:    lw t2, 12(a1)
+; RV32I-NEXT:    lw a4, 4(a2)
+; RV32I-NEXT:    lw a6, 4(a1)
+; RV32I-NEXT:    sltu a1, t0, a7
+; RV32I-NEXT:    sub a2, t2, t1
+; RV32I-NEXT:    sltu t1, a5, a3
+; RV32I-NEXT:    sub a1, a2, a1
+; RV32I-NEXT:    mv a2, t1
+; RV32I-NEXT:    beq a6, a4, .LBB31_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu a2, a6, a4
+; RV32I-NEXT:  .LBB31_2:
+; RV32I-NEXT:    sub a7, t0, a7
+; RV32I-NEXT:    sltu t0, a7, a2
+; RV32I-NEXT:    sub a1, a1, t0
+; RV32I-NEXT:    sub a2, a7, a2
+; RV32I-NEXT:    sub a4, a6, a4
+; RV32I-NEXT:    sub a4, a4, t1
+; RV32I-NEXT:    sub a3, a5, a3
+; RV32I-NEXT:    bgez a1, .LBB31_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    snez a5, a4
+; RV32I-NEXT:    snez a6, a3
+; RV32I-NEXT:    or a5, a6, a5
+; RV32I-NEXT:    neg a7, a2
+; RV32I-NEXT:    sltu t0, a7, a5
+; RV32I-NEXT:    snez a2, a2
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    sub a1, a1, t0
+; RV32I-NEXT:    sub a2, a7, a5
+; RV32I-NEXT:    neg a3, a3
+; RV32I-NEXT:    add a4, a4, a6
+; RV32I-NEXT:    neg a4, a4
+; RV32I-NEXT:  .LBB31_4:
+; RV32I-NEXT:    sw a4, 4(a0)
+; RV32I-NEXT:    sw a3, 0(a0)
+; RV32I-NEXT:    sw a2, 8(a0)
+; RV32I-NEXT:    sw a1, 12(a0)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_subnsw_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sltu a4, a0, a2
+; RV64I-NEXT:    sub a1, a1, a3
+; RV64I-NEXT:    sub a1, a1, a4
+; RV64I-NEXT:    sub a0, a0, a2
+; RV64I-NEXT:    bgez a1, .LBB31_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    snez a2, a0
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:  .LBB31_2:
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_subnsw_i128:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lw a3, 0(a2)
+; RV32ZBB-NEXT:    lw a5, 0(a1)
+; RV32ZBB-NEXT:    lw t1, 12(a2)
+; RV32ZBB-NEXT:    lw a7, 8(a2)
+; RV32ZBB-NEXT:    lw t0, 8(a1)
+; RV32ZBB-NEXT:    lw t2, 12(a1)
+; RV32ZBB-NEXT:    lw a4, 4(a2)
+; RV32ZBB-NEXT:    lw a6, 4(a1)
+; RV32ZBB-NEXT:    sltu a1, t0, a7
+; RV32ZBB-NEXT:    sub a2, t2, t1
+; RV32ZBB-NEXT:    sltu t1, a5, a3
+; RV32ZBB-NEXT:    sub a1, a2, a1
+; RV32ZBB-NEXT:    mv a2, t1
+; RV32ZBB-NEXT:    beq a6, a4, .LBB31_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu a2, a6, a4
+; RV32ZBB-NEXT:  .LBB31_2:
+; RV32ZBB-NEXT:    sub a7, t0, a7
+; RV32ZBB-NEXT:    sltu t0, a7, a2
+; RV32ZBB-NEXT:    sub a1, a1, t0
+; RV32ZBB-NEXT:    sub a2, a7, a2
+; RV32ZBB-NEXT:    sub a4, a6, a4
+; RV32ZBB-NEXT:    sub a4, a4, t1
+; RV32ZBB-NEXT:    sub a3, a5, a3
+; RV32ZBB-NEXT:    bgez a1, .LBB31_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    snez a5, a4
+; RV32ZBB-NEXT:    snez a6, a3
+; RV32ZBB-NEXT:    or a5, a6, a5
+; RV32ZBB-NEXT:    neg a7, a2
+; RV32ZBB-NEXT:    sltu t0, a7, a5
+; RV32ZBB-NEXT:    snez a2, a2
+; RV32ZBB-NEXT:    add a1, a1, a2
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:    sub a1, a1, t0
+; RV32ZBB-NEXT:    sub a2, a7, a5
+; RV32ZBB-NEXT:    neg a3, a3
+; RV32ZBB-NEXT:    add a4, a4, a6
+; RV32ZBB-NEXT:    neg a4, a4
+; RV32ZBB-NEXT:  .LBB31_4:
+; RV32ZBB-NEXT:    sw a4, 4(a0)
+; RV32ZBB-NEXT:    sw a3, 0(a0)
+; RV32ZBB-NEXT:    sw a2, 8(a0)
+; RV32ZBB-NEXT:    sw a1, 12(a0)
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_subnsw_i128:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sltu a4, a0, a2
+; RV64ZBB-NEXT:    sub a1, a1, a3
+; RV64ZBB-NEXT:    sub a1, a1, a4
+; RV64ZBB-NEXT:    sub a0, a0, a2
+; RV64ZBB-NEXT:    bgez a1, .LBB31_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    snez a2, a0
+; RV64ZBB-NEXT:    add a1, a1, a2
+; RV64ZBB-NEXT:    neg a1, a1
+; RV64ZBB-NEXT:    neg a0, a0
+; RV64ZBB-NEXT:  .LBB31_2:
+; RV64ZBB-NEXT:    ret
+  %sub = sub nsw i128 %a, %b
+  %abs = call i128 @llvm.abs.i128(i128 %sub, i1 false)
+  ret i128 %abs
+}
+
+define i128 @abd_subnsw_i128_undef(i128 %a, i128 %b) nounwind {
+; RV32I-LABEL: abd_subnsw_i128_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lw a3, 0(a2)
+; RV32I-NEXT:    lw a5, 0(a1)
+; RV32I-NEXT:    lw t1, 12(a2)
+; RV32I-NEXT:    lw a7, 8(a2)
+; RV32I-NEXT:    lw t0, 8(a1)
+; RV32I-NEXT:    lw t2, 12(a1)
+; RV32I-NEXT:    lw a4, 4(a2)
+; RV32I-NEXT:    lw a6, 4(a1)
+; RV32I-NEXT:    sltu a1, t0, a7
+; RV32I-NEXT:    sub a2, t2, t1
+; RV32I-NEXT:    sltu t1, a5, a3
+; RV32I-NEXT:    sub a1, a2, a1
+; RV32I-NEXT:    mv a2, t1
+; RV32I-NEXT:    beq a6, a4, .LBB32_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu a2, a6, a4
+; RV32I-NEXT:  .LBB32_2:
+; RV32I-NEXT:    sub a7, t0, a7
+; RV32I-NEXT:    sltu t0, a7, a2
+; RV32I-NEXT:    sub a1, a1, t0
+; RV32I-NEXT:    sub a2, a7, a2
+; RV32I-NEXT:    sub a4, a6, a4
+; RV32I-NEXT:    sub a4, a4, t1
+; RV32I-NEXT:    sub a3, a5, a3
+; RV32I-NEXT:    bgez a1, .LBB32_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    snez a5, a4
+; RV32I-NEXT:    snez a6, a3
+; RV32I-NEXT:    or a5, a6, a5
+; RV32I-NEXT:    neg a7, a2
+; RV32I-NEXT:    sltu t0, a7, a5
+; RV32I-NEXT:    snez a2, a2
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    sub a1, a1, t0
+; RV32I-NEXT:    sub a2, a7, a5
+; RV32I-NEXT:    neg a3, a3
+; RV32I-NEXT:    add a4, a4, a6
+; RV32I-NEXT:    neg a4, a4
+; RV32I-NEXT:  .LBB32_4:
+; RV32I-NEXT:    sw a4, 4(a0)
+; RV32I-NEXT:    sw a3, 0(a0)
+; RV32I-NEXT:    sw a2, 8(a0)
+; RV32I-NEXT:    sw a1, 12(a0)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_subnsw_i128_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sltu a4, a0, a2
+; RV64I-NEXT:    sub a1, a1, a3
+; RV64I-NEXT:    sub a1, a1, a4
+; RV64I-NEXT:    sub a0, a0, a2
+; RV64I-NEXT:    bgez a1, .LBB32_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    snez a2, a0
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:  .LBB32_2:
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_subnsw_i128_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lw a3, 0(a2)
+; RV32ZBB-NEXT:    lw a5, 0(a1)
+; RV32ZBB-NEXT:    lw t1, 12(a2)
+; RV32ZBB-NEXT:    lw a7, 8(a2)
+; RV32ZBB-NEXT:    lw t0, 8(a1)
+; RV32ZBB-NEXT:    lw t2, 12(a1)
+; RV32ZBB-NEXT:    lw a4, 4(a2)
+; RV32ZBB-NEXT:    lw a6, 4(a1)
+; RV32ZBB-NEXT:    sltu a1, t0, a7
+; RV32ZBB-NEXT:    sub a2, t2, t1
+; RV32ZBB-NEXT:    sltu t1, a5, a3
+; RV32ZBB-NEXT:    sub a1, a2, a1
+; RV32ZBB-NEXT:    mv a2, t1
+; RV32ZBB-NEXT:    beq a6, a4, .LBB32_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu a2, a6, a4
+; RV32ZBB-NEXT:  .LBB32_2:
+; RV32ZBB-NEXT:    sub a7, t0, a7
+; RV32ZBB-NEXT:    sltu t0, a7, a2
+; RV32ZBB-NEXT:    sub a1, a1, t0
+; RV32ZBB-NEXT:    sub a2, a7, a2
+; RV32ZBB-NEXT:    sub a4, a6, a4
+; RV32ZBB-NEXT:    sub a4, a4, t1
+; RV32ZBB-NEXT:    sub a3, a5, a3
+; RV32ZBB-NEXT:    bgez a1, .LBB32_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    snez a5, a4
+; RV32ZBB-NEXT:    snez a6, a3
+; RV32ZBB-NEXT:    or a5, a6, a5
+; RV32ZBB-NEXT:    neg a7, a2
+; RV32ZBB-NEXT:    sltu t0, a7, a5
+; RV32ZBB-NEXT:    snez a2, a2
+; RV32ZBB-NEXT:    add a1, a1, a2
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:    sub a1, a1, t0
+; RV32ZBB-NEXT:    sub a2, a7, a5
+; RV32ZBB-NEXT:    neg a3, a3
+; RV32ZBB-NEXT:    add a4, a4, a6
+; RV32ZBB-NEXT:    neg a4, a4
+; RV32ZBB-NEXT:  .LBB32_4:
+; RV32ZBB-NEXT:    sw a4, 4(a0)
+; RV32ZBB-NEXT:    sw a3, 0(a0)
+; RV32ZBB-NEXT:    sw a2, 8(a0)
+; RV32ZBB-NEXT:    sw a1, 12(a0)
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_subnsw_i128_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sltu a4, a0, a2
+; RV64ZBB-NEXT:    sub a1, a1, a3
+; RV64ZBB-NEXT:    sub a1, a1, a4
+; RV64ZBB-NEXT:    sub a0, a0, a2
+; RV64ZBB-NEXT:    bgez a1, .LBB32_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    snez a2, a0
+; RV64ZBB-NEXT:    add a1, a1, a2
+; RV64ZBB-NEXT:    neg a1, a1
+; RV64ZBB-NEXT:    neg a0, a0
+; RV64ZBB-NEXT:  .LBB32_2:
+; RV64ZBB-NEXT:    ret
+  %sub = sub nsw i128 %a, %b
+  %abs = call i128 @llvm.abs.i128(i128 %sub, i1 true)
+  ret i128 %abs
+}
+
+;
+; negative tests
+;
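+; Here the subtraction carries no nsw flag, so it may wrap and abs(sub(a,b))
+; is not equivalent to abds(a,b); e.g. for i32 a = INT32_MIN, b = 1 the
+; subtraction wraps to INT32_MAX and abs returns 0x7fffffff, while abds
+; would produce 0x80000001. The fold must therefore not fire.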
+
+define i32 @abd_sub_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_sub_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_sub_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    sraiw a1, a0, 31
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_sub_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    neg a1, a0
+; RV32ZBB-NEXT:    max a0, a0, a1
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_sub_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    subw a0, a0, a1
+; RV64ZBB-NEXT:    negw a1, a0
+; RV64ZBB-NEXT:    max a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %sub = sub i32 %a, %b
+  %abs = call i32 @llvm.abs.i32(i32 %sub, i1 false)
+  ret i32 %abs
+}
+
+declare i8 @llvm.abs.i8(i8, i1)
+declare i16 @llvm.abs.i16(i16, i1)
+declare i32 @llvm.abs.i32(i32, i1)
+declare i64 @llvm.abs.i64(i64, i1)
+declare i128 @llvm.abs.i128(i128, i1)
+
+declare i8 @llvm.smax.i8(i8, i8)
+declare i16 @llvm.smax.i16(i16, i16)
+declare i32 @llvm.smax.i32(i32, i32)
+declare i64 @llvm.smax.i64(i64, i64)
+declare i128 @llvm.smax.i128(i128, i128)
+
+declare i8 @llvm.smin.i8(i8, i8)
+declare i16 @llvm.smin.i16(i16, i16)
+declare i32 @llvm.smin.i32(i32, i32)
+declare i64 @llvm.smin.i64(i64, i64)
+declare i128 @llvm.smin.i128(i128, i128)

diff --git a/llvm/test/CodeGen/RISCV/abdu-neg.ll b/llvm/test/CodeGen/RISCV/abdu-neg.ll
new file mode 100644
index 0000000000000..c74e0c2d9af16
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/abdu-neg.ll
@@ -0,0 +1,1936 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv32 | FileCheck %s --check-prefixes=NOZBB,RV32I
+; RUN: llc < %s -mtriple=riscv64 | FileCheck %s --check-prefixes=NOZBB,RV64I
+; RUN: llc < %s -mtriple=riscv32 -mattr=+zbb | FileCheck %s --check-prefixes=ZBB,RV32ZBB
+; RUN: llc < %s -mtriple=riscv64 -mattr=+zbb | FileCheck %s --check-prefixes=ZBB,RV64ZBB
+
+;
+; trunc(nabs(sub(zext(a),zext(b)))) -> nabdu(a,b)
+;
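+; Illustrative note (not part of the autogenerated checks): zero-extending
+; first makes the wide subtract exact, so negating its absolute value and
+; truncating yields the negated unsigned distance,
+;   trunc(0 - abs(zext(a) - zext(b))) = -(umax(a,b) - umin(a,b)) = nabdu(a,b)
+; which the ZBB runs below lower to a neg/min pair.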
+
+define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind {
+; RV32I-LABEL: abd_ext_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, 255
+; RV32I-NEXT:    andi a1, a1, 255
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    andi a1, a1, 255
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_ext_i8:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    andi a0, a0, 255
+; ZBB-NEXT:    andi a1, a1, 255
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    neg a1, a0
+; ZBB-NEXT:    min a0, a0, a1
+; ZBB-NEXT:    ret
+  %aext = zext i8 %a to i64
+  %bext = zext i8 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i8
+  ret i8 %trunc
+}
+
+define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_ext_i8_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, 255
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srli a1, a1, 16
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i8_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srli a1, a1, 48
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_ext_i8_i16:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    andi a0, a0, 255
+; ZBB-NEXT:    zext.h a1, a1
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    neg a1, a0
+; ZBB-NEXT:    min a0, a0, a1
+; ZBB-NEXT:    ret
+  %aext = zext i8 %a to i64
+  %bext = zext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i8
+  ret i8 %trunc
+}
+
+define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind {
+; RV32I-LABEL: abd_ext_i8_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, 255
+; RV32I-NEXT:    andi a1, a1, 255
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i8_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    andi a1, a1, 255
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_ext_i8_undef:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    andi a0, a0, 255
+; ZBB-NEXT:    andi a1, a1, 255
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    neg a1, a0
+; ZBB-NEXT:    min a0, a0, a1
+; ZBB-NEXT:    ret
+  %aext = zext i8 %a to i64
+  %bext = zext i8 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i8
+  ret i8 %trunc
+}
+
+define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_ext_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a2, 16
+; RV32I-NEXT:    addi a2, a2, -1
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a2, 16
+; RV64I-NEXT:    addiw a2, a2, -1
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_ext_i16:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    zext.h a0, a0
+; ZBB-NEXT:    zext.h a1, a1
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    neg a1, a0
+; ZBB-NEXT:    min a0, a0, a1
+; ZBB-NEXT:    ret
+  %aext = zext i16 %a to i64
+  %bext = zext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i16
+  ret i16 %trunc
+}
+
+define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_ext_i16_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
+; RV32I-NEXT:    sltu a2, a0, a1
+; RV32I-NEXT:    neg a2, a2
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    bgez a2, .LBB4_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:  .LBB4_2:
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i16_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
+; RV64I-NEXT:    slli a1, a1, 32
+; RV64I-NEXT:    srli a1, a1, 32
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i16_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    zext.h a0, a0
+; RV32ZBB-NEXT:    sltu a2, a0, a1
+; RV32ZBB-NEXT:    neg a2, a2
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    bgez a2, .LBB4_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:  .LBB4_2:
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i16_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    zext.h a0, a0
+; RV64ZBB-NEXT:    slli a1, a1, 32
+; RV64ZBB-NEXT:    srli a1, a1, 32
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    min a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %aext = zext i16 %a to i64
+  %bext = zext i32 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i16
+  ret i16 %trunc
+}
+
+define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_ext_i16_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a2, 16
+; RV32I-NEXT:    addi a2, a2, -1
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i16_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a2, 16
+; RV64I-NEXT:    addiw a2, a2, -1
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_ext_i16_undef:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    zext.h a0, a0
+; ZBB-NEXT:    zext.h a1, a1
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    neg a1, a0
+; ZBB-NEXT:    min a0, a0, a1
+; ZBB-NEXT:    ret
+  %aext = zext i16 %a to i64
+  %bext = zext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i16
+  ret i16 %trunc
+}
+
+define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_ext_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltu a2, a0, a1
+; RV32I-NEXT:    neg a2, a2
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    bgez a2, .LBB6_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:  .LBB6_2:
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    slli a1, a1, 32
+; RV64I-NEXT:    srli a1, a1, 32
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sltu a2, a0, a1
+; RV32ZBB-NEXT:    neg a2, a2
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    bgez a2, .LBB6_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:  .LBB6_2:
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    slli a0, a0, 32
+; RV64ZBB-NEXT:    srli a0, a0, 32
+; RV64ZBB-NEXT:    slli a1, a1, 32
+; RV64ZBB-NEXT:    srli a1, a1, 32
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    min a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %aext = zext i32 %a to i64
+  %bext = zext i32 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i32
+  ret i32 %trunc
+}
+
+define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_ext_i32_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srli a1, a1, 16
+; RV32I-NEXT:    sltu a2, a0, a1
+; RV32I-NEXT:    neg a2, a2
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    bgez a2, .LBB7_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:  .LBB7_2:
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i32_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srli a1, a1, 48
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i32_i16:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    zext.h a1, a1
+; RV32ZBB-NEXT:    sltu a2, a0, a1
+; RV32ZBB-NEXT:    neg a2, a2
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    bgez a2, .LBB7_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:  .LBB7_2:
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i32_i16:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    slli a0, a0, 32
+; RV64ZBB-NEXT:    srli a0, a0, 32
+; RV64ZBB-NEXT:    zext.h a1, a1
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    min a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %aext = zext i32 %a to i64
+  %bext = zext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i32
+  ret i32 %trunc
+}
+
+define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_ext_i32_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltu a2, a0, a1
+; RV32I-NEXT:    neg a2, a2
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    bgez a2, .LBB8_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:  .LBB8_2:
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i32_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    slli a1, a1, 32
+; RV64I-NEXT:    srli a1, a1, 32
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i32_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sltu a2, a0, a1
+; RV32ZBB-NEXT:    neg a2, a2
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    bgez a2, .LBB8_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:  .LBB8_2:
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i32_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    slli a0, a0, 32
+; RV64ZBB-NEXT:    srli a0, a0, 32
+; RV64ZBB-NEXT:    slli a1, a1, 32
+; RV64ZBB-NEXT:    srli a1, a1, 32
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    min a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %aext = zext i32 %a to i64
+  %bext = zext i32 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  %nabs = sub i64 0, %abs
+  %trunc = trunc i64 %nabs to i32
+  ret i32 %trunc
+}
+
+define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: abd_ext_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltu a4, a0, a2
+; RV32I-NEXT:    mv a5, a4
+; RV32I-NEXT:    beq a1, a3, .LBB9_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu a5, a1, a3
+; RV32I-NEXT:  .LBB9_2:
+; RV32I-NEXT:    neg a5, a5
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sub a1, a1, a4
+; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    bgez a5, .LBB9_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    snez a2, a0
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:  .LBB9_4:
+; RV32I-NEXT:    snez a2, a0
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sltu a2, a0, a1
+; RV64I-NEXT:    neg a2, a2
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    bgez a2, .LBB9_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:  .LBB9_2:
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i64:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sltu a4, a0, a2
+; RV32ZBB-NEXT:    mv a5, a4
+; RV32ZBB-NEXT:    beq a1, a3, .LBB9_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu a5, a1, a3
+; RV32ZBB-NEXT:  .LBB9_2:
+; RV32ZBB-NEXT:    neg a5, a5
+; RV32ZBB-NEXT:    sub a1, a1, a3
+; RV32ZBB-NEXT:    sub a1, a1, a4
+; RV32ZBB-NEXT:    sub a0, a0, a2
+; RV32ZBB-NEXT:    bgez a5, .LBB9_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    snez a2, a0
+; RV32ZBB-NEXT:    add a1, a1, a2
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:  .LBB9_4:
+; RV32ZBB-NEXT:    snez a2, a0
+; RV32ZBB-NEXT:    add a1, a1, a2
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i64:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sltu a2, a0, a1
+; RV64ZBB-NEXT:    neg a2, a2
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    bgez a2, .LBB9_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    neg a0, a0
+; RV64ZBB-NEXT:  .LBB9_2:
+; RV64ZBB-NEXT:    neg a0, a0
+; RV64ZBB-NEXT:    ret
+  %aext = zext i64 %a to i128
+  %bext = zext i64 %b to i128
+  %sub = sub i128 %aext, %bext
+  %abs = call i128 @llvm.abs.i128(i128 %sub, i1 false)
+  %nabs = sub i128 0, %abs
+  %trunc = trunc i128 %nabs to i64
+  ret i64 %trunc
+}
+
+define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: abd_ext_i64_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltu a4, a0, a2
+; RV32I-NEXT:    mv a5, a4
+; RV32I-NEXT:    beq a1, a3, .LBB10_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu a5, a1, a3
+; RV32I-NEXT:  .LBB10_2:
+; RV32I-NEXT:    neg a5, a5
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sub a1, a1, a4
+; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    bgez a5, .LBB10_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    snez a2, a0
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:  .LBB10_4:
+; RV32I-NEXT:    snez a2, a0
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i64_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sltu a2, a0, a1
+; RV64I-NEXT:    neg a2, a2
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    bgez a2, .LBB10_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:  .LBB10_2:
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i64_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sltu a4, a0, a2
+; RV32ZBB-NEXT:    mv a5, a4
+; RV32ZBB-NEXT:    beq a1, a3, .LBB10_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu a5, a1, a3
+; RV32ZBB-NEXT:  .LBB10_2:
+; RV32ZBB-NEXT:    neg a5, a5
+; RV32ZBB-NEXT:    sub a1, a1, a3
+; RV32ZBB-NEXT:    sub a1, a1, a4
+; RV32ZBB-NEXT:    sub a0, a0, a2
+; RV32ZBB-NEXT:    bgez a5, .LBB10_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    snez a2, a0
+; RV32ZBB-NEXT:    add a1, a1, a2
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:  .LBB10_4:
+; RV32ZBB-NEXT:    snez a2, a0
+; RV32ZBB-NEXT:    add a1, a1, a2
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i64_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sltu a2, a0, a1
+; RV64ZBB-NEXT:    neg a2, a2
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    bgez a2, .LBB10_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    neg a0, a0
+; RV64ZBB-NEXT:  .LBB10_2:
+; RV64ZBB-NEXT:    neg a0, a0
+; RV64ZBB-NEXT:    ret
+  %aext = zext i64 %a to i128
+  %bext = zext i64 %b to i128
+  %sub = sub i128 %aext, %bext
+  %abs = call i128 @llvm.abs.i128(i128 %sub, i1 true)
+  %nabs = sub i128 0, %abs
+  %trunc = trunc i128 %nabs to i64
+  ret i64 %trunc
+}
+
+define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind {
+; RV32I-LABEL: abd_ext_i128:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lw a3, 0(a2)
+; RV32I-NEXT:    lw a4, 0(a1)
+; RV32I-NEXT:    lw a5, 4(a2)
+; RV32I-NEXT:    lw a6, 8(a2)
+; RV32I-NEXT:    lw a7, 8(a1)
+; RV32I-NEXT:    lw a2, 12(a2)
+; RV32I-NEXT:    lw t0, 12(a1)
+; RV32I-NEXT:    lw a1, 4(a1)
+; RV32I-NEXT:    sltu t1, a7, a6
+; RV32I-NEXT:    mv t4, t1
+; RV32I-NEXT:    beq t0, a2, .LBB11_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu t4, t0, a2
+; RV32I-NEXT:  .LBB11_2:
+; RV32I-NEXT:    sltu t2, a4, a3
+; RV32I-NEXT:    mv t3, t2
+; RV32I-NEXT:    beq a1, a5, .LBB11_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    sltu t3, a1, a5
+; RV32I-NEXT:  .LBB11_4:
+; RV32I-NEXT:    xor t5, t0, a2
+; RV32I-NEXT:    xor t6, a7, a6
+; RV32I-NEXT:    or t6, t6, t5
+; RV32I-NEXT:    mv t5, t3
+; RV32I-NEXT:    beqz t6, .LBB11_6
+; RV32I-NEXT:  # %bb.5:
+; RV32I-NEXT:    mv t5, t4
+; RV32I-NEXT:  .LBB11_6:
+; RV32I-NEXT:    neg t4, t5
+; RV32I-NEXT:    sub a2, t0, a2
+; RV32I-NEXT:    sub a2, a2, t1
+; RV32I-NEXT:    sub a6, a7, a6
+; RV32I-NEXT:    sltu a7, a6, t3
+; RV32I-NEXT:    sub a2, a2, a7
+; RV32I-NEXT:    sub a6, a6, t3
+; RV32I-NEXT:    sub a1, a1, a5
+; RV32I-NEXT:    sub a1, a1, t2
+; RV32I-NEXT:    sub a4, a4, a3
+; RV32I-NEXT:    bgez t4, .LBB11_8
+; RV32I-NEXT:  # %bb.7:
+; RV32I-NEXT:    snez a3, a1
+; RV32I-NEXT:    snez a5, a4
+; RV32I-NEXT:    or a3, a5, a3
+; RV32I-NEXT:    neg a7, a6
+; RV32I-NEXT:    sltu t0, a7, a3
+; RV32I-NEXT:    snez a6, a6
+; RV32I-NEXT:    add a2, a2, a6
+; RV32I-NEXT:    neg a2, a2
+; RV32I-NEXT:    sub a2, a2, t0
+; RV32I-NEXT:    sub a6, a7, a3
+; RV32I-NEXT:    neg a4, a4
+; RV32I-NEXT:    add a1, a1, a5
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:  .LBB11_8:
+; RV32I-NEXT:    snez a3, a1
+; RV32I-NEXT:    snez a5, a4
+; RV32I-NEXT:    or a3, a5, a3
+; RV32I-NEXT:    neg a7, a6
+; RV32I-NEXT:    sltu t0, a7, a3
+; RV32I-NEXT:    snez a6, a6
+; RV32I-NEXT:    add a2, a2, a6
+; RV32I-NEXT:    neg a2, a2
+; RV32I-NEXT:    sub a2, a2, t0
+; RV32I-NEXT:    sub a3, a7, a3
+; RV32I-NEXT:    add a1, a1, a5
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    neg a4, a4
+; RV32I-NEXT:    sw a4, 0(a0)
+; RV32I-NEXT:    sw a1, 4(a0)
+; RV32I-NEXT:    sw a3, 8(a0)
+; RV32I-NEXT:    sw a2, 12(a0)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sltu a4, a0, a2
+; RV64I-NEXT:    mv a5, a4
+; RV64I-NEXT:    beq a1, a3, .LBB11_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sltu a5, a1, a3
+; RV64I-NEXT:  .LBB11_2:
+; RV64I-NEXT:    neg a5, a5
+; RV64I-NEXT:    sub a1, a1, a3
+; RV64I-NEXT:    sub a1, a1, a4
+; RV64I-NEXT:    sub a0, a0, a2
+; RV64I-NEXT:    bgez a5, .LBB11_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    snez a2, a0
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:  .LBB11_4:
+; RV64I-NEXT:    snez a2, a0
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i128:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lw a3, 0(a2)
+; RV32ZBB-NEXT:    lw a4, 0(a1)
+; RV32ZBB-NEXT:    lw a5, 4(a2)
+; RV32ZBB-NEXT:    lw a6, 8(a2)
+; RV32ZBB-NEXT:    lw a7, 8(a1)
+; RV32ZBB-NEXT:    lw a2, 12(a2)
+; RV32ZBB-NEXT:    lw t0, 12(a1)
+; RV32ZBB-NEXT:    lw a1, 4(a1)
+; RV32ZBB-NEXT:    sltu t1, a7, a6
+; RV32ZBB-NEXT:    mv t4, t1
+; RV32ZBB-NEXT:    beq t0, a2, .LBB11_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu t4, t0, a2
+; RV32ZBB-NEXT:  .LBB11_2:
+; RV32ZBB-NEXT:    sltu t2, a4, a3
+; RV32ZBB-NEXT:    mv t3, t2
+; RV32ZBB-NEXT:    beq a1, a5, .LBB11_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    sltu t3, a1, a5
+; RV32ZBB-NEXT:  .LBB11_4:
+; RV32ZBB-NEXT:    xor t5, t0, a2
+; RV32ZBB-NEXT:    xor t6, a7, a6
+; RV32ZBB-NEXT:    or t6, t6, t5
+; RV32ZBB-NEXT:    mv t5, t3
+; RV32ZBB-NEXT:    beqz t6, .LBB11_6
+; RV32ZBB-NEXT:  # %bb.5:
+; RV32ZBB-NEXT:    mv t5, t4
+; RV32ZBB-NEXT:  .LBB11_6:
+; RV32ZBB-NEXT:    neg t4, t5
+; RV32ZBB-NEXT:    sub a2, t0, a2
+; RV32ZBB-NEXT:    sub a2, a2, t1
+; RV32ZBB-NEXT:    sub a6, a7, a6
+; RV32ZBB-NEXT:    sltu a7, a6, t3
+; RV32ZBB-NEXT:    sub a2, a2, a7
+; RV32ZBB-NEXT:    sub a6, a6, t3
+; RV32ZBB-NEXT:    sub a1, a1, a5
+; RV32ZBB-NEXT:    sub a1, a1, t2
+; RV32ZBB-NEXT:    sub a4, a4, a3
+; RV32ZBB-NEXT:    bgez t4, .LBB11_8
+; RV32ZBB-NEXT:  # %bb.7:
+; RV32ZBB-NEXT:    snez a3, a1
+; RV32ZBB-NEXT:    snez a5, a4
+; RV32ZBB-NEXT:    or a3, a5, a3
+; RV32ZBB-NEXT:    neg a7, a6
+; RV32ZBB-NEXT:    sltu t0, a7, a3
+; RV32ZBB-NEXT:    snez a6, a6
+; RV32ZBB-NEXT:    add a2, a2, a6
+; RV32ZBB-NEXT:    neg a2, a2
+; RV32ZBB-NEXT:    sub a2, a2, t0
+; RV32ZBB-NEXT:    sub a6, a7, a3
+; RV32ZBB-NEXT:    neg a4, a4
+; RV32ZBB-NEXT:    add a1, a1, a5
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:  .LBB11_8:
+; RV32ZBB-NEXT:    snez a3, a1
+; RV32ZBB-NEXT:    snez a5, a4
+; RV32ZBB-NEXT:    or a3, a5, a3
+; RV32ZBB-NEXT:    neg a7, a6
+; RV32ZBB-NEXT:    sltu t0, a7, a3
+; RV32ZBB-NEXT:    snez a6, a6
+; RV32ZBB-NEXT:    add a2, a2, a6
+; RV32ZBB-NEXT:    neg a2, a2
+; RV32ZBB-NEXT:    sub a2, a2, t0
+; RV32ZBB-NEXT:    sub a3, a7, a3
+; RV32ZBB-NEXT:    add a1, a1, a5
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:    neg a4, a4
+; RV32ZBB-NEXT:    sw a4, 0(a0)
+; RV32ZBB-NEXT:    sw a1, 4(a0)
+; RV32ZBB-NEXT:    sw a3, 8(a0)
+; RV32ZBB-NEXT:    sw a2, 12(a0)
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i128:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sltu a4, a0, a2
+; RV64ZBB-NEXT:    mv a5, a4
+; RV64ZBB-NEXT:    beq a1, a3, .LBB11_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    sltu a5, a1, a3
+; RV64ZBB-NEXT:  .LBB11_2:
+; RV64ZBB-NEXT:    neg a5, a5
+; RV64ZBB-NEXT:    sub a1, a1, a3
+; RV64ZBB-NEXT:    sub a1, a1, a4
+; RV64ZBB-NEXT:    sub a0, a0, a2
+; RV64ZBB-NEXT:    bgez a5, .LBB11_4
+; RV64ZBB-NEXT:  # %bb.3:
+; RV64ZBB-NEXT:    snez a2, a0
+; RV64ZBB-NEXT:    add a1, a1, a2
+; RV64ZBB-NEXT:    neg a1, a1
+; RV64ZBB-NEXT:    neg a0, a0
+; RV64ZBB-NEXT:  .LBB11_4:
+; RV64ZBB-NEXT:    snez a2, a0
+; RV64ZBB-NEXT:    add a1, a1, a2
+; RV64ZBB-NEXT:    neg a1, a1
+; RV64ZBB-NEXT:    neg a0, a0
+; RV64ZBB-NEXT:    ret
+  %aext = zext i128 %a to i256
+  %bext = zext i128 %b to i256
+  %sub = sub i256 %aext, %bext
+  %abs = call i256 @llvm.abs.i256(i256 %sub, i1 false)
+  %nabs = sub i256 0, %abs
+  %trunc = trunc i256 %nabs to i128
+  ret i128 %trunc
+}
+
+define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind {
+; RV32I-LABEL: abd_ext_i128_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lw a3, 0(a2)
+; RV32I-NEXT:    lw a4, 0(a1)
+; RV32I-NEXT:    lw a5, 4(a2)
+; RV32I-NEXT:    lw a6, 8(a2)
+; RV32I-NEXT:    lw a7, 8(a1)
+; RV32I-NEXT:    lw a2, 12(a2)
+; RV32I-NEXT:    lw t0, 12(a1)
+; RV32I-NEXT:    lw a1, 4(a1)
+; RV32I-NEXT:    sltu t1, a7, a6
+; RV32I-NEXT:    mv t4, t1
+; RV32I-NEXT:    beq t0, a2, .LBB12_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu t4, t0, a2
+; RV32I-NEXT:  .LBB12_2:
+; RV32I-NEXT:    sltu t2, a4, a3
+; RV32I-NEXT:    mv t3, t2
+; RV32I-NEXT:    beq a1, a5, .LBB12_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    sltu t3, a1, a5
+; RV32I-NEXT:  .LBB12_4:
+; RV32I-NEXT:    xor t5, t0, a2
+; RV32I-NEXT:    xor t6, a7, a6
+; RV32I-NEXT:    or t6, t6, t5
+; RV32I-NEXT:    mv t5, t3
+; RV32I-NEXT:    beqz t6, .LBB12_6
+; RV32I-NEXT:  # %bb.5:
+; RV32I-NEXT:    mv t5, t4
+; RV32I-NEXT:  .LBB12_6:
+; RV32I-NEXT:    neg t4, t5
+; RV32I-NEXT:    sub a2, t0, a2
+; RV32I-NEXT:    sub a2, a2, t1
+; RV32I-NEXT:    sub a6, a7, a6
+; RV32I-NEXT:    sltu a7, a6, t3
+; RV32I-NEXT:    sub a2, a2, a7
+; RV32I-NEXT:    sub a6, a6, t3
+; RV32I-NEXT:    sub a1, a1, a5
+; RV32I-NEXT:    sub a1, a1, t2
+; RV32I-NEXT:    sub a4, a4, a3
+; RV32I-NEXT:    bgez t4, .LBB12_8
+; RV32I-NEXT:  # %bb.7:
+; RV32I-NEXT:    snez a3, a1
+; RV32I-NEXT:    snez a5, a4
+; RV32I-NEXT:    or a3, a5, a3
+; RV32I-NEXT:    neg a7, a6
+; RV32I-NEXT:    sltu t0, a7, a3
+; RV32I-NEXT:    snez a6, a6
+; RV32I-NEXT:    add a2, a2, a6
+; RV32I-NEXT:    neg a2, a2
+; RV32I-NEXT:    sub a2, a2, t0
+; RV32I-NEXT:    sub a6, a7, a3
+; RV32I-NEXT:    neg a4, a4
+; RV32I-NEXT:    add a1, a1, a5
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:  .LBB12_8:
+; RV32I-NEXT:    snez a3, a1
+; RV32I-NEXT:    snez a5, a4
+; RV32I-NEXT:    or a3, a5, a3
+; RV32I-NEXT:    neg a7, a6
+; RV32I-NEXT:    sltu t0, a7, a3
+; RV32I-NEXT:    snez a6, a6
+; RV32I-NEXT:    add a2, a2, a6
+; RV32I-NEXT:    neg a2, a2
+; RV32I-NEXT:    sub a2, a2, t0
+; RV32I-NEXT:    sub a3, a7, a3
+; RV32I-NEXT:    add a1, a1, a5
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    neg a4, a4
+; RV32I-NEXT:    sw a4, 0(a0)
+; RV32I-NEXT:    sw a1, 4(a0)
+; RV32I-NEXT:    sw a3, 8(a0)
+; RV32I-NEXT:    sw a2, 12(a0)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i128_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sltu a4, a0, a2
+; RV64I-NEXT:    mv a5, a4
+; RV64I-NEXT:    beq a1, a3, .LBB12_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sltu a5, a1, a3
+; RV64I-NEXT:  .LBB12_2:
+; RV64I-NEXT:    neg a5, a5
+; RV64I-NEXT:    sub a1, a1, a3
+; RV64I-NEXT:    sub a1, a1, a4
+; RV64I-NEXT:    sub a0, a0, a2
+; RV64I-NEXT:    bgez a5, .LBB12_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    snez a2, a0
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:  .LBB12_4:
+; RV64I-NEXT:    snez a2, a0
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i128_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lw a3, 0(a2)
+; RV32ZBB-NEXT:    lw a4, 0(a1)
+; RV32ZBB-NEXT:    lw a5, 4(a2)
+; RV32ZBB-NEXT:    lw a6, 8(a2)
+; RV32ZBB-NEXT:    lw a7, 8(a1)
+; RV32ZBB-NEXT:    lw a2, 12(a2)
+; RV32ZBB-NEXT:    lw t0, 12(a1)
+; RV32ZBB-NEXT:    lw a1, 4(a1)
+; RV32ZBB-NEXT:    sltu t1, a7, a6
+; RV32ZBB-NEXT:    mv t4, t1
+; RV32ZBB-NEXT:    beq t0, a2, .LBB12_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu t4, t0, a2
+; RV32ZBB-NEXT:  .LBB12_2:
+; RV32ZBB-NEXT:    sltu t2, a4, a3
+; RV32ZBB-NEXT:    mv t3, t2
+; RV32ZBB-NEXT:    beq a1, a5, .LBB12_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    sltu t3, a1, a5
+; RV32ZBB-NEXT:  .LBB12_4:
+; RV32ZBB-NEXT:    xor t5, t0, a2
+; RV32ZBB-NEXT:    xor t6, a7, a6
+; RV32ZBB-NEXT:    or t6, t6, t5
+; RV32ZBB-NEXT:    mv t5, t3
+; RV32ZBB-NEXT:    beqz t6, .LBB12_6
+; RV32ZBB-NEXT:  # %bb.5:
+; RV32ZBB-NEXT:    mv t5, t4
+; RV32ZBB-NEXT:  .LBB12_6:
+; RV32ZBB-NEXT:    neg t4, t5
+; RV32ZBB-NEXT:    sub a2, t0, a2
+; RV32ZBB-NEXT:    sub a2, a2, t1
+; RV32ZBB-NEXT:    sub a6, a7, a6
+; RV32ZBB-NEXT:    sltu a7, a6, t3
+; RV32ZBB-NEXT:    sub a2, a2, a7
+; RV32ZBB-NEXT:    sub a6, a6, t3
+; RV32ZBB-NEXT:    sub a1, a1, a5
+; RV32ZBB-NEXT:    sub a1, a1, t2
+; RV32ZBB-NEXT:    sub a4, a4, a3
+; RV32ZBB-NEXT:    bgez t4, .LBB12_8
+; RV32ZBB-NEXT:  # %bb.7:
+; RV32ZBB-NEXT:    snez a3, a1
+; RV32ZBB-NEXT:    snez a5, a4
+; RV32ZBB-NEXT:    or a3, a5, a3
+; RV32ZBB-NEXT:    neg a7, a6
+; RV32ZBB-NEXT:    sltu t0, a7, a3
+; RV32ZBB-NEXT:    snez a6, a6
+; RV32ZBB-NEXT:    add a2, a2, a6
+; RV32ZBB-NEXT:    neg a2, a2
+; RV32ZBB-NEXT:    sub a2, a2, t0
+; RV32ZBB-NEXT:    sub a6, a7, a3
+; RV32ZBB-NEXT:    neg a4, a4
+; RV32ZBB-NEXT:    add a1, a1, a5
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:  .LBB12_8:
+; RV32ZBB-NEXT:    snez a3, a1
+; RV32ZBB-NEXT:    snez a5, a4
+; RV32ZBB-NEXT:    or a3, a5, a3
+; RV32ZBB-NEXT:    neg a7, a6
+; RV32ZBB-NEXT:    sltu t0, a7, a3
+; RV32ZBB-NEXT:    snez a6, a6
+; RV32ZBB-NEXT:    add a2, a2, a6
+; RV32ZBB-NEXT:    neg a2, a2
+; RV32ZBB-NEXT:    sub a2, a2, t0
+; RV32ZBB-NEXT:    sub a3, a7, a3
+; RV32ZBB-NEXT:    add a1, a1, a5
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:    neg a4, a4
+; RV32ZBB-NEXT:    sw a4, 0(a0)
+; RV32ZBB-NEXT:    sw a1, 4(a0)
+; RV32ZBB-NEXT:    sw a3, 8(a0)
+; RV32ZBB-NEXT:    sw a2, 12(a0)
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i128_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sltu a4, a0, a2
+; RV64ZBB-NEXT:    mv a5, a4
+; RV64ZBB-NEXT:    beq a1, a3, .LBB12_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    sltu a5, a1, a3
+; RV64ZBB-NEXT:  .LBB12_2:
+; RV64ZBB-NEXT:    neg a5, a5
+; RV64ZBB-NEXT:    sub a1, a1, a3
+; RV64ZBB-NEXT:    sub a1, a1, a4
+; RV64ZBB-NEXT:    sub a0, a0, a2
+; RV64ZBB-NEXT:    bgez a5, .LBB12_4
+; RV64ZBB-NEXT:  # %bb.3:
+; RV64ZBB-NEXT:    snez a2, a0
+; RV64ZBB-NEXT:    add a1, a1, a2
+; RV64ZBB-NEXT:    neg a1, a1
+; RV64ZBB-NEXT:    neg a0, a0
+; RV64ZBB-NEXT:  .LBB12_4:
+; RV64ZBB-NEXT:    snez a2, a0
+; RV64ZBB-NEXT:    add a1, a1, a2
+; RV64ZBB-NEXT:    neg a1, a1
+; RV64ZBB-NEXT:    neg a0, a0
+; RV64ZBB-NEXT:    ret
+  %aext = zext i128 %a to i256
+  %bext = zext i128 %b to i256
+  %sub = sub i256 %aext, %bext
+  %abs = call i256 @llvm.abs.i256(i256 %sub, i1 true)
+  %nabs = sub i256 0, %abs
+  %trunc = trunc i256 %nabs to i128
+  ret i128 %trunc
+}
+
+;
+; sub(umin(a,b),umax(a,b)) -> nabdu(a,b)
+;
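+; Why this is the negated unsigned difference (illustrative note, not part of
+; the autogenerated checks):
+;   umin(a,b) - umax(a,b) = -(umax(a,b) - umin(a,b)) = -|a - b|
+; for unsigned a and b; only the final negation wraps, so the fold is exact.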
+
+define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind {
+; NOZBB-LABEL: abd_minmax_i8:
+; NOZBB:       # %bb.0:
+; NOZBB-NEXT:    andi a1, a1, 255
+; NOZBB-NEXT:    andi a0, a0, 255
+; NOZBB-NEXT:    mv a2, a0
+; NOZBB-NEXT:    bgeu a0, a1, .LBB13_3
+; NOZBB-NEXT:  # %bb.1:
+; NOZBB-NEXT:    bgeu a1, a0, .LBB13_4
+; NOZBB-NEXT:  .LBB13_2:
+; NOZBB-NEXT:    sub a0, a2, a0
+; NOZBB-NEXT:    ret
+; NOZBB-NEXT:  .LBB13_3:
+; NOZBB-NEXT:    mv a2, a1
+; NOZBB-NEXT:    bltu a1, a0, .LBB13_2
+; NOZBB-NEXT:  .LBB13_4:
+; NOZBB-NEXT:    sub a0, a2, a1
+; NOZBB-NEXT:    ret
+;
+; ZBB-LABEL: abd_minmax_i8:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    andi a1, a1, 255
+; ZBB-NEXT:    andi a0, a0, 255
+; ZBB-NEXT:    minu a2, a0, a1
+; ZBB-NEXT:    maxu a0, a0, a1
+; ZBB-NEXT:    sub a0, a2, a0
+; ZBB-NEXT:    ret
+  %min = call i8 @llvm.umin.i8(i8 %a, i8 %b)
+  %max = call i8 @llvm.umax.i8(i8 %a, i8 %b)
+  %sub = sub i8 %min, %max
+  ret i8 %sub
+}
+
+define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_minmax_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a2, 16
+; RV32I-NEXT:    addi a2, a2, -1
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bgeu a0, a1, .LBB14_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    bgeu a1, a0, .LBB14_4
+; RV32I-NEXT:  .LBB14_2:
+; RV32I-NEXT:    sub a0, a2, a0
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB14_3:
+; RV32I-NEXT:    mv a2, a1
+; RV32I-NEXT:    bltu a1, a0, .LBB14_2
+; RV32I-NEXT:  .LBB14_4:
+; RV32I-NEXT:    sub a0, a2, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_minmax_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a2, 16
+; RV64I-NEXT:    addiw a2, a2, -1
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu a0, a1, .LBB14_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bgeu a1, a0, .LBB14_4
+; RV64I-NEXT:  .LBB14_2:
+; RV64I-NEXT:    sub a0, a2, a0
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB14_3:
+; RV64I-NEXT:    mv a2, a1
+; RV64I-NEXT:    bltu a1, a0, .LBB14_2
+; RV64I-NEXT:  .LBB14_4:
+; RV64I-NEXT:    sub a0, a2, a1
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_minmax_i16:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    zext.h a1, a1
+; ZBB-NEXT:    zext.h a0, a0
+; ZBB-NEXT:    minu a2, a0, a1
+; ZBB-NEXT:    maxu a0, a0, a1
+; ZBB-NEXT:    sub a0, a2, a0
+; ZBB-NEXT:    ret
+  %min = call i16 @llvm.umin.i16(i16 %a, i16 %b)
+  %max = call i16 @llvm.umax.i16(i16 %a, i16 %b)
+  %sub = sub i16 %min, %max
+  ret i16 %sub
+}
+
+define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_minmax_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bgeu a0, a1, .LBB15_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    bgeu a1, a0, .LBB15_4
+; RV32I-NEXT:  .LBB15_2:
+; RV32I-NEXT:    sub a0, a2, a0
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB15_3:
+; RV32I-NEXT:    mv a2, a1
+; RV32I-NEXT:    bltu a1, a0, .LBB15_2
+; RV32I-NEXT:  .LBB15_4:
+; RV32I-NEXT:    sub a0, a2, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_minmax_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a1, a1
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu a0, a1, .LBB15_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bgeu a1, a0, .LBB15_4
+; RV64I-NEXT:  .LBB15_2:
+; RV64I-NEXT:    subw a0, a2, a0
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB15_3:
+; RV64I-NEXT:    mv a2, a1
+; RV64I-NEXT:    bltu a1, a0, .LBB15_2
+; RV64I-NEXT:  .LBB15_4:
+; RV64I-NEXT:    subw a0, a2, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_minmax_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    minu a2, a0, a1
+; RV32ZBB-NEXT:    maxu a0, a0, a1
+; RV32ZBB-NEXT:    sub a0, a2, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_minmax_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sext.w a1, a1
+; RV64ZBB-NEXT:    sext.w a0, a0
+; RV64ZBB-NEXT:    minu a2, a0, a1
+; RV64ZBB-NEXT:    maxu a0, a0, a1
+; RV64ZBB-NEXT:    subw a0, a2, a0
+; RV64ZBB-NEXT:    ret
+  %min = call i32 @llvm.umin.i32(i32 %a, i32 %b)
+  %max = call i32 @llvm.umax.i32(i32 %a, i32 %b)
+  %sub = sub i32 %min, %max
+  ret i32 %sub
+}
+
+define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: abd_minmax_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    beq a1, a3, .LBB16_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu a6, a1, a3
+; RV32I-NEXT:    j .LBB16_3
+; RV32I-NEXT:  .LBB16_2:
+; RV32I-NEXT:    sltu a6, a0, a2
+; RV32I-NEXT:  .LBB16_3:
+; RV32I-NEXT:    mv a4, a1
+; RV32I-NEXT:    mv a5, a0
+; RV32I-NEXT:    bnez a6, .LBB16_5
+; RV32I-NEXT:  # %bb.4:
+; RV32I-NEXT:    mv a4, a3
+; RV32I-NEXT:    mv a5, a2
+; RV32I-NEXT:  .LBB16_5:
+; RV32I-NEXT:    beq a1, a3, .LBB16_7
+; RV32I-NEXT:  # %bb.6:
+; RV32I-NEXT:    sltu a6, a3, a1
+; RV32I-NEXT:    beqz a6, .LBB16_8
+; RV32I-NEXT:    j .LBB16_9
+; RV32I-NEXT:  .LBB16_7:
+; RV32I-NEXT:    sltu a6, a2, a0
+; RV32I-NEXT:    bnez a6, .LBB16_9
+; RV32I-NEXT:  .LBB16_8:
+; RV32I-NEXT:    mv a1, a3
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:  .LBB16_9:
+; RV32I-NEXT:    sltu a2, a5, a0
+; RV32I-NEXT:    sub a1, a4, a1
+; RV32I-NEXT:    sub a1, a1, a2
+; RV32I-NEXT:    sub a0, a5, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_minmax_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu a0, a1, .LBB16_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bgeu a1, a0, .LBB16_4
+; RV64I-NEXT:  .LBB16_2:
+; RV64I-NEXT:    sub a0, a2, a0
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB16_3:
+; RV64I-NEXT:    mv a2, a1
+; RV64I-NEXT:    bltu a1, a0, .LBB16_2
+; RV64I-NEXT:  .LBB16_4:
+; RV64I-NEXT:    sub a0, a2, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_minmax_i64:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    beq a1, a3, .LBB16_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu a6, a1, a3
+; RV32ZBB-NEXT:    j .LBB16_3
+; RV32ZBB-NEXT:  .LBB16_2:
+; RV32ZBB-NEXT:    sltu a6, a0, a2
+; RV32ZBB-NEXT:  .LBB16_3:
+; RV32ZBB-NEXT:    mv a4, a1
+; RV32ZBB-NEXT:    mv a5, a0
+; RV32ZBB-NEXT:    bnez a6, .LBB16_5
+; RV32ZBB-NEXT:  # %bb.4:
+; RV32ZBB-NEXT:    mv a4, a3
+; RV32ZBB-NEXT:    mv a5, a2
+; RV32ZBB-NEXT:  .LBB16_5:
+; RV32ZBB-NEXT:    beq a1, a3, .LBB16_7
+; RV32ZBB-NEXT:  # %bb.6:
+; RV32ZBB-NEXT:    sltu a6, a3, a1
+; RV32ZBB-NEXT:    beqz a6, .LBB16_8
+; RV32ZBB-NEXT:    j .LBB16_9
+; RV32ZBB-NEXT:  .LBB16_7:
+; RV32ZBB-NEXT:    sltu a6, a2, a0
+; RV32ZBB-NEXT:    bnez a6, .LBB16_9
+; RV32ZBB-NEXT:  .LBB16_8:
+; RV32ZBB-NEXT:    mv a1, a3
+; RV32ZBB-NEXT:    mv a0, a2
+; RV32ZBB-NEXT:  .LBB16_9:
+; RV32ZBB-NEXT:    sltu a2, a5, a0
+; RV32ZBB-NEXT:    sub a1, a4, a1
+; RV32ZBB-NEXT:    sub a1, a1, a2
+; RV32ZBB-NEXT:    sub a0, a5, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_minmax_i64:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    minu a2, a0, a1
+; RV64ZBB-NEXT:    maxu a0, a0, a1
+; RV64ZBB-NEXT:    sub a0, a2, a0
+; RV64ZBB-NEXT:    ret
+  %min = call i64 @llvm.umin.i64(i64 %a, i64 %b)
+  %max = call i64 @llvm.umax.i64(i64 %a, i64 %b)
+  %sub = sub i64 %min, %max
+  ret i64 %sub
+}
+
+define i128 @abd_minmax_i128(i128 %a, i128 %b) nounwind {
+; RV32I-LABEL: abd_minmax_i128:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lw a6, 4(a2)
+; RV32I-NEXT:    lw a3, 4(a1)
+; RV32I-NEXT:    lw a7, 8(a2)
+; RV32I-NEXT:    lw t0, 12(a2)
+; RV32I-NEXT:    lw a5, 12(a1)
+; RV32I-NEXT:    lw a4, 8(a1)
+; RV32I-NEXT:    beq a5, t0, .LBB17_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu t1, a5, t0
+; RV32I-NEXT:    j .LBB17_3
+; RV32I-NEXT:  .LBB17_2:
+; RV32I-NEXT:    sltu t1, a4, a7
+; RV32I-NEXT:  .LBB17_3:
+; RV32I-NEXT:    lw t2, 0(a2)
+; RV32I-NEXT:    lw a1, 0(a1)
+; RV32I-NEXT:    beq a3, a6, .LBB17_5
+; RV32I-NEXT:  # %bb.4:
+; RV32I-NEXT:    sltu t6, a3, a6
+; RV32I-NEXT:    j .LBB17_6
+; RV32I-NEXT:  .LBB17_5:
+; RV32I-NEXT:    sltu t6, a1, t2
+; RV32I-NEXT:  .LBB17_6:
+; RV32I-NEXT:    xor a2, a5, t0
+; RV32I-NEXT:    xor t3, a4, a7
+; RV32I-NEXT:    or t5, t3, a2
+; RV32I-NEXT:    beqz t5, .LBB17_8
+; RV32I-NEXT:  # %bb.7:
+; RV32I-NEXT:    mv t6, t1
+; RV32I-NEXT:  .LBB17_8:
+; RV32I-NEXT:    mv a2, a1
+; RV32I-NEXT:    mv t1, a3
+; RV32I-NEXT:    mv t4, a5
+; RV32I-NEXT:    mv t3, a4
+; RV32I-NEXT:    bnez t6, .LBB17_10
+; RV32I-NEXT:  # %bb.9:
+; RV32I-NEXT:    mv a2, t2
+; RV32I-NEXT:    mv t1, a6
+; RV32I-NEXT:    mv t4, t0
+; RV32I-NEXT:    mv t3, a7
+; RV32I-NEXT:  .LBB17_10:
+; RV32I-NEXT:    beq a5, t0, .LBB17_12
+; RV32I-NEXT:  # %bb.11:
+; RV32I-NEXT:    sltu t6, t0, a5
+; RV32I-NEXT:    j .LBB17_13
+; RV32I-NEXT:  .LBB17_12:
+; RV32I-NEXT:    sltu t6, a7, a4
+; RV32I-NEXT:  .LBB17_13:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    beq a3, a6, .LBB17_15
+; RV32I-NEXT:  # %bb.14:
+; RV32I-NEXT:    sltu s0, a6, a3
+; RV32I-NEXT:    bnez t5, .LBB17_16
+; RV32I-NEXT:    j .LBB17_17
+; RV32I-NEXT:  .LBB17_15:
+; RV32I-NEXT:    sltu s0, t2, a1
+; RV32I-NEXT:    beqz t5, .LBB17_17
+; RV32I-NEXT:  .LBB17_16:
+; RV32I-NEXT:    mv s0, t6
+; RV32I-NEXT:  .LBB17_17:
+; RV32I-NEXT:    bnez s0, .LBB17_19
+; RV32I-NEXT:  # %bb.18:
+; RV32I-NEXT:    mv a1, t2
+; RV32I-NEXT:    mv a3, a6
+; RV32I-NEXT:    mv a5, t0
+; RV32I-NEXT:    mv a4, a7
+; RV32I-NEXT:  .LBB17_19:
+; RV32I-NEXT:    sltu a6, t3, a4
+; RV32I-NEXT:    sub a7, t4, a5
+; RV32I-NEXT:    sltu a5, a2, a1
+; RV32I-NEXT:    sub a6, a7, a6
+; RV32I-NEXT:    mv a7, a5
+; RV32I-NEXT:    beq t1, a3, .LBB17_21
+; RV32I-NEXT:  # %bb.20:
+; RV32I-NEXT:    sltu a7, t1, a3
+; RV32I-NEXT:  .LBB17_21:
+; RV32I-NEXT:    sub a4, t3, a4
+; RV32I-NEXT:    sltu t0, a4, a7
+; RV32I-NEXT:    sub a6, a6, t0
+; RV32I-NEXT:    sub a4, a4, a7
+; RV32I-NEXT:    sub a3, t1, a3
+; RV32I-NEXT:    sub a3, a3, a5
+; RV32I-NEXT:    sub a2, a2, a1
+; RV32I-NEXT:    sw a2, 0(a0)
+; RV32I-NEXT:    sw a3, 4(a0)
+; RV32I-NEXT:    sw a4, 8(a0)
+; RV32I-NEXT:    sw a6, 12(a0)
+; RV32I-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_minmax_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    beq a1, a3, .LBB17_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sltu a6, a1, a3
+; RV64I-NEXT:    j .LBB17_3
+; RV64I-NEXT:  .LBB17_2:
+; RV64I-NEXT:    sltu a6, a0, a2
+; RV64I-NEXT:  .LBB17_3:
+; RV64I-NEXT:    mv a4, a1
+; RV64I-NEXT:    mv a5, a0
+; RV64I-NEXT:    bnez a6, .LBB17_5
+; RV64I-NEXT:  # %bb.4:
+; RV64I-NEXT:    mv a4, a3
+; RV64I-NEXT:    mv a5, a2
+; RV64I-NEXT:  .LBB17_5:
+; RV64I-NEXT:    beq a1, a3, .LBB17_7
+; RV64I-NEXT:  # %bb.6:
+; RV64I-NEXT:    sltu a6, a3, a1
+; RV64I-NEXT:    beqz a6, .LBB17_8
+; RV64I-NEXT:    j .LBB17_9
+; RV64I-NEXT:  .LBB17_7:
+; RV64I-NEXT:    sltu a6, a2, a0
+; RV64I-NEXT:    bnez a6, .LBB17_9
+; RV64I-NEXT:  .LBB17_8:
+; RV64I-NEXT:    mv a1, a3
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:  .LBB17_9:
+; RV64I-NEXT:    sltu a2, a5, a0
+; RV64I-NEXT:    sub a1, a4, a1
+; RV64I-NEXT:    sub a1, a1, a2
+; RV64I-NEXT:    sub a0, a5, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_minmax_i128:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lw a6, 4(a2)
+; RV32ZBB-NEXT:    lw a3, 4(a1)
+; RV32ZBB-NEXT:    lw a7, 8(a2)
+; RV32ZBB-NEXT:    lw t0, 12(a2)
+; RV32ZBB-NEXT:    lw a5, 12(a1)
+; RV32ZBB-NEXT:    lw a4, 8(a1)
+; RV32ZBB-NEXT:    beq a5, t0, .LBB17_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu t1, a5, t0
+; RV32ZBB-NEXT:    j .LBB17_3
+; RV32ZBB-NEXT:  .LBB17_2:
+; RV32ZBB-NEXT:    sltu t1, a4, a7
+; RV32ZBB-NEXT:  .LBB17_3:
+; RV32ZBB-NEXT:    lw t2, 0(a2)
+; RV32ZBB-NEXT:    lw a1, 0(a1)
+; RV32ZBB-NEXT:    beq a3, a6, .LBB17_5
+; RV32ZBB-NEXT:  # %bb.4:
+; RV32ZBB-NEXT:    sltu t6, a3, a6
+; RV32ZBB-NEXT:    j .LBB17_6
+; RV32ZBB-NEXT:  .LBB17_5:
+; RV32ZBB-NEXT:    sltu t6, a1, t2
+; RV32ZBB-NEXT:  .LBB17_6:
+; RV32ZBB-NEXT:    xor a2, a5, t0
+; RV32ZBB-NEXT:    xor t3, a4, a7
+; RV32ZBB-NEXT:    or t5, t3, a2
+; RV32ZBB-NEXT:    beqz t5, .LBB17_8
+; RV32ZBB-NEXT:  # %bb.7:
+; RV32ZBB-NEXT:    mv t6, t1
+; RV32ZBB-NEXT:  .LBB17_8:
+; RV32ZBB-NEXT:    mv a2, a1
+; RV32ZBB-NEXT:    mv t1, a3
+; RV32ZBB-NEXT:    mv t4, a5
+; RV32ZBB-NEXT:    mv t3, a4
+; RV32ZBB-NEXT:    bnez t6, .LBB17_10
+; RV32ZBB-NEXT:  # %bb.9:
+; RV32ZBB-NEXT:    mv a2, t2
+; RV32ZBB-NEXT:    mv t1, a6
+; RV32ZBB-NEXT:    mv t4, t0
+; RV32ZBB-NEXT:    mv t3, a7
+; RV32ZBB-NEXT:  .LBB17_10:
+; RV32ZBB-NEXT:    beq a5, t0, .LBB17_12
+; RV32ZBB-NEXT:  # %bb.11:
+; RV32ZBB-NEXT:    sltu t6, t0, a5
+; RV32ZBB-NEXT:    j .LBB17_13
+; RV32ZBB-NEXT:  .LBB17_12:
+; RV32ZBB-NEXT:    sltu t6, a7, a4
+; RV32ZBB-NEXT:  .LBB17_13:
+; RV32ZBB-NEXT:    addi sp, sp, -16
+; RV32ZBB-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT:    beq a3, a6, .LBB17_15
+; RV32ZBB-NEXT:  # %bb.14:
+; RV32ZBB-NEXT:    sltu s0, a6, a3
+; RV32ZBB-NEXT:    bnez t5, .LBB17_16
+; RV32ZBB-NEXT:    j .LBB17_17
+; RV32ZBB-NEXT:  .LBB17_15:
+; RV32ZBB-NEXT:    sltu s0, t2, a1
+; RV32ZBB-NEXT:    beqz t5, .LBB17_17
+; RV32ZBB-NEXT:  .LBB17_16:
+; RV32ZBB-NEXT:    mv s0, t6
+; RV32ZBB-NEXT:  .LBB17_17:
+; RV32ZBB-NEXT:    bnez s0, .LBB17_19
+; RV32ZBB-NEXT:  # %bb.18:
+; RV32ZBB-NEXT:    mv a1, t2
+; RV32ZBB-NEXT:    mv a3, a6
+; RV32ZBB-NEXT:    mv a5, t0
+; RV32ZBB-NEXT:    mv a4, a7
+; RV32ZBB-NEXT:  .LBB17_19:
+; RV32ZBB-NEXT:    sltu a6, t3, a4
+; RV32ZBB-NEXT:    sub a7, t4, a5
+; RV32ZBB-NEXT:    sltu a5, a2, a1
+; RV32ZBB-NEXT:    sub a6, a7, a6
+; RV32ZBB-NEXT:    mv a7, a5
+; RV32ZBB-NEXT:    beq t1, a3, .LBB17_21
+; RV32ZBB-NEXT:  # %bb.20:
+; RV32ZBB-NEXT:    sltu a7, t1, a3
+; RV32ZBB-NEXT:  .LBB17_21:
+; RV32ZBB-NEXT:    sub a4, t3, a4
+; RV32ZBB-NEXT:    sltu t0, a4, a7
+; RV32ZBB-NEXT:    sub a6, a6, t0
+; RV32ZBB-NEXT:    sub a4, a4, a7
+; RV32ZBB-NEXT:    sub a3, t1, a3
+; RV32ZBB-NEXT:    sub a3, a3, a5
+; RV32ZBB-NEXT:    sub a2, a2, a1
+; RV32ZBB-NEXT:    sw a2, 0(a0)
+; RV32ZBB-NEXT:    sw a3, 4(a0)
+; RV32ZBB-NEXT:    sw a4, 8(a0)
+; RV32ZBB-NEXT:    sw a6, 12(a0)
+; RV32ZBB-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT:    addi sp, sp, 16
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_minmax_i128:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    beq a1, a3, .LBB17_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    sltu a6, a1, a3
+; RV64ZBB-NEXT:    j .LBB17_3
+; RV64ZBB-NEXT:  .LBB17_2:
+; RV64ZBB-NEXT:    sltu a6, a0, a2
+; RV64ZBB-NEXT:  .LBB17_3:
+; RV64ZBB-NEXT:    mv a4, a1
+; RV64ZBB-NEXT:    mv a5, a0
+; RV64ZBB-NEXT:    bnez a6, .LBB17_5
+; RV64ZBB-NEXT:  # %bb.4:
+; RV64ZBB-NEXT:    mv a4, a3
+; RV64ZBB-NEXT:    mv a5, a2
+; RV64ZBB-NEXT:  .LBB17_5:
+; RV64ZBB-NEXT:    beq a1, a3, .LBB17_7
+; RV64ZBB-NEXT:  # %bb.6:
+; RV64ZBB-NEXT:    sltu a6, a3, a1
+; RV64ZBB-NEXT:    beqz a6, .LBB17_8
+; RV64ZBB-NEXT:    j .LBB17_9
+; RV64ZBB-NEXT:  .LBB17_7:
+; RV64ZBB-NEXT:    sltu a6, a2, a0
+; RV64ZBB-NEXT:    bnez a6, .LBB17_9
+; RV64ZBB-NEXT:  .LBB17_8:
+; RV64ZBB-NEXT:    mv a1, a3
+; RV64ZBB-NEXT:    mv a0, a2
+; RV64ZBB-NEXT:  .LBB17_9:
+; RV64ZBB-NEXT:    sltu a2, a5, a0
+; RV64ZBB-NEXT:    sub a1, a4, a1
+; RV64ZBB-NEXT:    sub a1, a1, a2
+; RV64ZBB-NEXT:    sub a0, a5, a0
+; RV64ZBB-NEXT:    ret
+  %min = call i128 @llvm.umin.i128(i128 %a, i128 %b)
+  %max = call i128 @llvm.umax.i128(i128 %a, i128 %b)
+  %sub = sub i128 %min, %max
+  ret i128 %sub
+}
+
+;
+; select(icmp(a,b),sub(a,b),sub(b,a)) -> nabdu(a,b)
+;
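+; Both select arms already carry the negated distance (illustrative note, not
+; part of the checks); with the ule predicate used in abd_cmp_i8 below:
+;   a <= b:  a - b = -(b - a) = -|a - b|
+;   a >  b:  b - a            = -|a - b|
+; so whichever arm is chosen, the result is nabdu(a,b).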
+
+define i8 @abd_cmp_i8(i8 %a, i8 %b) nounwind {
+; NOZBB-LABEL: abd_cmp_i8:
+; NOZBB:       # %bb.0:
+; NOZBB-NEXT:    andi a2, a0, 255
+; NOZBB-NEXT:    andi a3, a1, 255
+; NOZBB-NEXT:    bgeu a3, a2, .LBB18_2
+; NOZBB-NEXT:  # %bb.1:
+; NOZBB-NEXT:    sub a0, a1, a0
+; NOZBB-NEXT:    ret
+; NOZBB-NEXT:  .LBB18_2:
+; NOZBB-NEXT:    sub a0, a0, a1
+; NOZBB-NEXT:    ret
+;
+; ZBB-LABEL: abd_cmp_i8:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    andi a2, a0, 255
+; ZBB-NEXT:    andi a3, a1, 255
+; ZBB-NEXT:    bgeu a3, a2, .LBB18_2
+; ZBB-NEXT:  # %bb.1:
+; ZBB-NEXT:    sub a0, a1, a0
+; ZBB-NEXT:    ret
+; ZBB-NEXT:  .LBB18_2:
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    ret
+  %cmp = icmp ule i8 %a, %b
+  %ab = sub i8 %a, %b
+  %ba = sub i8 %b, %a
+  %sel = select i1 %cmp, i8 %ab, i8 %ba
+  ret i8 %sel
+}
+
+define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_cmp_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a2, 16
+; RV32I-NEXT:    addi a2, a2, -1
+; RV32I-NEXT:    and a3, a1, a2
+; RV32I-NEXT:    and a2, a0, a2
+; RV32I-NEXT:    bltu a2, a3, .LBB19_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB19_2:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_cmp_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a2, 16
+; RV64I-NEXT:    addiw a2, a2, -1
+; RV64I-NEXT:    and a3, a1, a2
+; RV64I-NEXT:    and a2, a0, a2
+; RV64I-NEXT:    bltu a2, a3, .LBB19_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB19_2:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_cmp_i16:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    zext.h a2, a1
+; ZBB-NEXT:    zext.h a3, a0
+; ZBB-NEXT:    bltu a3, a2, .LBB19_2
+; ZBB-NEXT:  # %bb.1:
+; ZBB-NEXT:    sub a0, a1, a0
+; ZBB-NEXT:    ret
+; ZBB-NEXT:  .LBB19_2:
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    ret
+  %cmp = icmp ult i16 %a, %b
+  %ab = sub i16 %a, %b
+  %ba = sub i16 %b, %a
+  %sel = select i1 %cmp, i16 %ab, i16 %ba
+  ret i16 %sel
+}
+
+define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_cmp_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    bgeu a0, a1, .LBB20_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB20_2:
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_cmp_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a2, a1
+; RV64I-NEXT:    sext.w a3, a0
+; RV64I-NEXT:    bgeu a3, a2, .LBB20_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB20_2:
+; RV64I-NEXT:    subw a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_cmp_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    bgeu a0, a1, .LBB20_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    ret
+; RV32ZBB-NEXT:  .LBB20_2:
+; RV32ZBB-NEXT:    sub a0, a1, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_cmp_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sext.w a2, a1
+; RV64ZBB-NEXT:    sext.w a3, a0
+; RV64ZBB-NEXT:    bgeu a3, a2, .LBB20_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    subw a0, a0, a1
+; RV64ZBB-NEXT:    ret
+; RV64ZBB-NEXT:  .LBB20_2:
+; RV64ZBB-NEXT:    subw a0, a1, a0
+; RV64ZBB-NEXT:    ret
+  %cmp = icmp uge i32 %a, %b
+  %ab = sub i32 %a, %b
+  %ba = sub i32 %b, %a
+  %sel = select i1 %cmp, i32 %ba, i32 %ab
+  ret i32 %sel
+}
+
+define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: abd_cmp_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltu a4, a0, a2
+; RV32I-NEXT:    mv a5, a4
+; RV32I-NEXT:    beq a1, a3, .LBB21_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu a5, a1, a3
+; RV32I-NEXT:  .LBB21_2:
+; RV32I-NEXT:    bnez a5, .LBB21_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sub a1, a1, a4
+; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB21_4:
+; RV32I-NEXT:    sltu a4, a2, a0
+; RV32I-NEXT:    sub a1, a3, a1
+; RV32I-NEXT:    sub a1, a1, a4
+; RV32I-NEXT:    sub a0, a2, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_cmp_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    bltu a0, a1, .LBB21_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB21_2:
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_cmp_i64:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sltu a4, a0, a2
+; RV32ZBB-NEXT:    mv a5, a4
+; RV32ZBB-NEXT:    beq a1, a3, .LBB21_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu a5, a1, a3
+; RV32ZBB-NEXT:  .LBB21_2:
+; RV32ZBB-NEXT:    bnez a5, .LBB21_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    sub a1, a1, a3
+; RV32ZBB-NEXT:    sub a1, a1, a4
+; RV32ZBB-NEXT:    sub a0, a0, a2
+; RV32ZBB-NEXT:    ret
+; RV32ZBB-NEXT:  .LBB21_4:
+; RV32ZBB-NEXT:    sltu a4, a2, a0
+; RV32ZBB-NEXT:    sub a1, a3, a1
+; RV32ZBB-NEXT:    sub a1, a1, a4
+; RV32ZBB-NEXT:    sub a0, a2, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_cmp_i64:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    bltu a0, a1, .LBB21_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    ret
+; RV64ZBB-NEXT:  .LBB21_2:
+; RV64ZBB-NEXT:    sub a0, a1, a0
+; RV64ZBB-NEXT:    ret
+  %cmp = icmp ult i64 %a, %b
+  %ab = sub i64 %a, %b
+  %ba = sub i64 %b, %a
+  %sel = select i1 %cmp, i64 %ba, i64 %ab
+  ret i64 %sel
+}
+
+define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
+; RV32I-LABEL: abd_cmp_i128:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lw a3, 0(a2)
+; RV32I-NEXT:    lw a4, 0(a1)
+; RV32I-NEXT:    lw a5, 4(a2)
+; RV32I-NEXT:    lw a6, 8(a2)
+; RV32I-NEXT:    lw a7, 8(a1)
+; RV32I-NEXT:    lw a2, 12(a2)
+; RV32I-NEXT:    lw t0, 12(a1)
+; RV32I-NEXT:    lw a1, 4(a1)
+; RV32I-NEXT:    sltu t1, a7, a6
+; RV32I-NEXT:    mv t4, t1
+; RV32I-NEXT:    beq t0, a2, .LBB22_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu t4, t0, a2
+; RV32I-NEXT:  .LBB22_2:
+; RV32I-NEXT:    sltu t2, a4, a3
+; RV32I-NEXT:    mv t3, t2
+; RV32I-NEXT:    beq a1, a5, .LBB22_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    sltu t3, a1, a5
+; RV32I-NEXT:  .LBB22_4:
+; RV32I-NEXT:    xor t5, t0, a2
+; RV32I-NEXT:    xor t6, a7, a6
+; RV32I-NEXT:    or t5, t6, t5
+; RV32I-NEXT:    mv t6, t3
+; RV32I-NEXT:    beqz t5, .LBB22_6
+; RV32I-NEXT:  # %bb.5:
+; RV32I-NEXT:    mv t6, t4
+; RV32I-NEXT:  .LBB22_6:
+; RV32I-NEXT:    sltu t4, a3, a4
+; RV32I-NEXT:    mv t5, t4
+; RV32I-NEXT:    beq a1, a5, .LBB22_8
+; RV32I-NEXT:  # %bb.7:
+; RV32I-NEXT:    sltu t5, a5, a1
+; RV32I-NEXT:  .LBB22_8:
+; RV32I-NEXT:    bnez t6, .LBB22_10
+; RV32I-NEXT:  # %bb.9:
+; RV32I-NEXT:    sub a2, t0, a2
+; RV32I-NEXT:    sub a6, a7, a6
+; RV32I-NEXT:    sub a2, a2, t1
+; RV32I-NEXT:    sltu a7, a6, t3
+; RV32I-NEXT:    sub a1, a1, a5
+; RV32I-NEXT:    sub a2, a2, a7
+; RV32I-NEXT:    sub a6, a6, t3
+; RV32I-NEXT:    sub a1, a1, t2
+; RV32I-NEXT:    sub a3, a4, a3
+; RV32I-NEXT:    j .LBB22_11
+; RV32I-NEXT:  .LBB22_10:
+; RV32I-NEXT:    sltu t1, a6, a7
+; RV32I-NEXT:    sub a2, a2, t0
+; RV32I-NEXT:    sub a2, a2, t1
+; RV32I-NEXT:    sub a6, a6, a7
+; RV32I-NEXT:    sltu a7, a6, t5
+; RV32I-NEXT:    sub a2, a2, a7
+; RV32I-NEXT:    sub a6, a6, t5
+; RV32I-NEXT:    sub a5, a5, a1
+; RV32I-NEXT:    sub a1, a5, t4
+; RV32I-NEXT:    sub a3, a3, a4
+; RV32I-NEXT:  .LBB22_11:
+; RV32I-NEXT:    sw a6, 8(a0)
+; RV32I-NEXT:    sw a1, 4(a0)
+; RV32I-NEXT:    sw a3, 0(a0)
+; RV32I-NEXT:    sw a2, 12(a0)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_cmp_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sltu a4, a0, a2
+; RV64I-NEXT:    mv a5, a4
+; RV64I-NEXT:    beq a1, a3, .LBB22_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sltu a5, a1, a3
+; RV64I-NEXT:  .LBB22_2:
+; RV64I-NEXT:    bnez a5, .LBB22_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    sub a1, a1, a3
+; RV64I-NEXT:    sub a1, a1, a4
+; RV64I-NEXT:    sub a0, a0, a2
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB22_4:
+; RV64I-NEXT:    sltu a4, a2, a0
+; RV64I-NEXT:    sub a1, a3, a1
+; RV64I-NEXT:    sub a1, a1, a4
+; RV64I-NEXT:    sub a0, a2, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_cmp_i128:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lw a3, 0(a2)
+; RV32ZBB-NEXT:    lw a4, 0(a1)
+; RV32ZBB-NEXT:    lw a5, 4(a2)
+; RV32ZBB-NEXT:    lw a6, 8(a2)
+; RV32ZBB-NEXT:    lw a7, 8(a1)
+; RV32ZBB-NEXT:    lw a2, 12(a2)
+; RV32ZBB-NEXT:    lw t0, 12(a1)
+; RV32ZBB-NEXT:    lw a1, 4(a1)
+; RV32ZBB-NEXT:    sltu t1, a7, a6
+; RV32ZBB-NEXT:    mv t4, t1
+; RV32ZBB-NEXT:    beq t0, a2, .LBB22_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu t4, t0, a2
+; RV32ZBB-NEXT:  .LBB22_2:
+; RV32ZBB-NEXT:    sltu t2, a4, a3
+; RV32ZBB-NEXT:    mv t3, t2
+; RV32ZBB-NEXT:    beq a1, a5, .LBB22_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    sltu t3, a1, a5
+; RV32ZBB-NEXT:  .LBB22_4:
+; RV32ZBB-NEXT:    xor t5, t0, a2
+; RV32ZBB-NEXT:    xor t6, a7, a6
+; RV32ZBB-NEXT:    or t5, t6, t5
+; RV32ZBB-NEXT:    mv t6, t3
+; RV32ZBB-NEXT:    beqz t5, .LBB22_6
+; RV32ZBB-NEXT:  # %bb.5:
+; RV32ZBB-NEXT:    mv t6, t4
+; RV32ZBB-NEXT:  .LBB22_6:
+; RV32ZBB-NEXT:    sltu t4, a3, a4
+; RV32ZBB-NEXT:    mv t5, t4
+; RV32ZBB-NEXT:    beq a1, a5, .LBB22_8
+; RV32ZBB-NEXT:  # %bb.7:
+; RV32ZBB-NEXT:    sltu t5, a5, a1
+; RV32ZBB-NEXT:  .LBB22_8:
+; RV32ZBB-NEXT:    bnez t6, .LBB22_10
+; RV32ZBB-NEXT:  # %bb.9:
+; RV32ZBB-NEXT:    sub a2, t0, a2
+; RV32ZBB-NEXT:    sub a6, a7, a6
+; RV32ZBB-NEXT:    sub a2, a2, t1
+; RV32ZBB-NEXT:    sltu a7, a6, t3
+; RV32ZBB-NEXT:    sub a1, a1, a5
+; RV32ZBB-NEXT:    sub a2, a2, a7
+; RV32ZBB-NEXT:    sub a6, a6, t3
+; RV32ZBB-NEXT:    sub a1, a1, t2
+; RV32ZBB-NEXT:    sub a3, a4, a3
+; RV32ZBB-NEXT:    j .LBB22_11
+; RV32ZBB-NEXT:  .LBB22_10:
+; RV32ZBB-NEXT:    sltu t1, a6, a7
+; RV32ZBB-NEXT:    sub a2, a2, t0
+; RV32ZBB-NEXT:    sub a2, a2, t1
+; RV32ZBB-NEXT:    sub a6, a6, a7
+; RV32ZBB-NEXT:    sltu a7, a6, t5
+; RV32ZBB-NEXT:    sub a2, a2, a7
+; RV32ZBB-NEXT:    sub a6, a6, t5
+; RV32ZBB-NEXT:    sub a5, a5, a1
+; RV32ZBB-NEXT:    sub a1, a5, t4
+; RV32ZBB-NEXT:    sub a3, a3, a4
+; RV32ZBB-NEXT:  .LBB22_11:
+; RV32ZBB-NEXT:    sw a6, 8(a0)
+; RV32ZBB-NEXT:    sw a1, 4(a0)
+; RV32ZBB-NEXT:    sw a3, 0(a0)
+; RV32ZBB-NEXT:    sw a2, 12(a0)
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_cmp_i128:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sltu a4, a0, a2
+; RV64ZBB-NEXT:    mv a5, a4
+; RV64ZBB-NEXT:    beq a1, a3, .LBB22_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    sltu a5, a1, a3
+; RV64ZBB-NEXT:  .LBB22_2:
+; RV64ZBB-NEXT:    bnez a5, .LBB22_4
+; RV64ZBB-NEXT:  # %bb.3:
+; RV64ZBB-NEXT:    sub a1, a1, a3
+; RV64ZBB-NEXT:    sub a1, a1, a4
+; RV64ZBB-NEXT:    sub a0, a0, a2
+; RV64ZBB-NEXT:    ret
+; RV64ZBB-NEXT:  .LBB22_4:
+; RV64ZBB-NEXT:    sltu a4, a2, a0
+; RV64ZBB-NEXT:    sub a1, a3, a1
+; RV64ZBB-NEXT:    sub a1, a1, a4
+; RV64ZBB-NEXT:    sub a0, a2, a0
+; RV64ZBB-NEXT:    ret
+  %cmp = icmp ult i128 %a, %b
+  %ab = sub i128 %a, %b
+  %ba = sub i128 %b, %a
+  %sel = select i1 %cmp, i128 %ba, i128 %ab
+  ret i128 %sel
+}
+
+declare i8 @llvm.abs.i8(i8, i1)
+declare i16 @llvm.abs.i16(i16, i1)
+declare i32 @llvm.abs.i32(i32, i1)
+declare i64 @llvm.abs.i64(i64, i1)
+declare i128 @llvm.abs.i128(i128, i1)
+
+declare i8 @llvm.umax.i8(i8, i8)
+declare i16 @llvm.umax.i16(i16, i16)
+declare i32 @llvm.umax.i32(i32, i32)
+declare i64 @llvm.umax.i64(i64, i64)
+
+declare i8 @llvm.umin.i8(i8, i8)
+declare i16 @llvm.umin.i16(i16, i16)
+declare i32 @llvm.umin.i32(i32, i32)
+declare i64 @llvm.umin.i64(i64, i64)

diff  --git a/llvm/test/CodeGen/RISCV/abdu.ll b/llvm/test/CodeGen/RISCV/abdu.ll
new file mode 100644
index 0000000000000..d4b87366bab6e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/abdu.ll
@@ -0,0 +1,1804 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv32 | FileCheck %s --check-prefixes=CHECK,NOZBB,RV32I
+; RUN: llc < %s -mtriple=riscv64 | FileCheck %s --check-prefixes=CHECK,NOZBB,RV64I
+; RUN: llc < %s -mtriple=riscv32 -mattr=+zbb | FileCheck %s --check-prefixes=CHECK,ZBB,RV32ZBB
+; RUN: llc < %s -mtriple=riscv64 -mattr=+zbb | FileCheck %s --check-prefixes=CHECK,ZBB,RV64ZBB
+
+;
+; trunc(abs(sub(zext(a),zext(b)))) -> abdu(a,b)
+;
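+; A minimal IR sketch of the pattern being matched (assuming i8 operands;
+; the same shape is tested below for wider and mixed widths):
+;   %aext = zext i8 %a to i64
+;   %bext = zext i8 %b to i64
+;   %sub  = sub i64 %aext, %bext
+;   %abs  = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+;   %res  = trunc i64 %abs to i8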
+
+define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind {
+; RV32I-LABEL: abd_ext_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, 255
+; RV32I-NEXT:    andi a1, a1, 255
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    andi a1, a1, 255
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_ext_i8:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    andi a0, a0, 255
+; ZBB-NEXT:    andi a1, a1, 255
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    neg a1, a0
+; ZBB-NEXT:    max a0, a0, a1
+; ZBB-NEXT:    ret
+  %aext = zext i8 %a to i64
+  %bext = zext i8 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %trunc = trunc i64 %abs to i8
+  ret i8 %trunc
+}
+
+define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_ext_i8_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, 255
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srli a1, a1, 16
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i8_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srli a1, a1, 48
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_ext_i8_i16:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    andi a0, a0, 255
+; ZBB-NEXT:    zext.h a1, a1
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    neg a1, a0
+; ZBB-NEXT:    max a0, a0, a1
+; ZBB-NEXT:    ret
+  %aext = zext i8 %a to i64
+  %bext = zext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %trunc = trunc i64 %abs to i8
+  ret i8 %trunc
+}
+
+define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind {
+; RV32I-LABEL: abd_ext_i8_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, 255
+; RV32I-NEXT:    andi a1, a1, 255
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i8_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    andi a1, a1, 255
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_ext_i8_undef:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    andi a0, a0, 255
+; ZBB-NEXT:    andi a1, a1, 255
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    neg a1, a0
+; ZBB-NEXT:    max a0, a0, a1
+; ZBB-NEXT:    ret
+  %aext = zext i8 %a to i64
+  %bext = zext i8 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  %trunc = trunc i64 %abs to i8
+  ret i8 %trunc
+}
+
+define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_ext_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a2, 16
+; RV32I-NEXT:    addi a2, a2, -1
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a2, 16
+; RV64I-NEXT:    addiw a2, a2, -1
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_ext_i16:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    zext.h a0, a0
+; ZBB-NEXT:    zext.h a1, a1
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    neg a1, a0
+; ZBB-NEXT:    max a0, a0, a1
+; ZBB-NEXT:    ret
+  %aext = zext i16 %a to i64
+  %bext = zext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %trunc = trunc i64 %abs to i16
+  ret i16 %trunc
+}
+
+define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_ext_i16_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
+; RV32I-NEXT:    sltu a2, a0, a1
+; RV32I-NEXT:    neg a2, a2
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    bgez a2, .LBB4_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:  .LBB4_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i16_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
+; RV64I-NEXT:    slli a1, a1, 32
+; RV64I-NEXT:    srli a1, a1, 32
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i16_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    zext.h a0, a0
+; RV32ZBB-NEXT:    sltu a2, a0, a1
+; RV32ZBB-NEXT:    neg a2, a2
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    bgez a2, .LBB4_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:  .LBB4_2:
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i16_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    zext.h a0, a0
+; RV64ZBB-NEXT:    slli a1, a1, 32
+; RV64ZBB-NEXT:    srli a1, a1, 32
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    max a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %aext = zext i16 %a to i64
+  %bext = zext i32 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %trunc = trunc i64 %abs to i16
+  ret i16 %trunc
+}
+
+define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_ext_i16_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a2, 16
+; RV32I-NEXT:    addi a2, a2, -1
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i16_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a2, 16
+; RV64I-NEXT:    addiw a2, a2, -1
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_ext_i16_undef:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    zext.h a0, a0
+; ZBB-NEXT:    zext.h a1, a1
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    neg a1, a0
+; ZBB-NEXT:    max a0, a0, a1
+; ZBB-NEXT:    ret
+  %aext = zext i16 %a to i64
+  %bext = zext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  %trunc = trunc i64 %abs to i16
+  ret i16 %trunc
+}
+
+define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_ext_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltu a2, a0, a1
+; RV32I-NEXT:    neg a2, a2
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    bgez a2, .LBB6_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:  .LBB6_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    slli a1, a1, 32
+; RV64I-NEXT:    srli a1, a1, 32
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sltu a2, a0, a1
+; RV32ZBB-NEXT:    neg a2, a2
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    bgez a2, .LBB6_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:  .LBB6_2:
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    slli a0, a0, 32
+; RV64ZBB-NEXT:    srli a0, a0, 32
+; RV64ZBB-NEXT:    slli a1, a1, 32
+; RV64ZBB-NEXT:    srli a1, a1, 32
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    max a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %aext = zext i32 %a to i64
+  %bext = zext i32 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %trunc = trunc i64 %abs to i32
+  ret i32 %trunc
+}
+
+define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_ext_i32_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srli a1, a1, 16
+; RV32I-NEXT:    sltu a2, a0, a1
+; RV32I-NEXT:    neg a2, a2
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    bgez a2, .LBB7_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:  .LBB7_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i32_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srli a1, a1, 48
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i32_i16:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    zext.h a1, a1
+; RV32ZBB-NEXT:    sltu a2, a0, a1
+; RV32ZBB-NEXT:    neg a2, a2
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    bgez a2, .LBB7_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:  .LBB7_2:
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i32_i16:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    slli a0, a0, 32
+; RV64ZBB-NEXT:    srli a0, a0, 32
+; RV64ZBB-NEXT:    zext.h a1, a1
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    max a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %aext = zext i32 %a to i64
+  %bext = zext i16 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
+  %trunc = trunc i64 %abs to i32
+  ret i32 %trunc
+}
+
+define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_ext_i32_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltu a2, a0, a1
+; RV32I-NEXT:    neg a2, a2
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    bgez a2, .LBB8_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:  .LBB8_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i32_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    slli a1, a1, 32
+; RV64I-NEXT:    srli a1, a1, 32
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srai a1, a0, 63
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i32_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sltu a2, a0, a1
+; RV32ZBB-NEXT:    neg a2, a2
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    bgez a2, .LBB8_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:  .LBB8_2:
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i32_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    slli a0, a0, 32
+; RV64ZBB-NEXT:    srli a0, a0, 32
+; RV64ZBB-NEXT:    slli a1, a1, 32
+; RV64ZBB-NEXT:    srli a1, a1, 32
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    max a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %aext = zext i32 %a to i64
+  %bext = zext i32 %b to i64
+  %sub = sub i64 %aext, %bext
+  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
+  %trunc = trunc i64 %abs to i32
+  ret i32 %trunc
+}
+
+define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: abd_ext_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltu a4, a0, a2
+; RV32I-NEXT:    mv a5, a4
+; RV32I-NEXT:    beq a1, a3, .LBB9_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu a5, a1, a3
+; RV32I-NEXT:  .LBB9_2:
+; RV32I-NEXT:    neg a5, a5
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sub a1, a1, a4
+; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    bgez a5, .LBB9_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    snez a2, a0
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:  .LBB9_4:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sltu a2, a0, a1
+; RV64I-NEXT:    neg a2, a2
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    bgez a2, .LBB9_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:  .LBB9_2:
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i64:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sltu a4, a0, a2
+; RV32ZBB-NEXT:    mv a5, a4
+; RV32ZBB-NEXT:    beq a1, a3, .LBB9_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu a5, a1, a3
+; RV32ZBB-NEXT:  .LBB9_2:
+; RV32ZBB-NEXT:    neg a5, a5
+; RV32ZBB-NEXT:    sub a1, a1, a3
+; RV32ZBB-NEXT:    sub a1, a1, a4
+; RV32ZBB-NEXT:    sub a0, a0, a2
+; RV32ZBB-NEXT:    bgez a5, .LBB9_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    snez a2, a0
+; RV32ZBB-NEXT:    add a1, a1, a2
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:  .LBB9_4:
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i64:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sltu a2, a0, a1
+; RV64ZBB-NEXT:    neg a2, a2
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    bgez a2, .LBB9_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    neg a0, a0
+; RV64ZBB-NEXT:  .LBB9_2:
+; RV64ZBB-NEXT:    ret
+  %aext = zext i64 %a to i128
+  %bext = zext i64 %b to i128
+  %sub = sub i128 %aext, %bext
+  %abs = call i128 @llvm.abs.i128(i128 %sub, i1 false)
+  %trunc = trunc i128 %abs to i64
+  ret i64 %trunc
+}
+
+define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: abd_ext_i64_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltu a4, a0, a2
+; RV32I-NEXT:    mv a5, a4
+; RV32I-NEXT:    beq a1, a3, .LBB10_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu a5, a1, a3
+; RV32I-NEXT:  .LBB10_2:
+; RV32I-NEXT:    neg a5, a5
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sub a1, a1, a4
+; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    bgez a5, .LBB10_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    snez a2, a0
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:  .LBB10_4:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i64_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sltu a2, a0, a1
+; RV64I-NEXT:    neg a2, a2
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    bgez a2, .LBB10_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:  .LBB10_2:
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i64_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sltu a4, a0, a2
+; RV32ZBB-NEXT:    mv a5, a4
+; RV32ZBB-NEXT:    beq a1, a3, .LBB10_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu a5, a1, a3
+; RV32ZBB-NEXT:  .LBB10_2:
+; RV32ZBB-NEXT:    neg a5, a5
+; RV32ZBB-NEXT:    sub a1, a1, a3
+; RV32ZBB-NEXT:    sub a1, a1, a4
+; RV32ZBB-NEXT:    sub a0, a0, a2
+; RV32ZBB-NEXT:    bgez a5, .LBB10_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    snez a2, a0
+; RV32ZBB-NEXT:    add a1, a1, a2
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:    neg a0, a0
+; RV32ZBB-NEXT:  .LBB10_4:
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i64_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sltu a2, a0, a1
+; RV64ZBB-NEXT:    neg a2, a2
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    bgez a2, .LBB10_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    neg a0, a0
+; RV64ZBB-NEXT:  .LBB10_2:
+; RV64ZBB-NEXT:    ret
+  %aext = zext i64 %a to i128
+  %bext = zext i64 %b to i128
+  %sub = sub i128 %aext, %bext
+  %abs = call i128 @llvm.abs.i128(i128 %sub, i1 true)
+  %trunc = trunc i128 %abs to i64
+  ret i64 %trunc
+}
+
+define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind {
+; RV32I-LABEL: abd_ext_i128:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lw a3, 0(a2)
+; RV32I-NEXT:    lw a4, 0(a1)
+; RV32I-NEXT:    lw a5, 4(a2)
+; RV32I-NEXT:    lw a6, 8(a2)
+; RV32I-NEXT:    lw a7, 8(a1)
+; RV32I-NEXT:    lw a2, 12(a2)
+; RV32I-NEXT:    lw t0, 12(a1)
+; RV32I-NEXT:    lw a1, 4(a1)
+; RV32I-NEXT:    sltu t1, a7, a6
+; RV32I-NEXT:    mv t4, t1
+; RV32I-NEXT:    beq t0, a2, .LBB11_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu t4, t0, a2
+; RV32I-NEXT:  .LBB11_2:
+; RV32I-NEXT:    sltu t2, a4, a3
+; RV32I-NEXT:    mv t3, t2
+; RV32I-NEXT:    beq a1, a5, .LBB11_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    sltu t3, a1, a5
+; RV32I-NEXT:  .LBB11_4:
+; RV32I-NEXT:    xor t5, t0, a2
+; RV32I-NEXT:    xor t6, a7, a6
+; RV32I-NEXT:    or t6, t6, t5
+; RV32I-NEXT:    mv t5, t3
+; RV32I-NEXT:    beqz t6, .LBB11_6
+; RV32I-NEXT:  # %bb.5:
+; RV32I-NEXT:    mv t5, t4
+; RV32I-NEXT:  .LBB11_6:
+; RV32I-NEXT:    neg t4, t5
+; RV32I-NEXT:    sub a2, t0, a2
+; RV32I-NEXT:    sub a2, a2, t1
+; RV32I-NEXT:    sub a6, a7, a6
+; RV32I-NEXT:    sltu a7, a6, t3
+; RV32I-NEXT:    sub a2, a2, a7
+; RV32I-NEXT:    sub a6, a6, t3
+; RV32I-NEXT:    sub a1, a1, a5
+; RV32I-NEXT:    sub a1, a1, t2
+; RV32I-NEXT:    sub a4, a4, a3
+; RV32I-NEXT:    bgez t4, .LBB11_8
+; RV32I-NEXT:  # %bb.7:
+; RV32I-NEXT:    snez a3, a1
+; RV32I-NEXT:    snez a5, a4
+; RV32I-NEXT:    or a3, a5, a3
+; RV32I-NEXT:    neg a7, a6
+; RV32I-NEXT:    sltu t0, a7, a3
+; RV32I-NEXT:    snez a6, a6
+; RV32I-NEXT:    add a2, a2, a6
+; RV32I-NEXT:    neg a2, a2
+; RV32I-NEXT:    sub a2, a2, t0
+; RV32I-NEXT:    sub a6, a7, a3
+; RV32I-NEXT:    neg a4, a4
+; RV32I-NEXT:    add a1, a1, a5
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:  .LBB11_8:
+; RV32I-NEXT:    sw a1, 4(a0)
+; RV32I-NEXT:    sw a4, 0(a0)
+; RV32I-NEXT:    sw a6, 8(a0)
+; RV32I-NEXT:    sw a2, 12(a0)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sltu a4, a0, a2
+; RV64I-NEXT:    mv a5, a4
+; RV64I-NEXT:    beq a1, a3, .LBB11_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sltu a5, a1, a3
+; RV64I-NEXT:  .LBB11_2:
+; RV64I-NEXT:    neg a5, a5
+; RV64I-NEXT:    sub a1, a1, a3
+; RV64I-NEXT:    sub a1, a1, a4
+; RV64I-NEXT:    sub a0, a0, a2
+; RV64I-NEXT:    bgez a5, .LBB11_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    snez a2, a0
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:  .LBB11_4:
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i128:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lw a3, 0(a2)
+; RV32ZBB-NEXT:    lw a4, 0(a1)
+; RV32ZBB-NEXT:    lw a5, 4(a2)
+; RV32ZBB-NEXT:    lw a6, 8(a2)
+; RV32ZBB-NEXT:    lw a7, 8(a1)
+; RV32ZBB-NEXT:    lw a2, 12(a2)
+; RV32ZBB-NEXT:    lw t0, 12(a1)
+; RV32ZBB-NEXT:    lw a1, 4(a1)
+; RV32ZBB-NEXT:    sltu t1, a7, a6
+; RV32ZBB-NEXT:    mv t4, t1
+; RV32ZBB-NEXT:    beq t0, a2, .LBB11_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu t4, t0, a2
+; RV32ZBB-NEXT:  .LBB11_2:
+; RV32ZBB-NEXT:    sltu t2, a4, a3
+; RV32ZBB-NEXT:    mv t3, t2
+; RV32ZBB-NEXT:    beq a1, a5, .LBB11_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    sltu t3, a1, a5
+; RV32ZBB-NEXT:  .LBB11_4:
+; RV32ZBB-NEXT:    xor t5, t0, a2
+; RV32ZBB-NEXT:    xor t6, a7, a6
+; RV32ZBB-NEXT:    or t6, t6, t5
+; RV32ZBB-NEXT:    mv t5, t3
+; RV32ZBB-NEXT:    beqz t6, .LBB11_6
+; RV32ZBB-NEXT:  # %bb.5:
+; RV32ZBB-NEXT:    mv t5, t4
+; RV32ZBB-NEXT:  .LBB11_6:
+; RV32ZBB-NEXT:    neg t4, t5
+; RV32ZBB-NEXT:    sub a2, t0, a2
+; RV32ZBB-NEXT:    sub a2, a2, t1
+; RV32ZBB-NEXT:    sub a6, a7, a6
+; RV32ZBB-NEXT:    sltu a7, a6, t3
+; RV32ZBB-NEXT:    sub a2, a2, a7
+; RV32ZBB-NEXT:    sub a6, a6, t3
+; RV32ZBB-NEXT:    sub a1, a1, a5
+; RV32ZBB-NEXT:    sub a1, a1, t2
+; RV32ZBB-NEXT:    sub a4, a4, a3
+; RV32ZBB-NEXT:    bgez t4, .LBB11_8
+; RV32ZBB-NEXT:  # %bb.7:
+; RV32ZBB-NEXT:    snez a3, a1
+; RV32ZBB-NEXT:    snez a5, a4
+; RV32ZBB-NEXT:    or a3, a5, a3
+; RV32ZBB-NEXT:    neg a7, a6
+; RV32ZBB-NEXT:    sltu t0, a7, a3
+; RV32ZBB-NEXT:    snez a6, a6
+; RV32ZBB-NEXT:    add a2, a2, a6
+; RV32ZBB-NEXT:    neg a2, a2
+; RV32ZBB-NEXT:    sub a2, a2, t0
+; RV32ZBB-NEXT:    sub a6, a7, a3
+; RV32ZBB-NEXT:    neg a4, a4
+; RV32ZBB-NEXT:    add a1, a1, a5
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:  .LBB11_8:
+; RV32ZBB-NEXT:    sw a1, 4(a0)
+; RV32ZBB-NEXT:    sw a4, 0(a0)
+; RV32ZBB-NEXT:    sw a6, 8(a0)
+; RV32ZBB-NEXT:    sw a2, 12(a0)
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i128:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sltu a4, a0, a2
+; RV64ZBB-NEXT:    mv a5, a4
+; RV64ZBB-NEXT:    beq a1, a3, .LBB11_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    sltu a5, a1, a3
+; RV64ZBB-NEXT:  .LBB11_2:
+; RV64ZBB-NEXT:    neg a5, a5
+; RV64ZBB-NEXT:    sub a1, a1, a3
+; RV64ZBB-NEXT:    sub a1, a1, a4
+; RV64ZBB-NEXT:    sub a0, a0, a2
+; RV64ZBB-NEXT:    bgez a5, .LBB11_4
+; RV64ZBB-NEXT:  # %bb.3:
+; RV64ZBB-NEXT:    snez a2, a0
+; RV64ZBB-NEXT:    add a1, a1, a2
+; RV64ZBB-NEXT:    neg a1, a1
+; RV64ZBB-NEXT:    neg a0, a0
+; RV64ZBB-NEXT:  .LBB11_4:
+; RV64ZBB-NEXT:    ret
+  %aext = zext i128 %a to i256
+  %bext = zext i128 %b to i256
+  %sub = sub i256 %aext, %bext
+  %abs = call i256 @llvm.abs.i256(i256 %sub, i1 false)
+  %trunc = trunc i256 %abs to i128
+  ret i128 %trunc
+}
+
+define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind {
+; RV32I-LABEL: abd_ext_i128_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lw a3, 0(a2)
+; RV32I-NEXT:    lw a4, 0(a1)
+; RV32I-NEXT:    lw a5, 4(a2)
+; RV32I-NEXT:    lw a6, 8(a2)
+; RV32I-NEXT:    lw a7, 8(a1)
+; RV32I-NEXT:    lw a2, 12(a2)
+; RV32I-NEXT:    lw t0, 12(a1)
+; RV32I-NEXT:    lw a1, 4(a1)
+; RV32I-NEXT:    sltu t1, a7, a6
+; RV32I-NEXT:    mv t4, t1
+; RV32I-NEXT:    beq t0, a2, .LBB12_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu t4, t0, a2
+; RV32I-NEXT:  .LBB12_2:
+; RV32I-NEXT:    sltu t2, a4, a3
+; RV32I-NEXT:    mv t3, t2
+; RV32I-NEXT:    beq a1, a5, .LBB12_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    sltu t3, a1, a5
+; RV32I-NEXT:  .LBB12_4:
+; RV32I-NEXT:    xor t5, t0, a2
+; RV32I-NEXT:    xor t6, a7, a6
+; RV32I-NEXT:    or t6, t6, t5
+; RV32I-NEXT:    mv t5, t3
+; RV32I-NEXT:    beqz t6, .LBB12_6
+; RV32I-NEXT:  # %bb.5:
+; RV32I-NEXT:    mv t5, t4
+; RV32I-NEXT:  .LBB12_6:
+; RV32I-NEXT:    neg t4, t5
+; RV32I-NEXT:    sub a2, t0, a2
+; RV32I-NEXT:    sub a2, a2, t1
+; RV32I-NEXT:    sub a6, a7, a6
+; RV32I-NEXT:    sltu a7, a6, t3
+; RV32I-NEXT:    sub a2, a2, a7
+; RV32I-NEXT:    sub a6, a6, t3
+; RV32I-NEXT:    sub a1, a1, a5
+; RV32I-NEXT:    sub a1, a1, t2
+; RV32I-NEXT:    sub a4, a4, a3
+; RV32I-NEXT:    bgez t4, .LBB12_8
+; RV32I-NEXT:  # %bb.7:
+; RV32I-NEXT:    snez a3, a1
+; RV32I-NEXT:    snez a5, a4
+; RV32I-NEXT:    or a3, a5, a3
+; RV32I-NEXT:    neg a7, a6
+; RV32I-NEXT:    sltu t0, a7, a3
+; RV32I-NEXT:    snez a6, a6
+; RV32I-NEXT:    add a2, a2, a6
+; RV32I-NEXT:    neg a2, a2
+; RV32I-NEXT:    sub a2, a2, t0
+; RV32I-NEXT:    sub a6, a7, a3
+; RV32I-NEXT:    neg a4, a4
+; RV32I-NEXT:    add a1, a1, a5
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:  .LBB12_8:
+; RV32I-NEXT:    sw a1, 4(a0)
+; RV32I-NEXT:    sw a4, 0(a0)
+; RV32I-NEXT:    sw a6, 8(a0)
+; RV32I-NEXT:    sw a2, 12(a0)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_ext_i128_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sltu a4, a0, a2
+; RV64I-NEXT:    mv a5, a4
+; RV64I-NEXT:    beq a1, a3, .LBB12_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sltu a5, a1, a3
+; RV64I-NEXT:  .LBB12_2:
+; RV64I-NEXT:    neg a5, a5
+; RV64I-NEXT:    sub a1, a1, a3
+; RV64I-NEXT:    sub a1, a1, a4
+; RV64I-NEXT:    sub a0, a0, a2
+; RV64I-NEXT:    bgez a5, .LBB12_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    snez a2, a0
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:  .LBB12_4:
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_ext_i128_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lw a3, 0(a2)
+; RV32ZBB-NEXT:    lw a4, 0(a1)
+; RV32ZBB-NEXT:    lw a5, 4(a2)
+; RV32ZBB-NEXT:    lw a6, 8(a2)
+; RV32ZBB-NEXT:    lw a7, 8(a1)
+; RV32ZBB-NEXT:    lw a2, 12(a2)
+; RV32ZBB-NEXT:    lw t0, 12(a1)
+; RV32ZBB-NEXT:    lw a1, 4(a1)
+; RV32ZBB-NEXT:    sltu t1, a7, a6
+; RV32ZBB-NEXT:    mv t4, t1
+; RV32ZBB-NEXT:    beq t0, a2, .LBB12_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu t4, t0, a2
+; RV32ZBB-NEXT:  .LBB12_2:
+; RV32ZBB-NEXT:    sltu t2, a4, a3
+; RV32ZBB-NEXT:    mv t3, t2
+; RV32ZBB-NEXT:    beq a1, a5, .LBB12_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    sltu t3, a1, a5
+; RV32ZBB-NEXT:  .LBB12_4:
+; RV32ZBB-NEXT:    xor t5, t0, a2
+; RV32ZBB-NEXT:    xor t6, a7, a6
+; RV32ZBB-NEXT:    or t6, t6, t5
+; RV32ZBB-NEXT:    mv t5, t3
+; RV32ZBB-NEXT:    beqz t6, .LBB12_6
+; RV32ZBB-NEXT:  # %bb.5:
+; RV32ZBB-NEXT:    mv t5, t4
+; RV32ZBB-NEXT:  .LBB12_6:
+; RV32ZBB-NEXT:    neg t4, t5
+; RV32ZBB-NEXT:    sub a2, t0, a2
+; RV32ZBB-NEXT:    sub a2, a2, t1
+; RV32ZBB-NEXT:    sub a6, a7, a6
+; RV32ZBB-NEXT:    sltu a7, a6, t3
+; RV32ZBB-NEXT:    sub a2, a2, a7
+; RV32ZBB-NEXT:    sub a6, a6, t3
+; RV32ZBB-NEXT:    sub a1, a1, a5
+; RV32ZBB-NEXT:    sub a1, a1, t2
+; RV32ZBB-NEXT:    sub a4, a4, a3
+; RV32ZBB-NEXT:    bgez t4, .LBB12_8
+; RV32ZBB-NEXT:  # %bb.7:
+; RV32ZBB-NEXT:    snez a3, a1
+; RV32ZBB-NEXT:    snez a5, a4
+; RV32ZBB-NEXT:    or a3, a5, a3
+; RV32ZBB-NEXT:    neg a7, a6
+; RV32ZBB-NEXT:    sltu t0, a7, a3
+; RV32ZBB-NEXT:    snez a6, a6
+; RV32ZBB-NEXT:    add a2, a2, a6
+; RV32ZBB-NEXT:    neg a2, a2
+; RV32ZBB-NEXT:    sub a2, a2, t0
+; RV32ZBB-NEXT:    sub a6, a7, a3
+; RV32ZBB-NEXT:    neg a4, a4
+; RV32ZBB-NEXT:    add a1, a1, a5
+; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:  .LBB12_8:
+; RV32ZBB-NEXT:    sw a1, 4(a0)
+; RV32ZBB-NEXT:    sw a4, 0(a0)
+; RV32ZBB-NEXT:    sw a6, 8(a0)
+; RV32ZBB-NEXT:    sw a2, 12(a0)
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_ext_i128_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sltu a4, a0, a2
+; RV64ZBB-NEXT:    mv a5, a4
+; RV64ZBB-NEXT:    beq a1, a3, .LBB12_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    sltu a5, a1, a3
+; RV64ZBB-NEXT:  .LBB12_2:
+; RV64ZBB-NEXT:    neg a5, a5
+; RV64ZBB-NEXT:    sub a1, a1, a3
+; RV64ZBB-NEXT:    sub a1, a1, a4
+; RV64ZBB-NEXT:    sub a0, a0, a2
+; RV64ZBB-NEXT:    bgez a5, .LBB12_4
+; RV64ZBB-NEXT:  # %bb.3:
+; RV64ZBB-NEXT:    snez a2, a0
+; RV64ZBB-NEXT:    add a1, a1, a2
+; RV64ZBB-NEXT:    neg a1, a1
+; RV64ZBB-NEXT:    neg a0, a0
+; RV64ZBB-NEXT:  .LBB12_4:
+; RV64ZBB-NEXT:    ret
+  %aext = zext i128 %a to i256
+  %bext = zext i128 %b to i256
+  %sub = sub i256 %aext, %bext
+  %abs = call i256 @llvm.abs.i256(i256 %sub, i1 true)
+  %trunc = trunc i256 %abs to i128
+  ret i128 %trunc
+}
+
+;
+; sub(umax(a,b),umin(a,b)) -> abdu(a,b)
+;
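+; A minimal IR sketch of the pattern (assuming i8 operands; the
+; umin/umax intrinsic pair is what the combine is expected to recognise):
+;   %min = call i8 @llvm.umin.i8(i8 %a, i8 %b)
+;   %max = call i8 @llvm.umax.i8(i8 %a, i8 %b)
+;   %sub = sub i8 %max, %min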
+
+define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind {
+; NOZBB-LABEL: abd_minmax_i8:
+; NOZBB:       # %bb.0:
+; NOZBB-NEXT:    andi a1, a1, 255
+; NOZBB-NEXT:    andi a0, a0, 255
+; NOZBB-NEXT:    mv a2, a0
+; NOZBB-NEXT:    bgeu a0, a1, .LBB13_3
+; NOZBB-NEXT:  # %bb.1:
+; NOZBB-NEXT:    bgeu a1, a0, .LBB13_4
+; NOZBB-NEXT:  .LBB13_2:
+; NOZBB-NEXT:    sub a0, a0, a2
+; NOZBB-NEXT:    ret
+; NOZBB-NEXT:  .LBB13_3:
+; NOZBB-NEXT:    mv a2, a1
+; NOZBB-NEXT:    bltu a1, a0, .LBB13_2
+; NOZBB-NEXT:  .LBB13_4:
+; NOZBB-NEXT:    sub a0, a1, a2
+; NOZBB-NEXT:    ret
+;
+; ZBB-LABEL: abd_minmax_i8:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    andi a1, a1, 255
+; ZBB-NEXT:    andi a0, a0, 255
+; ZBB-NEXT:    minu a2, a0, a1
+; ZBB-NEXT:    maxu a0, a0, a1
+; ZBB-NEXT:    sub a0, a0, a2
+; ZBB-NEXT:    ret
+  %min = call i8 @llvm.umin.i8(i8 %a, i8 %b)
+  %max = call i8 @llvm.umax.i8(i8 %a, i8 %b)
+  %sub = sub i8 %max, %min
+  ret i8 %sub
+}
+
+define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_minmax_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a2, 16
+; RV32I-NEXT:    addi a2, a2, -1
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bgeu a0, a1, .LBB14_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    bgeu a1, a0, .LBB14_4
+; RV32I-NEXT:  .LBB14_2:
+; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB14_3:
+; RV32I-NEXT:    mv a2, a1
+; RV32I-NEXT:    bltu a1, a0, .LBB14_2
+; RV32I-NEXT:  .LBB14_4:
+; RV32I-NEXT:    sub a0, a1, a2
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_minmax_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a2, 16
+; RV64I-NEXT:    addiw a2, a2, -1
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu a0, a1, .LBB14_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bgeu a1, a0, .LBB14_4
+; RV64I-NEXT:  .LBB14_2:
+; RV64I-NEXT:    sub a0, a0, a2
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB14_3:
+; RV64I-NEXT:    mv a2, a1
+; RV64I-NEXT:    bltu a1, a0, .LBB14_2
+; RV64I-NEXT:  .LBB14_4:
+; RV64I-NEXT:    sub a0, a1, a2
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_minmax_i16:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    zext.h a1, a1
+; ZBB-NEXT:    zext.h a0, a0
+; ZBB-NEXT:    minu a2, a0, a1
+; ZBB-NEXT:    maxu a0, a0, a1
+; ZBB-NEXT:    sub a0, a0, a2
+; ZBB-NEXT:    ret
+  %min = call i16 @llvm.umin.i16(i16 %a, i16 %b)
+  %max = call i16 @llvm.umax.i16(i16 %a, i16 %b)
+  %sub = sub i16 %max, %min
+  ret i16 %sub
+}
+
+define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_minmax_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    bgeu a0, a1, .LBB15_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    bgeu a1, a0, .LBB15_4
+; RV32I-NEXT:  .LBB15_2:
+; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB15_3:
+; RV32I-NEXT:    mv a2, a1
+; RV32I-NEXT:    bltu a1, a0, .LBB15_2
+; RV32I-NEXT:  .LBB15_4:
+; RV32I-NEXT:    sub a0, a1, a2
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_minmax_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a1, a1
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu a0, a1, .LBB15_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bgeu a1, a0, .LBB15_4
+; RV64I-NEXT:  .LBB15_2:
+; RV64I-NEXT:    subw a0, a0, a2
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB15_3:
+; RV64I-NEXT:    mv a2, a1
+; RV64I-NEXT:    bltu a1, a0, .LBB15_2
+; RV64I-NEXT:  .LBB15_4:
+; RV64I-NEXT:    subw a0, a1, a2
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_minmax_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    minu a2, a0, a1
+; RV32ZBB-NEXT:    maxu a0, a0, a1
+; RV32ZBB-NEXT:    sub a0, a0, a2
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_minmax_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sext.w a1, a1
+; RV64ZBB-NEXT:    sext.w a0, a0
+; RV64ZBB-NEXT:    minu a2, a0, a1
+; RV64ZBB-NEXT:    maxu a0, a0, a1
+; RV64ZBB-NEXT:    subw a0, a0, a2
+; RV64ZBB-NEXT:    ret
+  %min = call i32 @llvm.umin.i32(i32 %a, i32 %b)
+  %max = call i32 @llvm.umax.i32(i32 %a, i32 %b)
+  %sub = sub i32 %max, %min
+  ret i32 %sub
+}
+
+define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: abd_minmax_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    beq a1, a3, .LBB16_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu a6, a1, a3
+; RV32I-NEXT:    j .LBB16_3
+; RV32I-NEXT:  .LBB16_2:
+; RV32I-NEXT:    sltu a6, a0, a2
+; RV32I-NEXT:  .LBB16_3:
+; RV32I-NEXT:    mv a4, a1
+; RV32I-NEXT:    mv a5, a0
+; RV32I-NEXT:    bnez a6, .LBB16_5
+; RV32I-NEXT:  # %bb.4:
+; RV32I-NEXT:    mv a4, a3
+; RV32I-NEXT:    mv a5, a2
+; RV32I-NEXT:  .LBB16_5:
+; RV32I-NEXT:    beq a1, a3, .LBB16_7
+; RV32I-NEXT:  # %bb.6:
+; RV32I-NEXT:    sltu a6, a3, a1
+; RV32I-NEXT:    beqz a6, .LBB16_8
+; RV32I-NEXT:    j .LBB16_9
+; RV32I-NEXT:  .LBB16_7:
+; RV32I-NEXT:    sltu a6, a2, a0
+; RV32I-NEXT:    bnez a6, .LBB16_9
+; RV32I-NEXT:  .LBB16_8:
+; RV32I-NEXT:    mv a1, a3
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:  .LBB16_9:
+; RV32I-NEXT:    sltu a2, a0, a5
+; RV32I-NEXT:    sub a1, a1, a4
+; RV32I-NEXT:    sub a1, a1, a2
+; RV32I-NEXT:    sub a0, a0, a5
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_minmax_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bgeu a0, a1, .LBB16_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bgeu a1, a0, .LBB16_4
+; RV64I-NEXT:  .LBB16_2:
+; RV64I-NEXT:    sub a0, a0, a2
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB16_3:
+; RV64I-NEXT:    mv a2, a1
+; RV64I-NEXT:    bltu a1, a0, .LBB16_2
+; RV64I-NEXT:  .LBB16_4:
+; RV64I-NEXT:    sub a0, a1, a2
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_minmax_i64:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    beq a1, a3, .LBB16_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu a6, a1, a3
+; RV32ZBB-NEXT:    j .LBB16_3
+; RV32ZBB-NEXT:  .LBB16_2:
+; RV32ZBB-NEXT:    sltu a6, a0, a2
+; RV32ZBB-NEXT:  .LBB16_3:
+; RV32ZBB-NEXT:    mv a4, a1
+; RV32ZBB-NEXT:    mv a5, a0
+; RV32ZBB-NEXT:    bnez a6, .LBB16_5
+; RV32ZBB-NEXT:  # %bb.4:
+; RV32ZBB-NEXT:    mv a4, a3
+; RV32ZBB-NEXT:    mv a5, a2
+; RV32ZBB-NEXT:  .LBB16_5:
+; RV32ZBB-NEXT:    beq a1, a3, .LBB16_7
+; RV32ZBB-NEXT:  # %bb.6:
+; RV32ZBB-NEXT:    sltu a6, a3, a1
+; RV32ZBB-NEXT:    beqz a6, .LBB16_8
+; RV32ZBB-NEXT:    j .LBB16_9
+; RV32ZBB-NEXT:  .LBB16_7:
+; RV32ZBB-NEXT:    sltu a6, a2, a0
+; RV32ZBB-NEXT:    bnez a6, .LBB16_9
+; RV32ZBB-NEXT:  .LBB16_8:
+; RV32ZBB-NEXT:    mv a1, a3
+; RV32ZBB-NEXT:    mv a0, a2
+; RV32ZBB-NEXT:  .LBB16_9:
+; RV32ZBB-NEXT:    sltu a2, a0, a5
+; RV32ZBB-NEXT:    sub a1, a1, a4
+; RV32ZBB-NEXT:    sub a1, a1, a2
+; RV32ZBB-NEXT:    sub a0, a0, a5
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_minmax_i64:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    minu a2, a0, a1
+; RV64ZBB-NEXT:    maxu a0, a0, a1
+; RV64ZBB-NEXT:    sub a0, a0, a2
+; RV64ZBB-NEXT:    ret
+  %min = call i64 @llvm.umin.i64(i64 %a, i64 %b)
+  %max = call i64 @llvm.umax.i64(i64 %a, i64 %b)
+  %sub = sub i64 %max, %min
+  ret i64 %sub
+}
+
+define i128 @abd_minmax_i128(i128 %a, i128 %b) nounwind {
+; RV32I-LABEL: abd_minmax_i128:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lw a6, 4(a2)
+; RV32I-NEXT:    lw a3, 4(a1)
+; RV32I-NEXT:    lw a7, 8(a2)
+; RV32I-NEXT:    lw t0, 12(a2)
+; RV32I-NEXT:    lw a5, 12(a1)
+; RV32I-NEXT:    lw a4, 8(a1)
+; RV32I-NEXT:    beq a5, t0, .LBB17_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu t1, a5, t0
+; RV32I-NEXT:    j .LBB17_3
+; RV32I-NEXT:  .LBB17_2:
+; RV32I-NEXT:    sltu t1, a4, a7
+; RV32I-NEXT:  .LBB17_3:
+; RV32I-NEXT:    lw t2, 0(a2)
+; RV32I-NEXT:    lw a1, 0(a1)
+; RV32I-NEXT:    beq a3, a6, .LBB17_5
+; RV32I-NEXT:  # %bb.4:
+; RV32I-NEXT:    sltu t6, a3, a6
+; RV32I-NEXT:    j .LBB17_6
+; RV32I-NEXT:  .LBB17_5:
+; RV32I-NEXT:    sltu t6, a1, t2
+; RV32I-NEXT:  .LBB17_6:
+; RV32I-NEXT:    xor a2, a5, t0
+; RV32I-NEXT:    xor t3, a4, a7
+; RV32I-NEXT:    or t5, t3, a2
+; RV32I-NEXT:    beqz t5, .LBB17_8
+; RV32I-NEXT:  # %bb.7:
+; RV32I-NEXT:    mv t6, t1
+; RV32I-NEXT:  .LBB17_8:
+; RV32I-NEXT:    mv a2, a1
+; RV32I-NEXT:    mv t1, a3
+; RV32I-NEXT:    mv t4, a5
+; RV32I-NEXT:    mv t3, a4
+; RV32I-NEXT:    bnez t6, .LBB17_10
+; RV32I-NEXT:  # %bb.9:
+; RV32I-NEXT:    mv a2, t2
+; RV32I-NEXT:    mv t1, a6
+; RV32I-NEXT:    mv t4, t0
+; RV32I-NEXT:    mv t3, a7
+; RV32I-NEXT:  .LBB17_10:
+; RV32I-NEXT:    beq a5, t0, .LBB17_12
+; RV32I-NEXT:  # %bb.11:
+; RV32I-NEXT:    sltu t6, t0, a5
+; RV32I-NEXT:    j .LBB17_13
+; RV32I-NEXT:  .LBB17_12:
+; RV32I-NEXT:    sltu t6, a7, a4
+; RV32I-NEXT:  .LBB17_13:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    beq a3, a6, .LBB17_15
+; RV32I-NEXT:  # %bb.14:
+; RV32I-NEXT:    sltu s0, a6, a3
+; RV32I-NEXT:    bnez t5, .LBB17_16
+; RV32I-NEXT:    j .LBB17_17
+; RV32I-NEXT:  .LBB17_15:
+; RV32I-NEXT:    sltu s0, t2, a1
+; RV32I-NEXT:    beqz t5, .LBB17_17
+; RV32I-NEXT:  .LBB17_16:
+; RV32I-NEXT:    mv s0, t6
+; RV32I-NEXT:  .LBB17_17:
+; RV32I-NEXT:    bnez s0, .LBB17_19
+; RV32I-NEXT:  # %bb.18:
+; RV32I-NEXT:    mv a1, t2
+; RV32I-NEXT:    mv a3, a6
+; RV32I-NEXT:    mv a5, t0
+; RV32I-NEXT:    mv a4, a7
+; RV32I-NEXT:  .LBB17_19:
+; RV32I-NEXT:    sltu a6, a4, t3
+; RV32I-NEXT:    sub a7, a5, t4
+; RV32I-NEXT:    sltu a5, a1, a2
+; RV32I-NEXT:    sub a6, a7, a6
+; RV32I-NEXT:    mv a7, a5
+; RV32I-NEXT:    beq a3, t1, .LBB17_21
+; RV32I-NEXT:  # %bb.20:
+; RV32I-NEXT:    sltu a7, a3, t1
+; RV32I-NEXT:  .LBB17_21:
+; RV32I-NEXT:    sub a4, a4, t3
+; RV32I-NEXT:    sltu t0, a4, a7
+; RV32I-NEXT:    sub a6, a6, t0
+; RV32I-NEXT:    sub a4, a4, a7
+; RV32I-NEXT:    sub a3, a3, t1
+; RV32I-NEXT:    sub a3, a3, a5
+; RV32I-NEXT:    sub a1, a1, a2
+; RV32I-NEXT:    sw a1, 0(a0)
+; RV32I-NEXT:    sw a3, 4(a0)
+; RV32I-NEXT:    sw a4, 8(a0)
+; RV32I-NEXT:    sw a6, 12(a0)
+; RV32I-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_minmax_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    beq a1, a3, .LBB17_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sltu a6, a1, a3
+; RV64I-NEXT:    j .LBB17_3
+; RV64I-NEXT:  .LBB17_2:
+; RV64I-NEXT:    sltu a6, a0, a2
+; RV64I-NEXT:  .LBB17_3:
+; RV64I-NEXT:    mv a4, a1
+; RV64I-NEXT:    mv a5, a0
+; RV64I-NEXT:    bnez a6, .LBB17_5
+; RV64I-NEXT:  # %bb.4:
+; RV64I-NEXT:    mv a4, a3
+; RV64I-NEXT:    mv a5, a2
+; RV64I-NEXT:  .LBB17_5:
+; RV64I-NEXT:    beq a1, a3, .LBB17_7
+; RV64I-NEXT:  # %bb.6:
+; RV64I-NEXT:    sltu a6, a3, a1
+; RV64I-NEXT:    beqz a6, .LBB17_8
+; RV64I-NEXT:    j .LBB17_9
+; RV64I-NEXT:  .LBB17_7:
+; RV64I-NEXT:    sltu a6, a2, a0
+; RV64I-NEXT:    bnez a6, .LBB17_9
+; RV64I-NEXT:  .LBB17_8:
+; RV64I-NEXT:    mv a1, a3
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:  .LBB17_9:
+; RV64I-NEXT:    sltu a2, a0, a5
+; RV64I-NEXT:    sub a1, a1, a4
+; RV64I-NEXT:    sub a1, a1, a2
+; RV64I-NEXT:    sub a0, a0, a5
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_minmax_i128:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lw a6, 4(a2)
+; RV32ZBB-NEXT:    lw a3, 4(a1)
+; RV32ZBB-NEXT:    lw a7, 8(a2)
+; RV32ZBB-NEXT:    lw t0, 12(a2)
+; RV32ZBB-NEXT:    lw a5, 12(a1)
+; RV32ZBB-NEXT:    lw a4, 8(a1)
+; RV32ZBB-NEXT:    beq a5, t0, .LBB17_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu t1, a5, t0
+; RV32ZBB-NEXT:    j .LBB17_3
+; RV32ZBB-NEXT:  .LBB17_2:
+; RV32ZBB-NEXT:    sltu t1, a4, a7
+; RV32ZBB-NEXT:  .LBB17_3:
+; RV32ZBB-NEXT:    lw t2, 0(a2)
+; RV32ZBB-NEXT:    lw a1, 0(a1)
+; RV32ZBB-NEXT:    beq a3, a6, .LBB17_5
+; RV32ZBB-NEXT:  # %bb.4:
+; RV32ZBB-NEXT:    sltu t6, a3, a6
+; RV32ZBB-NEXT:    j .LBB17_6
+; RV32ZBB-NEXT:  .LBB17_5:
+; RV32ZBB-NEXT:    sltu t6, a1, t2
+; RV32ZBB-NEXT:  .LBB17_6:
+; RV32ZBB-NEXT:    xor a2, a5, t0
+; RV32ZBB-NEXT:    xor t3, a4, a7
+; RV32ZBB-NEXT:    or t5, t3, a2
+; RV32ZBB-NEXT:    beqz t5, .LBB17_8
+; RV32ZBB-NEXT:  # %bb.7:
+; RV32ZBB-NEXT:    mv t6, t1
+; RV32ZBB-NEXT:  .LBB17_8:
+; RV32ZBB-NEXT:    mv a2, a1
+; RV32ZBB-NEXT:    mv t1, a3
+; RV32ZBB-NEXT:    mv t4, a5
+; RV32ZBB-NEXT:    mv t3, a4
+; RV32ZBB-NEXT:    bnez t6, .LBB17_10
+; RV32ZBB-NEXT:  # %bb.9:
+; RV32ZBB-NEXT:    mv a2, t2
+; RV32ZBB-NEXT:    mv t1, a6
+; RV32ZBB-NEXT:    mv t4, t0
+; RV32ZBB-NEXT:    mv t3, a7
+; RV32ZBB-NEXT:  .LBB17_10:
+; RV32ZBB-NEXT:    beq a5, t0, .LBB17_12
+; RV32ZBB-NEXT:  # %bb.11:
+; RV32ZBB-NEXT:    sltu t6, t0, a5
+; RV32ZBB-NEXT:    j .LBB17_13
+; RV32ZBB-NEXT:  .LBB17_12:
+; RV32ZBB-NEXT:    sltu t6, a7, a4
+; RV32ZBB-NEXT:  .LBB17_13:
+; RV32ZBB-NEXT:    addi sp, sp, -16
+; RV32ZBB-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; RV32ZBB-NEXT:    beq a3, a6, .LBB17_15
+; RV32ZBB-NEXT:  # %bb.14:
+; RV32ZBB-NEXT:    sltu s0, a6, a3
+; RV32ZBB-NEXT:    bnez t5, .LBB17_16
+; RV32ZBB-NEXT:    j .LBB17_17
+; RV32ZBB-NEXT:  .LBB17_15:
+; RV32ZBB-NEXT:    sltu s0, t2, a1
+; RV32ZBB-NEXT:    beqz t5, .LBB17_17
+; RV32ZBB-NEXT:  .LBB17_16:
+; RV32ZBB-NEXT:    mv s0, t6
+; RV32ZBB-NEXT:  .LBB17_17:
+; RV32ZBB-NEXT:    bnez s0, .LBB17_19
+; RV32ZBB-NEXT:  # %bb.18:
+; RV32ZBB-NEXT:    mv a1, t2
+; RV32ZBB-NEXT:    mv a3, a6
+; RV32ZBB-NEXT:    mv a5, t0
+; RV32ZBB-NEXT:    mv a4, a7
+; RV32ZBB-NEXT:  .LBB17_19:
+; RV32ZBB-NEXT:    sltu a6, a4, t3
+; RV32ZBB-NEXT:    sub a7, a5, t4
+; RV32ZBB-NEXT:    sltu a5, a1, a2
+; RV32ZBB-NEXT:    sub a6, a7, a6
+; RV32ZBB-NEXT:    mv a7, a5
+; RV32ZBB-NEXT:    beq a3, t1, .LBB17_21
+; RV32ZBB-NEXT:  # %bb.20:
+; RV32ZBB-NEXT:    sltu a7, a3, t1
+; RV32ZBB-NEXT:  .LBB17_21:
+; RV32ZBB-NEXT:    sub a4, a4, t3
+; RV32ZBB-NEXT:    sltu t0, a4, a7
+; RV32ZBB-NEXT:    sub a6, a6, t0
+; RV32ZBB-NEXT:    sub a4, a4, a7
+; RV32ZBB-NEXT:    sub a3, a3, t1
+; RV32ZBB-NEXT:    sub a3, a3, a5
+; RV32ZBB-NEXT:    sub a1, a1, a2
+; RV32ZBB-NEXT:    sw a1, 0(a0)
+; RV32ZBB-NEXT:    sw a3, 4(a0)
+; RV32ZBB-NEXT:    sw a4, 8(a0)
+; RV32ZBB-NEXT:    sw a6, 12(a0)
+; RV32ZBB-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32ZBB-NEXT:    addi sp, sp, 16
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_minmax_i128:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    beq a1, a3, .LBB17_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    sltu a6, a1, a3
+; RV64ZBB-NEXT:    j .LBB17_3
+; RV64ZBB-NEXT:  .LBB17_2:
+; RV64ZBB-NEXT:    sltu a6, a0, a2
+; RV64ZBB-NEXT:  .LBB17_3:
+; RV64ZBB-NEXT:    mv a4, a1
+; RV64ZBB-NEXT:    mv a5, a0
+; RV64ZBB-NEXT:    bnez a6, .LBB17_5
+; RV64ZBB-NEXT:  # %bb.4:
+; RV64ZBB-NEXT:    mv a4, a3
+; RV64ZBB-NEXT:    mv a5, a2
+; RV64ZBB-NEXT:  .LBB17_5:
+; RV64ZBB-NEXT:    beq a1, a3, .LBB17_7
+; RV64ZBB-NEXT:  # %bb.6:
+; RV64ZBB-NEXT:    sltu a6, a3, a1
+; RV64ZBB-NEXT:    beqz a6, .LBB17_8
+; RV64ZBB-NEXT:    j .LBB17_9
+; RV64ZBB-NEXT:  .LBB17_7:
+; RV64ZBB-NEXT:    sltu a6, a2, a0
+; RV64ZBB-NEXT:    bnez a6, .LBB17_9
+; RV64ZBB-NEXT:  .LBB17_8:
+; RV64ZBB-NEXT:    mv a1, a3
+; RV64ZBB-NEXT:    mv a0, a2
+; RV64ZBB-NEXT:  .LBB17_9:
+; RV64ZBB-NEXT:    sltu a2, a0, a5
+; RV64ZBB-NEXT:    sub a1, a1, a4
+; RV64ZBB-NEXT:    sub a1, a1, a2
+; RV64ZBB-NEXT:    sub a0, a0, a5
+; RV64ZBB-NEXT:    ret
+  %min = call i128 @llvm.umin.i128(i128 %a, i128 %b)
+  %max = call i128 @llvm.umax.i128(i128 %a, i128 %b)
+  %sub = sub i128 %max, %min
+  ret i128 %sub
+}
+
+;
+; select(icmp(a,b),sub(a,b),sub(b,a)) -> abdu(a,b)
+;
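+; A minimal IR sketch of the pattern (assuming i8 operands; the select
+; arms must pair up with the compare so the chosen difference is
+; non-negative):
+;   %cmp = icmp ugt i8 %a, %b
+;   %ab  = sub i8 %a, %b
+;   %ba  = sub i8 %b, %a
+;   %sel = select i1 %cmp, i8 %ab, i8 %ba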
+
+define i8 @abd_cmp_i8(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: abd_cmp_i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    andi a2, a0, 255
+; CHECK-NEXT:    andi a3, a1, 255
+; CHECK-NEXT:    bltu a3, a2, .LBB18_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    sub a0, a1, a0
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB18_2:
+; CHECK-NEXT:    sub a0, a0, a1
+; CHECK-NEXT:    ret
+  %cmp = icmp ugt i8 %a, %b
+  %ab = sub i8 %a, %b
+  %ba = sub i8 %b, %a
+  %sel = select i1 %cmp, i8 %ab, i8 %ba
+  ret i8 %sel
+}
+
+define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind {
+; RV32I-LABEL: abd_cmp_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a2, 16
+; RV32I-NEXT:    addi a2, a2, -1
+; RV32I-NEXT:    and a3, a1, a2
+; RV32I-NEXT:    and a2, a0, a2
+; RV32I-NEXT:    bgeu a2, a3, .LBB19_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB19_2:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_cmp_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a2, 16
+; RV64I-NEXT:    addiw a2, a2, -1
+; RV64I-NEXT:    and a3, a1, a2
+; RV64I-NEXT:    and a2, a0, a2
+; RV64I-NEXT:    bgeu a2, a3, .LBB19_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB19_2:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; ZBB-LABEL: abd_cmp_i16:
+; ZBB:       # %bb.0:
+; ZBB-NEXT:    zext.h a2, a1
+; ZBB-NEXT:    zext.h a3, a0
+; ZBB-NEXT:    bgeu a3, a2, .LBB19_2
+; ZBB-NEXT:  # %bb.1:
+; ZBB-NEXT:    sub a0, a1, a0
+; ZBB-NEXT:    ret
+; ZBB-NEXT:  .LBB19_2:
+; ZBB-NEXT:    sub a0, a0, a1
+; ZBB-NEXT:    ret
+  %cmp = icmp uge i16 %a, %b
+  %ab = sub i16 %a, %b
+  %ba = sub i16 %b, %a
+  %sel = select i1 %cmp, i16 %ab, i16 %ba
+  ret i16 %sel
+}
+
+define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: abd_cmp_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    bltu a0, a1, .LBB20_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB20_2:
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_cmp_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a2, a1
+; RV64I-NEXT:    sext.w a3, a0
+; RV64I-NEXT:    bltu a3, a2, .LBB20_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB20_2:
+; RV64I-NEXT:    subw a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_cmp_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    bltu a0, a1, .LBB20_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    ret
+; RV32ZBB-NEXT:  .LBB20_2:
+; RV32ZBB-NEXT:    sub a0, a1, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_cmp_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sext.w a2, a1
+; RV64ZBB-NEXT:    sext.w a3, a0
+; RV64ZBB-NEXT:    bltu a3, a2, .LBB20_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    subw a0, a0, a1
+; RV64ZBB-NEXT:    ret
+; RV64ZBB-NEXT:  .LBB20_2:
+; RV64ZBB-NEXT:    subw a0, a1, a0
+; RV64ZBB-NEXT:    ret
+  %cmp = icmp ult i32 %a, %b
+  %ab = sub i32 %a, %b
+  %ba = sub i32 %b, %a
+  %sel = select i1 %cmp, i32 %ba, i32 %ab
+  ret i32 %sel
+}
+
+define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: abd_cmp_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltu a4, a0, a2
+; RV32I-NEXT:    mv a5, a4
+; RV32I-NEXT:    beq a1, a3, .LBB21_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu a5, a1, a3
+; RV32I-NEXT:  .LBB21_2:
+; RV32I-NEXT:    beqz a5, .LBB21_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sub a1, a1, a4
+; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB21_4:
+; RV32I-NEXT:    sltu a4, a2, a0
+; RV32I-NEXT:    sub a1, a3, a1
+; RV32I-NEXT:    sub a1, a1, a4
+; RV32I-NEXT:    sub a0, a2, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_cmp_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    bgeu a0, a1, .LBB21_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB21_2:
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_cmp_i64:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sltu a4, a0, a2
+; RV32ZBB-NEXT:    mv a5, a4
+; RV32ZBB-NEXT:    beq a1, a3, .LBB21_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu a5, a1, a3
+; RV32ZBB-NEXT:  .LBB21_2:
+; RV32ZBB-NEXT:    beqz a5, .LBB21_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    sub a1, a1, a3
+; RV32ZBB-NEXT:    sub a1, a1, a4
+; RV32ZBB-NEXT:    sub a0, a0, a2
+; RV32ZBB-NEXT:    ret
+; RV32ZBB-NEXT:  .LBB21_4:
+; RV32ZBB-NEXT:    sltu a4, a2, a0
+; RV32ZBB-NEXT:    sub a1, a3, a1
+; RV32ZBB-NEXT:    sub a1, a1, a4
+; RV32ZBB-NEXT:    sub a0, a2, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_cmp_i64:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    bgeu a0, a1, .LBB21_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    ret
+; RV64ZBB-NEXT:  .LBB21_2:
+; RV64ZBB-NEXT:    sub a0, a1, a0
+; RV64ZBB-NEXT:    ret
+  %cmp = icmp uge i64 %a, %b
+  %ab = sub i64 %a, %b
+  %ba = sub i64 %b, %a
+  %sel = select i1 %cmp, i64 %ba, i64 %ab
+  ret i64 %sel
+}
+
+define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
+; RV32I-LABEL: abd_cmp_i128:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lw a3, 0(a2)
+; RV32I-NEXT:    lw a4, 0(a1)
+; RV32I-NEXT:    lw a5, 4(a2)
+; RV32I-NEXT:    lw a6, 8(a2)
+; RV32I-NEXT:    lw a7, 8(a1)
+; RV32I-NEXT:    lw a2, 12(a2)
+; RV32I-NEXT:    lw t0, 12(a1)
+; RV32I-NEXT:    lw a1, 4(a1)
+; RV32I-NEXT:    sltu t1, a7, a6
+; RV32I-NEXT:    mv t4, t1
+; RV32I-NEXT:    beq t0, a2, .LBB22_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu t4, t0, a2
+; RV32I-NEXT:  .LBB22_2:
+; RV32I-NEXT:    xor t3, t0, a2
+; RV32I-NEXT:    xor t5, a7, a6
+; RV32I-NEXT:    sltu t2, a4, a3
+; RV32I-NEXT:    or t5, t5, t3
+; RV32I-NEXT:    mv t3, t2
+; RV32I-NEXT:    beq a1, a5, .LBB22_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    sltu t3, a1, a5
+; RV32I-NEXT:  .LBB22_4:
+; RV32I-NEXT:    mv t6, t3
+; RV32I-NEXT:    beqz t5, .LBB22_6
+; RV32I-NEXT:  # %bb.5:
+; RV32I-NEXT:    mv t6, t4
+; RV32I-NEXT:  .LBB22_6:
+; RV32I-NEXT:    sltu t4, a3, a4
+; RV32I-NEXT:    mv t5, t4
+; RV32I-NEXT:    beq a1, a5, .LBB22_8
+; RV32I-NEXT:  # %bb.7:
+; RV32I-NEXT:    sltu t5, a5, a1
+; RV32I-NEXT:  .LBB22_8:
+; RV32I-NEXT:    beqz t6, .LBB22_10
+; RV32I-NEXT:  # %bb.9:
+; RV32I-NEXT:    sub a2, t0, a2
+; RV32I-NEXT:    sub a6, a7, a6
+; RV32I-NEXT:    sub a2, a2, t1
+; RV32I-NEXT:    sltu a7, a6, t3
+; RV32I-NEXT:    sub a1, a1, a5
+; RV32I-NEXT:    sub a2, a2, a7
+; RV32I-NEXT:    sub a6, a6, t3
+; RV32I-NEXT:    sub a1, a1, t2
+; RV32I-NEXT:    sub a3, a4, a3
+; RV32I-NEXT:    j .LBB22_11
+; RV32I-NEXT:  .LBB22_10:
+; RV32I-NEXT:    sltu t1, a6, a7
+; RV32I-NEXT:    sub a2, a2, t0
+; RV32I-NEXT:    sub a2, a2, t1
+; RV32I-NEXT:    sub a6, a6, a7
+; RV32I-NEXT:    sltu a7, a6, t5
+; RV32I-NEXT:    sub a2, a2, a7
+; RV32I-NEXT:    sub a6, a6, t5
+; RV32I-NEXT:    sub a5, a5, a1
+; RV32I-NEXT:    sub a1, a5, t4
+; RV32I-NEXT:    sub a3, a3, a4
+; RV32I-NEXT:  .LBB22_11:
+; RV32I-NEXT:    sw a6, 8(a0)
+; RV32I-NEXT:    sw a1, 4(a0)
+; RV32I-NEXT:    sw a3, 0(a0)
+; RV32I-NEXT:    sw a2, 12(a0)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: abd_cmp_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sltu a4, a0, a2
+; RV64I-NEXT:    mv a5, a4
+; RV64I-NEXT:    beq a1, a3, .LBB22_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sltu a5, a1, a3
+; RV64I-NEXT:  .LBB22_2:
+; RV64I-NEXT:    beqz a5, .LBB22_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    sub a1, a1, a3
+; RV64I-NEXT:    sub a1, a1, a4
+; RV64I-NEXT:    sub a0, a0, a2
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB22_4:
+; RV64I-NEXT:    sltu a4, a2, a0
+; RV64I-NEXT:    sub a1, a3, a1
+; RV64I-NEXT:    sub a1, a1, a4
+; RV64I-NEXT:    sub a0, a2, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: abd_cmp_i128:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lw a3, 0(a2)
+; RV32ZBB-NEXT:    lw a4, 0(a1)
+; RV32ZBB-NEXT:    lw a5, 4(a2)
+; RV32ZBB-NEXT:    lw a6, 8(a2)
+; RV32ZBB-NEXT:    lw a7, 8(a1)
+; RV32ZBB-NEXT:    lw a2, 12(a2)
+; RV32ZBB-NEXT:    lw t0, 12(a1)
+; RV32ZBB-NEXT:    lw a1, 4(a1)
+; RV32ZBB-NEXT:    sltu t1, a7, a6
+; RV32ZBB-NEXT:    mv t4, t1
+; RV32ZBB-NEXT:    beq t0, a2, .LBB22_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu t4, t0, a2
+; RV32ZBB-NEXT:  .LBB22_2:
+; RV32ZBB-NEXT:    xor t3, t0, a2
+; RV32ZBB-NEXT:    xor t5, a7, a6
+; RV32ZBB-NEXT:    sltu t2, a4, a3
+; RV32ZBB-NEXT:    or t5, t5, t3
+; RV32ZBB-NEXT:    mv t3, t2
+; RV32ZBB-NEXT:    beq a1, a5, .LBB22_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    sltu t3, a1, a5
+; RV32ZBB-NEXT:  .LBB22_4:
+; RV32ZBB-NEXT:    mv t6, t3
+; RV32ZBB-NEXT:    beqz t5, .LBB22_6
+; RV32ZBB-NEXT:  # %bb.5:
+; RV32ZBB-NEXT:    mv t6, t4
+; RV32ZBB-NEXT:  .LBB22_6:
+; RV32ZBB-NEXT:    sltu t4, a3, a4
+; RV32ZBB-NEXT:    mv t5, t4
+; RV32ZBB-NEXT:    beq a1, a5, .LBB22_8
+; RV32ZBB-NEXT:  # %bb.7:
+; RV32ZBB-NEXT:    sltu t5, a5, a1
+; RV32ZBB-NEXT:  .LBB22_8:
+; RV32ZBB-NEXT:    beqz t6, .LBB22_10
+; RV32ZBB-NEXT:  # %bb.9:
+; RV32ZBB-NEXT:    sub a2, t0, a2
+; RV32ZBB-NEXT:    sub a6, a7, a6
+; RV32ZBB-NEXT:    sub a2, a2, t1
+; RV32ZBB-NEXT:    sltu a7, a6, t3
+; RV32ZBB-NEXT:    sub a1, a1, a5
+; RV32ZBB-NEXT:    sub a2, a2, a7
+; RV32ZBB-NEXT:    sub a6, a6, t3
+; RV32ZBB-NEXT:    sub a1, a1, t2
+; RV32ZBB-NEXT:    sub a3, a4, a3
+; RV32ZBB-NEXT:    j .LBB22_11
+; RV32ZBB-NEXT:  .LBB22_10:
+; RV32ZBB-NEXT:    sltu t1, a6, a7
+; RV32ZBB-NEXT:    sub a2, a2, t0
+; RV32ZBB-NEXT:    sub a2, a2, t1
+; RV32ZBB-NEXT:    sub a6, a6, a7
+; RV32ZBB-NEXT:    sltu a7, a6, t5
+; RV32ZBB-NEXT:    sub a2, a2, a7
+; RV32ZBB-NEXT:    sub a6, a6, t5
+; RV32ZBB-NEXT:    sub a5, a5, a1
+; RV32ZBB-NEXT:    sub a1, a5, t4
+; RV32ZBB-NEXT:    sub a3, a3, a4
+; RV32ZBB-NEXT:  .LBB22_11:
+; RV32ZBB-NEXT:    sw a6, 8(a0)
+; RV32ZBB-NEXT:    sw a1, 4(a0)
+; RV32ZBB-NEXT:    sw a3, 0(a0)
+; RV32ZBB-NEXT:    sw a2, 12(a0)
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: abd_cmp_i128:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sltu a4, a0, a2
+; RV64ZBB-NEXT:    mv a5, a4
+; RV64ZBB-NEXT:    beq a1, a3, .LBB22_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    sltu a5, a1, a3
+; RV64ZBB-NEXT:  .LBB22_2:
+; RV64ZBB-NEXT:    beqz a5, .LBB22_4
+; RV64ZBB-NEXT:  # %bb.3:
+; RV64ZBB-NEXT:    sub a1, a1, a3
+; RV64ZBB-NEXT:    sub a1, a1, a4
+; RV64ZBB-NEXT:    sub a0, a0, a2
+; RV64ZBB-NEXT:    ret
+; RV64ZBB-NEXT:  .LBB22_4:
+; RV64ZBB-NEXT:    sltu a4, a2, a0
+; RV64ZBB-NEXT:    sub a1, a3, a1
+; RV64ZBB-NEXT:    sub a1, a1, a4
+; RV64ZBB-NEXT:    sub a0, a2, a0
+; RV64ZBB-NEXT:    ret
+  %cmp = icmp uge i128 %a, %b
+  %ab = sub i128 %a, %b
+  %ba = sub i128 %b, %a
+  %sel = select i1 %cmp, i128 %ba, i128 %ab
+  ret i128 %sel
+}
+
+declare i8 @llvm.abs.i8(i8, i1)
+declare i16 @llvm.abs.i16(i16, i1)
+declare i32 @llvm.abs.i32(i32, i1)
+declare i64 @llvm.abs.i64(i64, i1)
+declare i128 @llvm.abs.i128(i128, i1)
+
+declare i8 @llvm.umax.i8(i8, i8)
+declare i16 @llvm.umax.i16(i16, i16)
+declare i32 @llvm.umax.i32(i32, i32)
+declare i64 @llvm.umax.i64(i64, i64)
+
+declare i8 @llvm.umin.i8(i8, i8)
+declare i16 @llvm.umin.i16(i16, i16)
+declare i32 @llvm.umin.i32(i32, i32)
+declare i64 @llvm.umin.i64(i64, i64)
