[llvm] 0f38b4d - [ARM] Add neon_vabd.ll based off aarch64 tests

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Thu Jun 6 02:07:47 PDT 2024


Author: Simon Pilgrim
Date: 2024-06-06T10:06:57+01:00
New Revision: 0f38b4d1ca026f1c28e5a75adab00173852234dd

URL: https://github.com/llvm/llvm-project/commit/0f38b4d1ca026f1c28e5a75adab00173852234dd
DIFF: https://github.com/llvm/llvm-project/commit/0f38b4d1ca026f1c28e5a75adab00173852234dd.diff

LOG: [ARM] Add neon_vabd.ll based off aarch64 tests

Test coverage for #94504
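
For reference, the new file can be exercised directly with the llc invocation from its RUN
line, and its CHECK lines can be regenerated with the update script noted in the file header.
A minimal sketch, assuming an llvm-project checkout with a build tree in ./build (paths are
illustrative, not part of the commit):

    # Run codegen for the test by hand, mirroring the RUN line:
    ./build/bin/llc -mtriple=arm-eabi -mattr=+neon llvm/test/CodeGen/ARM/neon_vabd.ll -o -

    # Run just this test through lit:
    ./build/bin/llvm-lit llvm/test/CodeGen/ARM/neon_vabd.ll

    # Regenerate the autogenerated CHECK lines after a codegen change:
    llvm/utils/update_llc_test_checks.py --llc-binary ./build/bin/llc \
        llvm/test/CodeGen/ARM/neon_vabd.ll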

Added: 
    llvm/test/CodeGen/ARM/neon_vabd.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/ARM/neon_vabd.ll b/llvm/test/CodeGen/ARM/neon_vabd.ll
new file mode 100644
index 0000000000000..14ad1a108a728
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/neon_vabd.ll
@@ -0,0 +1,890 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
+
+;
+; SABD
+;
+
+define <8 x i8> @sabd_8b(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: sabd_8b:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vsubl.s8 q8, d17, d16
+; CHECK-NEXT:    vabs.s16 q8, q8
+; CHECK-NEXT:    vmovn.i16 d16, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
+  %a.sext = sext <8 x i8> %a to <8 x i16>
+  %b.sext = sext <8 x i8> %b to <8 x i16>
+  %sub = sub <8 x i16> %a.sext, %b.sext
+  %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
+  %trunc = trunc <8 x i16> %abs to <8 x i8>
+  ret <8 x i8> %trunc
+}
+
+define <16 x i8> @sabd_16b(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: sabd_16b:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT:    vmov d18, r2, r3
+; CHECK-NEXT:    vmov d19, r0, r1
+; CHECK-NEXT:    vsubl.s8 q10, d18, d17
+; CHECK-NEXT:    vsubl.s8 q8, d19, d16
+; CHECK-NEXT:    vabs.s16 q9, q10
+; CHECK-NEXT:    vabs.s16 q8, q8
+; CHECK-NEXT:    vmovn.i16 d19, q9
+; CHECK-NEXT:    vmovn.i16 d18, q8
+; CHECK-NEXT:    vmov r2, r3, d19
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    mov pc, lr
+  %a.sext = sext <16 x i8> %a to <16 x i16>
+  %b.sext = sext <16 x i8> %b to <16 x i16>
+  %sub = sub <16 x i16> %a.sext, %b.sext
+  %abs = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %sub, i1 true)
+  %trunc = trunc <16 x i16> %abs to <16 x i8>
+  ret <16 x i8> %trunc
+}
+
+define <4 x i16> @sabd_4h(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: sabd_4h:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vsubl.s16 q8, d17, d16
+; CHECK-NEXT:    vabs.s32 q8, q8
+; CHECK-NEXT:    vmovn.i32 d16, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
+  %a.sext = sext <4 x i16> %a to <4 x i32>
+  %b.sext = sext <4 x i16> %b to <4 x i32>
+  %sub = sub <4 x i32> %a.sext, %b.sext
+  %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
+  %trunc = trunc <4 x i32> %abs to <4 x i16>
+  ret <4 x i16> %trunc
+}
+
+define <4 x i16> @sabd_4h_promoted_ops(<4 x i8> %a, <4 x i8> %b) {
+; CHECK-LABEL: sabd_4h_promoted_ops:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vshl.i16 d16, d16, #8
+; CHECK-NEXT:    vshl.i16 d17, d17, #8
+; CHECK-NEXT:    vshr.s16 d16, d16, #8
+; CHECK-NEXT:    vshr.s16 d17, d17, #8
+; CHECK-NEXT:    vsub.i16 d16, d17, d16
+; CHECK-NEXT:    vabs.s16 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
+  %a.sext = sext <4 x i8> %a to <4 x i16>
+  %b.sext = sext <4 x i8> %b to <4 x i16>
+  %sub = sub <4 x i16> %a.sext, %b.sext
+  %abs = call <4 x i16> @llvm.abs.v4i16(<4 x i16> %sub, i1 true)
+  ret <4 x i16> %abs
+}
+
+define <8 x i16> @sabd_8h(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: sabd_8h:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT:    vmov d18, r2, r3
+; CHECK-NEXT:    vmov d19, r0, r1
+; CHECK-NEXT:    vsubl.s16 q10, d18, d17
+; CHECK-NEXT:    vsubl.s16 q8, d19, d16
+; CHECK-NEXT:    vabs.s32 q9, q10
+; CHECK-NEXT:    vabs.s32 q8, q8
+; CHECK-NEXT:    vmovn.i32 d19, q9
+; CHECK-NEXT:    vmovn.i32 d18, q8
+; CHECK-NEXT:    vmov r2, r3, d19
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    mov pc, lr
+  %a.sext = sext <8 x i16> %a to <8 x i32>
+  %b.sext = sext <8 x i16> %b to <8 x i32>
+  %sub = sub <8 x i32> %a.sext, %b.sext
+  %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 true)
+  %trunc = trunc <8 x i32> %abs to <8 x i16>
+  ret <8 x i16> %trunc
+}
+
+define <8 x i16> @sabd_8h_promoted_ops(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: sabd_8h_promoted_ops:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vsubl.s8 q8, d17, d16
+; CHECK-NEXT:    vabs.s16 q8, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
+  %a.sext = sext <8 x i8> %a to <8 x i16>
+  %b.sext = sext <8 x i8> %b to <8 x i16>
+  %sub = sub <8 x i16> %a.sext, %b.sext
+  %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
+  ret <8 x i16> %abs
+}
+
+define <2 x i32> @sabd_2s(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: sabd_2s:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vsubl.s32 q8, d17, d16
+; CHECK-NEXT:    vshr.s64 q9, q8, #63
+; CHECK-NEXT:    veor q8, q8, q9
+; CHECK-NEXT:    vsub.i64 q8, q8, q9
+; CHECK-NEXT:    vmovn.i64 d16, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
+  %a.sext = sext <2 x i32> %a to <2 x i64>
+  %b.sext = sext <2 x i32> %b to <2 x i64>
+  %sub = sub <2 x i64> %a.sext, %b.sext
+  %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
+  %trunc = trunc <2 x i64> %abs to <2 x i32>
+  ret <2 x i32> %trunc
+}
+
+define <2 x i32> @sabd_2s_promoted_ops(<2 x i16> %a, <2 x i16> %b) {
+; CHECK-LABEL: sabd_2s_promoted_ops:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vshl.i32 d16, d16, #16
+; CHECK-NEXT:    vshl.i32 d17, d17, #16
+; CHECK-NEXT:    vshr.s32 d16, d16, #16
+; CHECK-NEXT:    vshr.s32 d17, d17, #16
+; CHECK-NEXT:    vsub.i32 d16, d17, d16
+; CHECK-NEXT:    vabs.s32 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
+  %a.sext = sext <2 x i16> %a to <2 x i32>
+  %b.sext = sext <2 x i16> %b to <2 x i32>
+  %sub = sub <2 x i32> %a.sext, %b.sext
+  %abs = call <2 x i32> @llvm.abs.v2i32(<2 x i32> %sub, i1 true)
+  ret <2 x i32> %abs
+}
+
+define <4 x i32> @sabd_4s(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: sabd_4s:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT:    vmov d18, r2, r3
+; CHECK-NEXT:    vmov d19, r0, r1
+; CHECK-NEXT:    vsubl.s32 q10, d18, d17
+; CHECK-NEXT:    vsubl.s32 q8, d19, d16
+; CHECK-NEXT:    vshr.s64 q9, q10, #63
+; CHECK-NEXT:    vshr.s64 q11, q8, #63
+; CHECK-NEXT:    veor q10, q10, q9
+; CHECK-NEXT:    veor q8, q8, q11
+; CHECK-NEXT:    vsub.i64 q9, q10, q9
+; CHECK-NEXT:    vsub.i64 q8, q8, q11
+; CHECK-NEXT:    vmovn.i64 d19, q9
+; CHECK-NEXT:    vmovn.i64 d18, q8
+; CHECK-NEXT:    vmov r2, r3, d19
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    mov pc, lr
+  %a.sext = sext <4 x i32> %a to <4 x i64>
+  %b.sext = sext <4 x i32> %b to <4 x i64>
+  %sub = sub <4 x i64> %a.sext, %b.sext
+  %abs = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %sub, i1 true)
+  %trunc = trunc <4 x i64> %abs to <4 x i32>
+  ret <4 x i32> %trunc
+}
+
+define <4 x i32> @sabd_4s_promoted_ops(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: sabd_4s_promoted_ops:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vsubl.s16 q8, d17, d16
+; CHECK-NEXT:    vabs.s32 q8, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
+  %a.sext = sext <4 x i16> %a to <4 x i32>
+  %b.sext = sext <4 x i16> %b to <4 x i32>
+  %sub = sub <4 x i32> %a.sext, %b.sext
+  %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
+  ret <4 x i32> %abs
+}
+
+define <2 x i64> @sabd_2d(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: sabd_2d:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    add r12, sp, #24
+; CHECK-NEXT:    asr r6, r3, #31
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT:    vmov r12, lr, d17
+; CHECK-NEXT:    vmov r7, r5, d16
+; CHECK-NEXT:    subs r2, r2, r12
+; CHECK-NEXT:    sbcs r3, r3, lr
+; CHECK-NEXT:    sbcs r4, r6, lr, asr #31
+; CHECK-NEXT:    sbc r6, r6, lr, asr #31
+; CHECK-NEXT:    eor r2, r2, r6, asr #31
+; CHECK-NEXT:    eor r3, r3, r6, asr #31
+; CHECK-NEXT:    subs r2, r2, r6, asr #31
+; CHECK-NEXT:    sbc r3, r3, r6, asr #31
+; CHECK-NEXT:    subs r0, r0, r7
+; CHECK-NEXT:    asr r6, r1, #31
+; CHECK-NEXT:    sbcs r1, r1, r5
+; CHECK-NEXT:    sbcs r7, r6, r5, asr #31
+; CHECK-NEXT:    vmov.32 d17[0], r2
+; CHECK-NEXT:    sbc r7, r6, r5, asr #31
+; CHECK-NEXT:    eor r0, r0, r7, asr #31
+; CHECK-NEXT:    subs r0, r0, r7, asr #31
+; CHECK-NEXT:    vmov.32 d16[0], r0
+; CHECK-NEXT:    eor r0, r1, r7, asr #31
+; CHECK-NEXT:    sbc r0, r0, r7, asr #31
+; CHECK-NEXT:    vmov.32 d17[1], r3
+; CHECK-NEXT:    vmov.32 d16[1], r0
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov pc, lr
+  %a.sext = sext <2 x i64> %a to <2 x i128>
+  %b.sext = sext <2 x i64> %b to <2 x i128>
+  %sub = sub <2 x i128> %a.sext, %b.sext
+  %abs = call <2 x i128> @llvm.abs.v2i128(<2 x i128> %sub, i1 true)
+  %trunc = trunc <2 x i128> %abs to <2 x i64>
+  ret <2 x i64> %trunc
+}
+
+define <2 x i64> @sabd_2d_promoted_ops(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: sabd_2d_promoted_ops:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vsubl.s32 q8, d17, d16
+; CHECK-NEXT:    vshr.s64 q9, q8, #63
+; CHECK-NEXT:    veor q8, q8, q9
+; CHECK-NEXT:    vsub.i64 q8, q8, q9
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
+  %a.sext = sext <2 x i32> %a to <2 x i64>
+  %b.sext = sext <2 x i32> %b to <2 x i64>
+  %sub = sub <2 x i64> %a.sext, %b.sext
+  %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
+  ret <2 x i64> %abs
+}
+
+;
+; UABD
+;
+
+define <8 x i8> @uabd_8b(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: uabd_8b:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vabdl.u8 q8, d17, d16
+; CHECK-NEXT:    vmovn.i16 d16, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
+  %a.zext = zext <8 x i8> %a to <8 x i16>
+  %b.zext = zext <8 x i8> %b to <8 x i16>
+  %sub = sub <8 x i16> %a.zext, %b.zext
+  %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
+  %trunc = trunc <8 x i16> %abs to <8 x i8>
+  ret <8 x i8> %trunc
+}
+
+define <16 x i8> @uabd_16b(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: uabd_16b:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT:    vmov d18, r2, r3
+; CHECK-NEXT:    vmov d19, r0, r1
+; CHECK-NEXT:    vabdl.u8 q10, d18, d17
+; CHECK-NEXT:    vabdl.u8 q8, d19, d16
+; CHECK-NEXT:    vmovn.i16 d19, q10
+; CHECK-NEXT:    vmovn.i16 d18, q8
+; CHECK-NEXT:    vmov r2, r3, d19
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    mov pc, lr
+  %a.zext = zext <16 x i8> %a to <16 x i16>
+  %b.zext = zext <16 x i8> %b to <16 x i16>
+  %sub = sub <16 x i16> %a.zext, %b.zext
+  %abs = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %sub, i1 true)
+  %trunc = trunc <16 x i16> %abs to <16 x i8>
+  ret <16 x i8> %trunc
+}
+
+define <4 x i16> @uabd_4h(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: uabd_4h:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vabdl.u16 q8, d17, d16
+; CHECK-NEXT:    vmovn.i32 d16, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
+  %a.zext = zext <4 x i16> %a to <4 x i32>
+  %b.zext = zext <4 x i16> %b to <4 x i32>
+  %sub = sub <4 x i32> %a.zext, %b.zext
+  %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
+  %trunc = trunc <4 x i32> %abs to <4 x i16>
+  ret <4 x i16> %trunc
+}
+
+define <4 x i16> @uabd_4h_promoted_ops(<4 x i8> %a, <4 x i8> %b) {
+; CHECK-LABEL: uabd_4h_promoted_ops:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vbic.i16 d16, #0xff00
+; CHECK-NEXT:    vbic.i16 d17, #0xff00
+; CHECK-NEXT:    vsub.i16 d16, d17, d16
+; CHECK-NEXT:    vabs.s16 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
+  %a.zext = zext <4 x i8> %a to <4 x i16>
+  %b.zext = zext <4 x i8> %b to <4 x i16>
+  %sub = sub <4 x i16> %a.zext, %b.zext
+  %abs = call <4 x i16> @llvm.abs.v4i16(<4 x i16> %sub, i1 true)
+  ret <4 x i16> %abs
+}
+
+define <8 x i16> @uabd_8h(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: uabd_8h:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT:    vmov d18, r2, r3
+; CHECK-NEXT:    vmov d19, r0, r1
+; CHECK-NEXT:    vabdl.u16 q10, d18, d17
+; CHECK-NEXT:    vabdl.u16 q8, d19, d16
+; CHECK-NEXT:    vmovn.i32 d19, q10
+; CHECK-NEXT:    vmovn.i32 d18, q8
+; CHECK-NEXT:    vmov r2, r3, d19
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    mov pc, lr
+  %a.zext = zext <8 x i16> %a to <8 x i32>
+  %b.zext = zext <8 x i16> %b to <8 x i32>
+  %sub = sub <8 x i32> %a.zext, %b.zext
+  %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 true)
+  %trunc = trunc <8 x i32> %abs to <8 x i16>
+  ret <8 x i16> %trunc
+}
+
+define <8 x i16> @uabd_8h_promoted_ops(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: uabd_8h_promoted_ops:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vabdl.u8 q8, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
+  %a.zext = zext <8 x i8> %a to <8 x i16>
+  %b.zext = zext <8 x i8> %b to <8 x i16>
+  %sub = sub <8 x i16> %a.zext, %b.zext
+  %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
+  ret <8 x i16> %abs
+}
+
+define <2 x i32> @uabd_2s(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: uabd_2s:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vsubl.u32 q8, d17, d16
+; CHECK-NEXT:    vshr.s64 q9, q8, #63
+; CHECK-NEXT:    veor q8, q8, q9
+; CHECK-NEXT:    vsub.i64 q8, q8, q9
+; CHECK-NEXT:    vmovn.i64 d16, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
+  %a.zext = zext <2 x i32> %a to <2 x i64>
+  %b.zext = zext <2 x i32> %b to <2 x i64>
+  %sub = sub <2 x i64> %a.zext, %b.zext
+  %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
+  %trunc = trunc <2 x i64> %abs to <2 x i32>
+  ret <2 x i32> %trunc
+}
+
+define <2 x i32> @uabd_2s_promoted_ops(<2 x i16> %a, <2 x i16> %b) {
+; CHECK-LABEL: uabd_2s_promoted_ops:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.i32 d16, #0xffff
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov d18, r0, r1
+; CHECK-NEXT:    vand d17, d17, d16
+; CHECK-NEXT:    vand d16, d18, d16
+; CHECK-NEXT:    vsub.i32 d16, d16, d17
+; CHECK-NEXT:    vabs.s32 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
+  %a.zext = zext <2 x i16> %a to <2 x i32>
+  %b.zext = zext <2 x i16> %b to <2 x i32>
+  %sub = sub <2 x i32> %a.zext, %b.zext
+  %abs = call <2 x i32> @llvm.abs.v2i32(<2 x i32> %sub, i1 true)
+  ret <2 x i32> %abs
+}
+
+define <4 x i32> @uabd_4s(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: uabd_4s:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT:    vmov d18, r2, r3
+; CHECK-NEXT:    vmov d19, r0, r1
+; CHECK-NEXT:    vsubl.u32 q10, d18, d17
+; CHECK-NEXT:    vsubl.u32 q8, d19, d16
+; CHECK-NEXT:    vshr.s64 q9, q10, #63
+; CHECK-NEXT:    vshr.s64 q11, q8, #63
+; CHECK-NEXT:    veor q10, q10, q9
+; CHECK-NEXT:    veor q8, q8, q11
+; CHECK-NEXT:    vsub.i64 q9, q10, q9
+; CHECK-NEXT:    vsub.i64 q8, q8, q11
+; CHECK-NEXT:    vmovn.i64 d19, q9
+; CHECK-NEXT:    vmovn.i64 d18, q8
+; CHECK-NEXT:    vmov r2, r3, d19
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    mov pc, lr
+  %a.zext = zext <4 x i32> %a to <4 x i64>
+  %b.zext = zext <4 x i32> %b to <4 x i64>
+  %sub = sub <4 x i64> %a.zext, %b.zext
+  %abs = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %sub, i1 true)
+  %trunc = trunc <4 x i64> %abs to <4 x i32>
+  ret <4 x i32> %trunc
+}
+
+define <4 x i32> @uabd_4s_promoted_ops(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: uabd_4s_promoted_ops:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vabdl.u16 q8, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
+  %a.zext = zext <4 x i16> %a to <4 x i32>
+  %b.zext = zext <4 x i16> %b to <4 x i32>
+  %sub = sub <4 x i32> %a.zext, %b.zext
+  %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
+  ret <4 x i32> %abs
+}
+
+define <2 x i64> @uabd_2d(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: uabd_2d:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    add r12, sp, #24
+; CHECK-NEXT:    mov r6, #0
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT:    vmov r12, lr, d17
+; CHECK-NEXT:    vmov r4, r7, d16
+; CHECK-NEXT:    subs r2, r2, r12
+; CHECK-NEXT:    sbcs r3, r3, lr
+; CHECK-NEXT:    sbcs r5, r6, #0
+; CHECK-NEXT:    sbc r5, r6, #0
+; CHECK-NEXT:    eor r2, r2, r5, asr #31
+; CHECK-NEXT:    eor r3, r3, r5, asr #31
+; CHECK-NEXT:    subs r2, r2, r5, asr #31
+; CHECK-NEXT:    sbc r3, r3, r5, asr #31
+; CHECK-NEXT:    subs r0, r0, r4
+; CHECK-NEXT:    sbcs r1, r1, r7
+; CHECK-NEXT:    vmov.32 d17[0], r2
+; CHECK-NEXT:    sbcs r7, r6, #0
+; CHECK-NEXT:    sbc r7, r6, #0
+; CHECK-NEXT:    eor r0, r0, r7, asr #31
+; CHECK-NEXT:    subs r0, r0, r7, asr #31
+; CHECK-NEXT:    vmov.32 d16[0], r0
+; CHECK-NEXT:    eor r0, r1, r7, asr #31
+; CHECK-NEXT:    sbc r0, r0, r7, asr #31
+; CHECK-NEXT:    vmov.32 d17[1], r3
+; CHECK-NEXT:    vmov.32 d16[1], r0
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov pc, lr
+  %a.zext = zext <2 x i64> %a to <2 x i128>
+  %b.zext = zext <2 x i64> %b to <2 x i128>
+  %sub = sub <2 x i128> %a.zext, %b.zext
+  %abs = call <2 x i128> @llvm.abs.v2i128(<2 x i128> %sub, i1 true)
+  %trunc = trunc <2 x i128> %abs to <2 x i64>
+  ret <2 x i64> %trunc
+}
+
+define <2 x i64> @uabd_2d_promoted_ops(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: uabd_2d_promoted_ops:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vsubl.u32 q8, d17, d16
+; CHECK-NEXT:    vshr.s64 q9, q8, #63
+; CHECK-NEXT:    veor q8, q8, q9
+; CHECK-NEXT:    vsub.i64 q8, q8, q9
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
+  %a.zext = zext <2 x i32> %a to <2 x i64>
+  %b.zext = zext <2 x i32> %b to <2 x i64>
+  %sub = sub <2 x i64> %a.zext, %b.zext
+  %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
+  ret <2 x i64> %abs
+}
+
+define <16 x i8> @uabd_v16i8_nuw(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: uabd_v16i8_nuw:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vsub.i8 q8, q8, q9
+; CHECK-NEXT:    vabs.s8 q8, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
+  %sub = sub nuw <16 x i8> %a, %b
+  %abs = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %sub, i1 true)
+  ret <16 x i8> %abs
+}
+
+define <8 x i16> @uabd_v8i16_nuw(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: uabd_v8i16_nuw:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vsub.i16 q8, q8, q9
+; CHECK-NEXT:    vabs.s16 q8, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
+  %sub = sub nuw <8 x i16> %a, %b
+  %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
+  ret <8 x i16> %abs
+}
+
+define <4 x i32> @uabd_v4i32_nuw(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: uabd_v4i32_nuw:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vsub.i32 q8, q8, q9
+; CHECK-NEXT:    vabs.s32 q8, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
+  %sub = sub nuw <4 x i32> %a, %b
+  %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
+  ret <4 x i32> %abs
+}
+
+define <2 x i64> @uabd_v2i64_nuw(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: uabd_v2i64_nuw:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vsub.i64 q8, q8, q9
+; CHECK-NEXT:    vshr.s64 q9, q8, #63
+; CHECK-NEXT:    veor q8, q8, q9
+; CHECK-NEXT:    vsub.i64 q8, q8, q9
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
+  %sub = sub nuw <2 x i64> %a, %b
+  %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
+  ret <2 x i64> %abs
+}
+
+define <16 x i8> @sabd_v16i8_nsw(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: sabd_v16i8_nsw:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vsub.i8 q8, q8, q9
+; CHECK-NEXT:    vabs.s8 q8, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
+  %sub = sub nsw <16 x i8> %a, %b
+  %abs = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %sub, i1 true)
+  ret <16 x i8> %abs
+}
+
+define <8 x i16> @sabd_v8i16_nsw(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: sabd_v8i16_nsw:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vsub.i16 q8, q8, q9
+; CHECK-NEXT:    vabs.s16 q8, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
+  %sub = sub nsw <8 x i16> %a, %b
+  %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
+  ret <8 x i16> %abs
+}
+
+define <4 x i32> @sabd_v4i32_nsw(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: sabd_v4i32_nsw:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vsub.i32 q8, q8, q9
+; CHECK-NEXT:    vabs.s32 q8, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
+  %sub = sub nsw <4 x i32> %a, %b
+  %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
+  ret <4 x i32> %abs
+}
+
+define <2 x i64> @sabd_v2i64_nsw(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: sabd_v2i64_nsw:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vsub.i64 q8, q8, q9
+; CHECK-NEXT:    vshr.s64 q9, q8, #63
+; CHECK-NEXT:    veor q8, q8, q9
+; CHECK-NEXT:    vsub.i64 q8, q8, q9
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
+  %sub = sub nsw <2 x i64> %a, %b
+  %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
+  ret <2 x i64> %abs
+}
+
+define <16 x i8> @smaxmin_v16i8(<16 x i8> %0, <16 x i8> %1) {
+; CHECK-LABEL: smaxmin_v16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmin.s8 q10, q8, q9
+; CHECK-NEXT:    vmax.s8 q8, q8, q9
+; CHECK-NEXT:    vsub.i8 q8, q8, q10
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
+  %a = tail call <16 x i8> @llvm.smax.v16i8(<16 x i8> %0, <16 x i8> %1)
+  %b = tail call <16 x i8> @llvm.smin.v16i8(<16 x i8> %0, <16 x i8> %1)
+  %sub = sub <16 x i8> %a, %b
+  ret <16 x i8> %sub
+}
+
+define <8 x i16> @smaxmin_v8i16(<8 x i16> %0, <8 x i16> %1) {
+; CHECK-LABEL: smaxmin_v8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmin.s16 q10, q8, q9
+; CHECK-NEXT:    vmax.s16 q8, q8, q9
+; CHECK-NEXT:    vsub.i16 q8, q8, q10
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
+  %a = tail call <8 x i16> @llvm.smax.v8i16(<8 x i16> %0, <8 x i16> %1)
+  %b = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> %0, <8 x i16> %1)
+  %sub = sub <8 x i16> %a, %b
+  ret <8 x i16> %sub
+}
+
+define <4 x i32> @smaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
+; CHECK-LABEL: smaxmin_v4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmin.s32 q10, q8, q9
+; CHECK-NEXT:    vmax.s32 q8, q8, q9
+; CHECK-NEXT:    vsub.i32 q8, q8, q10
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
+  %a = tail call <4 x i32> @llvm.smax.v4i32(<4 x i32> %0, <4 x i32> %1)
+  %b = tail call <4 x i32> @llvm.smin.v4i32(<4 x i32> %0, <4 x i32> %1)
+  %sub = sub <4 x i32> %a, %b
+  ret <4 x i32> %sub
+}
+
+define <2 x i64> @smaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
+; CHECK-LABEL: smaxmin_v2i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r8, lr}
+; CHECK-NEXT:    add r6, sp, #24
+; CHECK-NEXT:    mov r8, #0
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r6]
+; CHECK-NEXT:    vmov r7, r12, d19
+; CHECK-NEXT:    vmov r4, lr, d18
+; CHECK-NEXT:    subs r5, r2, r7
+; CHECK-NEXT:    sbcs r5, r3, r12
+; CHECK-NEXT:    mov r6, r7
+; CHECK-NEXT:    mov r5, #0
+; CHECK-NEXT:    movlt r5, #1
+; CHECK-NEXT:    cmp r5, #0
+; CHECK-NEXT:    movne r6, r2
+; CHECK-NEXT:    mov r5, r12
+; CHECK-NEXT:    vmov.32 d17[0], r6
+; CHECK-NEXT:    movne r5, r3
+; CHECK-NEXT:    mov r6, r4
+; CHECK-NEXT:    vmov.32 d17[1], r5
+; CHECK-NEXT:    subs r5, r4, r0
+; CHECK-NEXT:    sbcs r5, lr, r1
+; CHECK-NEXT:    mov r5, #0
+; CHECK-NEXT:    movlt r5, #1
+; CHECK-NEXT:    cmp r5, #0
+; CHECK-NEXT:    movne r6, r0
+; CHECK-NEXT:    vmov.32 d18[0], r6
+; CHECK-NEXT:    subs r6, r7, r2
+; CHECK-NEXT:    sbcs r6, r12, r3
+; CHECK-NEXT:    mov r6, #0
+; CHECK-NEXT:    movlt r6, #1
+; CHECK-NEXT:    cmp r6, #0
+; CHECK-NEXT:    movne r7, r2
+; CHECK-NEXT:    subs r2, r0, r4
+; CHECK-NEXT:    sbcs r2, r1, lr
+; CHECK-NEXT:    vmov.32 d19[0], r7
+; CHECK-NEXT:    movlt r8, #1
+; CHECK-NEXT:    cmp r8, #0
+; CHECK-NEXT:    movne r4, r0
+; CHECK-NEXT:    mov r0, lr
+; CHECK-NEXT:    vmov.32 d16[0], r4
+; CHECK-NEXT:    movne r0, r1
+; CHECK-NEXT:    cmp r6, #0
+; CHECK-NEXT:    movne r12, r3
+; CHECK-NEXT:    cmp r5, #0
+; CHECK-NEXT:    vmov.32 d16[1], r0
+; CHECK-NEXT:    movne lr, r1
+; CHECK-NEXT:    vmov.32 d19[1], r12
+; CHECK-NEXT:    vmov.32 d18[1], lr
+; CHECK-NEXT:    vsub.i64 q8, q9, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r8, lr}
+; CHECK-NEXT:    mov pc, lr
+  %a = tail call <2 x i64> @llvm.smax.v2i64(<2 x i64> %0, <2 x i64> %1)
+  %b = tail call <2 x i64> @llvm.smin.v2i64(<2 x i64> %0, <2 x i64> %1)
+  %sub = sub <2 x i64> %a, %b
+  ret <2 x i64> %sub
+}
+
+define <16 x i8> @umaxmin_v16i8(<16 x i8> %0, <16 x i8> %1) {
+; CHECK-LABEL: umaxmin_v16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmin.u8 q10, q8, q9
+; CHECK-NEXT:    vmax.u8 q8, q8, q9
+; CHECK-NEXT:    vsub.i8 q8, q8, q10
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
+  %a = tail call <16 x i8> @llvm.umax.v16i8(<16 x i8> %0, <16 x i8> %1)
+  %b = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %0, <16 x i8> %1)
+  %sub = sub <16 x i8> %a, %b
+  ret <16 x i8> %sub
+}
+
+define <8 x i16> @umaxmin_v8i16(<8 x i16> %0, <8 x i16> %1) {
+; CHECK-LABEL: umaxmin_v8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmin.u16 q10, q8, q9
+; CHECK-NEXT:    vmax.u16 q8, q8, q9
+; CHECK-NEXT:    vsub.i16 q8, q8, q10
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
+  %a = tail call <8 x i16> @llvm.umax.v8i16(<8 x i16> %0, <8 x i16> %1)
+  %b = tail call <8 x i16> @llvm.umin.v8i16(<8 x i16> %0, <8 x i16> %1)
+  %sub = sub <8 x i16> %a, %b
+  ret <8 x i16> %sub
+}
+
+define <4 x i32> @umaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
+; CHECK-LABEL: umaxmin_v4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmin.u32 q10, q8, q9
+; CHECK-NEXT:    vmax.u32 q8, q8, q9
+; CHECK-NEXT:    vsub.i32 q8, q8, q10
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
+  %a = tail call <4 x i32> @llvm.umax.v4i32(<4 x i32> %0, <4 x i32> %1)
+  %b = tail call <4 x i32> @llvm.umin.v4i32(<4 x i32> %0, <4 x i32> %1)
+  %sub = sub <4 x i32> %a, %b
+  ret <4 x i32> %sub
+}
+
+define <2 x i64> @umaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
+; CHECK-LABEL: umaxmin_v2i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vqsub.u64 q10, q8, q9
+; CHECK-NEXT:    vqsub.u64 q9, q9, q8
+; CHECK-NEXT:    vsub.i64 q10, q10, q8
+; CHECK-NEXT:    vadd.i64 q8, q8, q9
+; CHECK-NEXT:    vadd.i64 q8, q8, q10
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
+  %a = tail call <2 x i64> @llvm.umax.v2i64(<2 x i64> %0, <2 x i64> %1)
+  %b = tail call <2 x i64> @llvm.umin.v2i64(<2 x i64> %0, <2 x i64> %1)
+  %sub = sub <2 x i64> %a, %b
+  ret <2 x i64> %sub
+}
+
+define <16 x i8> @umaxmin_v16i8_com1(<16 x i8> %0, <16 x i8> %1) {
+; CHECK-LABEL: umaxmin_v16i8_com1:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    mov r0, sp
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r0]
+; CHECK-NEXT:    vmin.u8 q10, q9, q8
+; CHECK-NEXT:    vmax.u8 q8, q8, q9
+; CHECK-NEXT:    vsub.i8 q8, q8, q10
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
+  %a = tail call <16 x i8> @llvm.umax.v16i8(<16 x i8> %0, <16 x i8> %1)
+  %b = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %1, <16 x i8> %0)
+  %sub = sub <16 x i8> %a, %b
+  ret <16 x i8> %sub
+}
