[llvm] a4bbc3b - [ARM] Predicated binary operation tests. NFC
David Green via llvm-commits
llvm-commits at lists.llvm.org
Wed Jul 22 02:40:17 PDT 2020
Author: David Green
Date: 2020-07-22T10:40:02+01:00
New Revision: a4bbc3b76360dad5c8d1cda9fb73682425d6ea3a
URL: https://github.com/llvm/llvm-project/commit/a4bbc3b76360dad5c8d1cda9fb73682425d6ea3a
DIFF: https://github.com/llvm/llvm-project/commit/a4bbc3b76360dad5c8d1cda9fb73682425d6ea3a.diff
LOG: [ARM] Predicated binary operation tests. NFC
Added:
llvm/test/CodeGen/Thumb2/mve-pred-selectop.ll
llvm/test/CodeGen/Thumb2/mve-pred-selectop2.ll
llvm/test/CodeGen/Thumb2/mve-pred-selectop3.ll
Modified:
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/Thumb2/mve-pred-selectop.ll b/llvm/test/CodeGen/Thumb2/mve-pred-selectop.ll
new file mode 100644
index 000000000000..f1deae09fa60
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-pred-selectop.ll
@@ -0,0 +1,1287 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
+
+define arm_aapcs_vfpcc <4 x i32> @add_v4i32(<4 x i32> %z, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: add_v4i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.i32 q1, q1, q2
+; CHECK-NEXT: vcmp.i32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <4 x i32> %z, zeroinitializer
+ %a = add <4 x i32> %x, %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %z
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @add_v8i16(<8 x i16> %z, <8 x i16> %x, <8 x i16> %y) {
+; CHECK-LABEL: add_v8i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.i16 q1, q1, q2
+; CHECK-NEXT: vcmp.i16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <8 x i16> %z, zeroinitializer
+ %a = add <8 x i16> %x, %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %z
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @add_v16i8(<16 x i8> %z, <16 x i8> %x, <16 x i8> %y) {
+; CHECK-LABEL: add_v16i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.i8 q1, q1, q2
+; CHECK-NEXT: vcmp.i8 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %z, zeroinitializer
+ %a = add <16 x i8> %x, %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %z
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sub_v4i32(<4 x i32> %z, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: sub_v4i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.i32 q1, q1, q2
+; CHECK-NEXT: vcmp.i32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <4 x i32> %z, zeroinitializer
+ %a = sub <4 x i32> %x, %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %z
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @sub_v8i16(<8 x i16> %z, <8 x i16> %x, <8 x i16> %y) {
+; CHECK-LABEL: sub_v8i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.i16 q1, q1, q2
+; CHECK-NEXT: vcmp.i16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <8 x i16> %z, zeroinitializer
+ %a = sub <8 x i16> %x, %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %z
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @sub_v16i8(<16 x i8> %z, <16 x i8> %x, <16 x i8> %y) {
+; CHECK-LABEL: sub_v16i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.i8 q1, q1, q2
+; CHECK-NEXT: vcmp.i8 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %z, zeroinitializer
+ %a = sub <16 x i8> %x, %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %z
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mul_v4i32(<4 x i32> %z, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: mul_v4i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.i32 q1, q1, q2
+; CHECK-NEXT: vcmp.i32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <4 x i32> %z, zeroinitializer
+ %a = mul <4 x i32> %x, %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %z
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @mul_v8i16(<8 x i16> %z, <8 x i16> %x, <8 x i16> %y) {
+; CHECK-LABEL: mul_v8i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.i16 q1, q1, q2
+; CHECK-NEXT: vcmp.i16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <8 x i16> %z, zeroinitializer
+ %a = mul <8 x i16> %x, %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %z
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @mul_v16i8(<16 x i8> %z, <16 x i8> %x, <16 x i8> %y) {
+; CHECK-LABEL: mul_v16i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.i8 q1, q1, q2
+; CHECK-NEXT: vcmp.i8 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %z, zeroinitializer
+ %a = mul <16 x i8> %x, %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %z
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @and_v4i32(<4 x i32> %z, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: and_v4i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vand q1, q1, q2
+; CHECK-NEXT: vcmp.i32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <4 x i32> %z, zeroinitializer
+ %a = and <4 x i32> %x, %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %z
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @and_v8i16(<8 x i16> %z, <8 x i16> %x, <8 x i16> %y) {
+; CHECK-LABEL: and_v8i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vand q1, q1, q2
+; CHECK-NEXT: vcmp.i16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <8 x i16> %z, zeroinitializer
+ %a = and <8 x i16> %x, %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %z
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @and_v16i8(<16 x i8> %z, <16 x i8> %x, <16 x i8> %y) {
+; CHECK-LABEL: and_v16i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vand q1, q1, q2
+; CHECK-NEXT: vcmp.i8 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %z, zeroinitializer
+ %a = and <16 x i8> %x, %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %z
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @or_v4i32(<4 x i32> %z, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: or_v4i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vorr q1, q1, q2
+; CHECK-NEXT: vcmp.i32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <4 x i32> %z, zeroinitializer
+ %a = or <4 x i32> %x, %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %z
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @or_v8i16(<8 x i16> %z, <8 x i16> %x, <8 x i16> %y) {
+; CHECK-LABEL: or_v8i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vorr q1, q1, q2
+; CHECK-NEXT: vcmp.i16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <8 x i16> %z, zeroinitializer
+ %a = or <8 x i16> %x, %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %z
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @or_v16i8(<16 x i8> %z, <16 x i8> %x, <16 x i8> %y) {
+; CHECK-LABEL: or_v16i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vorr q1, q1, q2
+; CHECK-NEXT: vcmp.i8 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %z, zeroinitializer
+ %a = or <16 x i8> %x, %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %z
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @xor_v4i32(<4 x i32> %z, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: xor_v4i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: veor q1, q1, q2
+; CHECK-NEXT: vcmp.i32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <4 x i32> %z, zeroinitializer
+ %a = xor <4 x i32> %x, %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %z
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @xor_v8i16(<8 x i16> %z, <8 x i16> %x, <8 x i16> %y) {
+; CHECK-LABEL: xor_v8i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: veor q1, q1, q2
+; CHECK-NEXT: vcmp.i16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <8 x i16> %z, zeroinitializer
+ %a = xor <8 x i16> %x, %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %z
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @xor_v16i8(<16 x i8> %z, <16 x i8> %x, <16 x i8> %y) {
+; CHECK-LABEL: xor_v16i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: veor q1, q1, q2
+; CHECK-NEXT: vcmp.i8 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %z, zeroinitializer
+ %a = xor <16 x i8> %x, %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %z
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @andnot_v4i32(<4 x i32> %z, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: andnot_v4i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vbic q1, q1, q2
+; CHECK-NEXT: vcmp.i32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <4 x i32> %z, zeroinitializer
+ %y1 = xor <4 x i32> %y, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %a = and <4 x i32> %x, %y1
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %z
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @andnot_v8i16(<8 x i16> %z, <8 x i16> %x, <8 x i16> %y) {
+; CHECK-LABEL: andnot_v8i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vbic q1, q1, q2
+; CHECK-NEXT: vcmp.i16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <8 x i16> %z, zeroinitializer
+ %y1 = xor <8 x i16> %y, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ %a = and <8 x i16> %x, %y1
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %z
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @andnot_v16i8(<16 x i8> %z, <16 x i8> %x, <16 x i8> %y) {
+; CHECK-LABEL: andnot_v16i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vbic q1, q1, q2
+; CHECK-NEXT: vcmp.i8 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %z, zeroinitializer
+ %y1 = xor <16 x i8> %y, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %a = and <16 x i8> %x, %y1
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %z
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @ornot_v4i32(<4 x i32> %z, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: ornot_v4i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vorn q1, q1, q2
+; CHECK-NEXT: vcmp.i32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <4 x i32> %z, zeroinitializer
+ %y1 = xor <4 x i32> %y, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %a = or <4 x i32> %x, %y1
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %z
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @ornot_v8i16(<8 x i16> %z, <8 x i16> %x, <8 x i16> %y) {
+; CHECK-LABEL: ornot_v8i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vorn q1, q1, q2
+; CHECK-NEXT: vcmp.i16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <8 x i16> %z, zeroinitializer
+ %y1 = xor <8 x i16> %y, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ %a = or <8 x i16> %x, %y1
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %z
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @ornot_v16i8(<16 x i8> %z, <16 x i8> %x, <16 x i8> %y) {
+; CHECK-LABEL: ornot_v16i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vorn q1, q1, q2
+; CHECK-NEXT: vcmp.i8 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %z, zeroinitializer
+ %y1 = xor <16 x i8> %y, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %a = or <16 x i8> %x, %y1
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %z
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fadd_v4f32(<4 x float> %z, <4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: fadd_v4f32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.f32 q1, q1, q2
+; CHECK-NEXT: vcmp.f32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = fcmp oeq <4 x float> %z, zeroinitializer
+ %a = fadd <4 x float> %x, %y
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %z
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fadd_v8f16(<8 x half> %z, <8 x half> %x, <8 x half> %y) {
+; CHECK-LABEL: fadd_v8f16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.f16 q1, q1, q2
+; CHECK-NEXT: vcmp.f16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = fcmp oeq <8 x half> %z, zeroinitializer
+ %a = fadd <8 x half> %x, %y
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %z
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fsub_v4f32(<4 x float> %z, <4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: fsub_v4f32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.f32 q1, q1, q2
+; CHECK-NEXT: vcmp.f32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = fcmp oeq <4 x float> %z, zeroinitializer
+ %a = fsub <4 x float> %x, %y
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %z
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fsub_v8f16(<8 x half> %z, <8 x half> %x, <8 x half> %y) {
+; CHECK-LABEL: fsub_v8f16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.f16 q1, q1, q2
+; CHECK-NEXT: vcmp.f16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = fcmp oeq <8 x half> %z, zeroinitializer
+ %a = fsub <8 x half> %x, %y
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %z
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fmul_v4f32(<4 x float> %z, <4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: fmul_v4f32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.f32 q1, q1, q2
+; CHECK-NEXT: vcmp.f32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = fcmp oeq <4 x float> %z, zeroinitializer
+ %a = fmul <4 x float> %x, %y
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %z
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fmul_v8f16(<8 x half> %z, <8 x half> %x, <8 x half> %y) {
+; CHECK-LABEL: fmul_v8f16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.f16 q1, q1, q2
+; CHECK-NEXT: vcmp.f16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = fcmp oeq <8 x half> %z, zeroinitializer
+ %a = fmul <8 x half> %x, %y
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %z
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @icmp_slt_v4i32(<4 x i32> %z, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: icmp_slt_v4i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmin.s32 q1, q1, q2
+; CHECK-NEXT: vcmp.i32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <4 x i32> %z, zeroinitializer
+ %a1 = icmp slt <4 x i32> %x, %y
+ %a = select <4 x i1> %a1, <4 x i32> %x, <4 x i32> %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %z
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @icmp_slt_v8i16(<8 x i16> %z, <8 x i16> %x, <8 x i16> %y) {
+; CHECK-LABEL: icmp_slt_v8i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmin.s16 q1, q1, q2
+; CHECK-NEXT: vcmp.i16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <8 x i16> %z, zeroinitializer
+ %a1 = icmp slt <8 x i16> %x, %y
+ %a = select <8 x i1> %a1, <8 x i16> %x, <8 x i16> %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %z
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @icmp_slt_v16i8(<16 x i8> %z, <16 x i8> %x, <16 x i8> %y) {
+; CHECK-LABEL: icmp_slt_v16i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmin.s8 q1, q1, q2
+; CHECK-NEXT: vcmp.i8 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %z, zeroinitializer
+ %a1 = icmp slt <16 x i8> %x, %y
+ %a = select <16 x i1> %a1, <16 x i8> %x, <16 x i8> %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %z
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @icmp_sgt_v4i32(<4 x i32> %z, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: icmp_sgt_v4i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmax.s32 q1, q1, q2
+; CHECK-NEXT: vcmp.i32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <4 x i32> %z, zeroinitializer
+ %a1 = icmp sgt <4 x i32> %x, %y
+ %a = select <4 x i1> %a1, <4 x i32> %x, <4 x i32> %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %z
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @icmp_sgt_v8i16(<8 x i16> %z, <8 x i16> %x, <8 x i16> %y) {
+; CHECK-LABEL: icmp_sgt_v8i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmax.s16 q1, q1, q2
+; CHECK-NEXT: vcmp.i16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <8 x i16> %z, zeroinitializer
+ %a1 = icmp sgt <8 x i16> %x, %y
+ %a = select <8 x i1> %a1, <8 x i16> %x, <8 x i16> %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %z
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @icmp_sgt_v16i8(<16 x i8> %z, <16 x i8> %x, <16 x i8> %y) {
+; CHECK-LABEL: icmp_sgt_v16i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmax.s8 q1, q1, q2
+; CHECK-NEXT: vcmp.i8 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %z, zeroinitializer
+ %a1 = icmp sgt <16 x i8> %x, %y
+ %a = select <16 x i1> %a1, <16 x i8> %x, <16 x i8> %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %z
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @icmp_ult_v4i32(<4 x i32> %z, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: icmp_ult_v4i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmin.u32 q1, q1, q2
+; CHECK-NEXT: vcmp.i32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <4 x i32> %z, zeroinitializer
+ %a1 = icmp ult <4 x i32> %x, %y
+ %a = select <4 x i1> %a1, <4 x i32> %x, <4 x i32> %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %z
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @icmp_ult_v8i16(<8 x i16> %z, <8 x i16> %x, <8 x i16> %y) {
+; CHECK-LABEL: icmp_ult_v8i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmin.u16 q1, q1, q2
+; CHECK-NEXT: vcmp.i16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <8 x i16> %z, zeroinitializer
+ %a1 = icmp ult <8 x i16> %x, %y
+ %a = select <8 x i1> %a1, <8 x i16> %x, <8 x i16> %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %z
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @icmp_ult_v16i8(<16 x i8> %z, <16 x i8> %x, <16 x i8> %y) {
+; CHECK-LABEL: icmp_ult_v16i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmin.u8 q1, q1, q2
+; CHECK-NEXT: vcmp.i8 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %z, zeroinitializer
+ %a1 = icmp ult <16 x i8> %x, %y
+ %a = select <16 x i1> %a1, <16 x i8> %x, <16 x i8> %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %z
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @icmp_ugt_v4i32(<4 x i32> %z, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: icmp_ugt_v4i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmax.u32 q1, q1, q2
+; CHECK-NEXT: vcmp.i32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <4 x i32> %z, zeroinitializer
+ %a1 = icmp ugt <4 x i32> %x, %y
+ %a = select <4 x i1> %a1, <4 x i32> %x, <4 x i32> %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %z
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @icmp_ugt_v8i16(<8 x i16> %z, <8 x i16> %x, <8 x i16> %y) {
+; CHECK-LABEL: icmp_ugt_v8i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmax.u16 q1, q1, q2
+; CHECK-NEXT: vcmp.i16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <8 x i16> %z, zeroinitializer
+ %a1 = icmp ugt <8 x i16> %x, %y
+ %a = select <8 x i1> %a1, <8 x i16> %x, <8 x i16> %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %z
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @icmp_ugt_v16i8(<16 x i8> %z, <16 x i8> %x, <16 x i8> %y) {
+; CHECK-LABEL: icmp_ugt_v16i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmax.u8 q1, q1, q2
+; CHECK-NEXT: vcmp.i8 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %z, zeroinitializer
+ %a1 = icmp ugt <16 x i8> %x, %y
+ %a = select <16 x i1> %a1, <16 x i8> %x, <16 x i8> %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %z
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fcmp_fast_olt_v4f32(<4 x float> %z, <4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: fcmp_fast_olt_v4f32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vminnm.f32 q1, q1, q2
+; CHECK-NEXT: vcmp.f32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = fcmp oeq <4 x float> %z, zeroinitializer
+ %a1 = fcmp fast olt <4 x float> %x, %y
+ %a = select <4 x i1> %a1, <4 x float> %x, <4 x float> %y
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %z
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fcmp_fast_olt_v8f16(<8 x half> %z, <8 x half> %x, <8 x half> %y) {
+; CHECK-LABEL: fcmp_fast_olt_v8f16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vminnm.f16 q1, q1, q2
+; CHECK-NEXT: vcmp.f16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = fcmp oeq <8 x half> %z, zeroinitializer
+ %a1 = fcmp fast olt <8 x half> %x, %y
+ %a = select <8 x i1> %a1, <8 x half> %x, <8 x half> %y
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %z
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fcmp_fast_ogt_v4f32(<4 x float> %z, <4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: fcmp_fast_ogt_v4f32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmaxnm.f32 q1, q1, q2
+; CHECK-NEXT: vcmp.f32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = fcmp oeq <4 x float> %z, zeroinitializer
+ %a1 = fcmp fast ogt <4 x float> %x, %y
+ %a = select <4 x i1> %a1, <4 x float> %x, <4 x float> %y
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %z
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fcmp_fast_ogt_v8f16(<8 x half> %z, <8 x half> %x, <8 x half> %y) {
+; CHECK-LABEL: fcmp_fast_ogt_v8f16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmaxnm.f16 q1, q1, q2
+; CHECK-NEXT: vcmp.f16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = fcmp oeq <8 x half> %z, zeroinitializer
+ %a1 = fcmp fast ogt <8 x half> %x, %y
+ %a = select <8 x i1> %a1, <8 x half> %x, <8 x half> %y
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %z
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sadd_sat_v4i32(<4 x i32> %z, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: sadd_sat_v4i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s32 q1, q1, q2
+; CHECK-NEXT: vcmp.i32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <4 x i32> %z, zeroinitializer
+ %a = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %z
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @sadd_sat_v8i16(<8 x i16> %z, <8 x i16> %x, <8 x i16> %y) {
+; CHECK-LABEL: sadd_sat_v8i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s16 q1, q1, q2
+; CHECK-NEXT: vcmp.i16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <8 x i16> %z, zeroinitializer
+ %a = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %z
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @sadd_sat_v16i8(<16 x i8> %z, <16 x i8> %x, <16 x i8> %y) {
+; CHECK-LABEL: sadd_sat_v16i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s8 q1, q1, q2
+; CHECK-NEXT: vcmp.i8 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %z, zeroinitializer
+ %a = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %z
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @uadd_sat_v4i32(<4 x i32> %z, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: uadd_sat_v4i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u32 q1, q1, q2
+; CHECK-NEXT: vcmp.i32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <4 x i32> %z, zeroinitializer
+ %a = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %z
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @uadd_sat_v8i16(<8 x i16> %z, <8 x i16> %x, <8 x i16> %y) {
+; CHECK-LABEL: uadd_sat_v8i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u16 q1, q1, q2
+; CHECK-NEXT: vcmp.i16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <8 x i16> %z, zeroinitializer
+ %a = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %z
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @uadd_sat_v16i8(<16 x i8> %z, <16 x i8> %x, <16 x i8> %y) {
+; CHECK-LABEL: uadd_sat_v16i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u8 q1, q1, q2
+; CHECK-NEXT: vcmp.i8 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %z, zeroinitializer
+ %a = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %z
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @ssub_sat_v4i32(<4 x i32> %z, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: ssub_sat_v4i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s32 q1, q1, q2
+; CHECK-NEXT: vcmp.i32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <4 x i32> %z, zeroinitializer
+ %a = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %z
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @ssub_sat_v8i16(<8 x i16> %z, <8 x i16> %x, <8 x i16> %y) {
+; CHECK-LABEL: ssub_sat_v8i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s16 q1, q1, q2
+; CHECK-NEXT: vcmp.i16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <8 x i16> %z, zeroinitializer
+ %a = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %z
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @ssub_sat_v16i8(<16 x i8> %z, <16 x i8> %x, <16 x i8> %y) {
+; CHECK-LABEL: ssub_sat_v16i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s8 q1, q1, q2
+; CHECK-NEXT: vcmp.i8 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %z, zeroinitializer
+ %a = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %z
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @usub_sat_v4i32(<4 x i32> %z, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: usub_sat_v4i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u32 q1, q1, q2
+; CHECK-NEXT: vcmp.i32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <4 x i32> %z, zeroinitializer
+ %a = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %z
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @usub_sat_v8i16(<8 x i16> %z, <8 x i16> %x, <8 x i16> %y) {
+; CHECK-LABEL: usub_sat_v8i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u16 q1, q1, q2
+; CHECK-NEXT: vcmp.i16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <8 x i16> %z, zeroinitializer
+ %a = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %z
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @usub_sat_v16i8(<16 x i8> %z, <16 x i8> %x, <16 x i8> %y) {
+; CHECK-LABEL: usub_sat_v16i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u8 q1, q1, q2
+; CHECK-NEXT: vcmp.i8 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %z, zeroinitializer
+ %a = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %z
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @addqr_v4i32(<4 x i32> %z, <4 x i32> %x, i32 %y) {
+; CHECK-LABEL: addqr_v4i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.i32 q1, q1, r0
+; CHECK-NEXT: vcmp.i32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <4 x i32> %z, zeroinitializer
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = add <4 x i32> %x, %ys
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %z
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @addqr_v8i16(<8 x i16> %z, <8 x i16> %x, i16 %y) {
+; CHECK-LABEL: addqr_v8i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.i16 q1, q1, r0
+; CHECK-NEXT: vcmp.i16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <8 x i16> %z, zeroinitializer
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = add <8 x i16> %x, %ys
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %z
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @addqr_v16i8(<16 x i8> %z, <16 x i8> %x, i8 %y) {
+; CHECK-LABEL: addqr_v16i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.i8 q1, q1, r0
+; CHECK-NEXT: vcmp.i8 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %z, zeroinitializer
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = add <16 x i8> %x, %ys
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %z
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @subqr_v4i32(<4 x i32> %z, <4 x i32> %x, i32 %y) {
+; CHECK-LABEL: subqr_v4i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.i32 q1, q1, r0
+; CHECK-NEXT: vcmp.i32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <4 x i32> %z, zeroinitializer
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = sub <4 x i32> %x, %ys
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %z
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @subqr_v8i16(<8 x i16> %z, <8 x i16> %x, i16 %y) {
+; CHECK-LABEL: subqr_v8i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.i16 q1, q1, r0
+; CHECK-NEXT: vcmp.i16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <8 x i16> %z, zeroinitializer
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = sub <8 x i16> %x, %ys
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %z
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @subqr_v16i8(<16 x i8> %z, <16 x i8> %x, i8 %y) {
+; CHECK-LABEL: subqr_v16i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.i8 q1, q1, r0
+; CHECK-NEXT: vcmp.i8 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %z, zeroinitializer
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = sub <16 x i8> %x, %ys
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %z
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mulqr_v4i32(<4 x i32> %z, <4 x i32> %x, i32 %y) {
+; CHECK-LABEL: mulqr_v4i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.i32 q1, q1, r0
+; CHECK-NEXT: vcmp.i32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <4 x i32> %z, zeroinitializer
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = mul <4 x i32> %x, %ys
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %z
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @mulqr_v8i16(<8 x i16> %z, <8 x i16> %x, i16 %y) {
+; CHECK-LABEL: mulqr_v8i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.i16 q1, q1, r0
+; CHECK-NEXT: vcmp.i16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <8 x i16> %z, zeroinitializer
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = mul <8 x i16> %x, %ys
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %z
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @mulqr_v16i8(<16 x i8> %z, <16 x i8> %x, i8 %y) {
+; CHECK-LABEL: mulqr_v16i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.i8 q1, q1, r0
+; CHECK-NEXT: vcmp.i8 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %z, zeroinitializer
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = mul <16 x i8> %x, %ys
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %z
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @faddqr_v4f32(<4 x float> %z, <4 x float> %x, float %y) {
+; CHECK-LABEL: faddqr_v4f32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov r0, s8
+; CHECK-NEXT: vcmp.f32 eq, q0, zr
+; CHECK-NEXT: vadd.f32 q1, q1, r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = fcmp oeq <4 x float> %z, zeroinitializer
+ %i = insertelement <4 x float> undef, float %y, i32 0
+ %ys = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
+ %a = fadd <4 x float> %x, %ys
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %z
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @faddqr_v8f16(<8 x half> %z, <8 x half> %x, half %y) {
+; CHECK-LABEL: faddqr_v8f16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.f16 r0, s8
+; CHECK-NEXT: vcmp.f16 eq, q0, zr
+; CHECK-NEXT: vadd.f16 q1, q1, r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = fcmp oeq <8 x half> %z, zeroinitializer
+ %i = insertelement <8 x half> undef, half %y, i32 0
+ %ys = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
+ %a = fadd <8 x half> %x, %ys
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %z
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fsubqr_v4f32(<4 x float> %z, <4 x float> %x, float %y) {
+; CHECK-LABEL: fsubqr_v4f32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov r0, s8
+; CHECK-NEXT: vcmp.f32 eq, q0, zr
+; CHECK-NEXT: vsub.f32 q1, q1, r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = fcmp oeq <4 x float> %z, zeroinitializer
+ %i = insertelement <4 x float> undef, float %y, i32 0
+ %ys = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
+ %a = fsub <4 x float> %x, %ys
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %z
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fsubqr_v8f16(<8 x half> %z, <8 x half> %x, half %y) {
+; CHECK-LABEL: fsubqr_v8f16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.f16 r0, s8
+; CHECK-NEXT: vcmp.f16 eq, q0, zr
+; CHECK-NEXT: vsub.f16 q1, q1, r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = fcmp oeq <8 x half> %z, zeroinitializer
+ %i = insertelement <8 x half> undef, half %y, i32 0
+ %ys = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
+ %a = fsub <8 x half> %x, %ys
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %z
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fmulqr_v4f32(<4 x float> %z, <4 x float> %x, float %y) {
+; CHECK-LABEL: fmulqr_v4f32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov r0, s8
+; CHECK-NEXT: vcmp.f32 eq, q0, zr
+; CHECK-NEXT: vmul.f32 q1, q1, r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = fcmp oeq <4 x float> %z, zeroinitializer
+ %i = insertelement <4 x float> undef, float %y, i32 0
+ %ys = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
+ %a = fmul <4 x float> %x, %ys
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %z
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fmulqr_v8f16(<8 x half> %z, <8 x half> %x, half %y) {
+; CHECK-LABEL: fmulqr_v8f16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.f16 r0, s8
+; CHECK-NEXT: vcmp.f16 eq, q0, zr
+; CHECK-NEXT: vmul.f16 q1, q1, r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = fcmp oeq <8 x half> %z, zeroinitializer
+ %i = insertelement <8 x half> undef, half %y, i32 0
+ %ys = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
+ %a = fmul <8 x half> %x, %ys
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %z
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sadd_satqr_v4i32(<4 x i32> %z, <4 x i32> %x, i32 %y) {
+; CHECK-LABEL: sadd_satqr_v4i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s32 q1, q1, r0
+; CHECK-NEXT: vcmp.i32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <4 x i32> %z, zeroinitializer
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %x, <4 x i32> %ys)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %z
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @sadd_satqr_v8i16(<8 x i16> %z, <8 x i16> %x, i16 %y) {
+; CHECK-LABEL: sadd_satqr_v8i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s16 q1, q1, r0
+; CHECK-NEXT: vcmp.i16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <8 x i16> %z, zeroinitializer
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %x, <8 x i16> %ys)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %z
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @sadd_satqr_v16i8(<16 x i8> %z, <16 x i8> %x, i8 %y) {
+; CHECK-LABEL: sadd_satqr_v16i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s8 q1, q1, r0
+; CHECK-NEXT: vcmp.i8 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %z, zeroinitializer
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %x, <16 x i8> %ys)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %z
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @uadd_satqr_v4i32(<4 x i32> %z, <4 x i32> %x, i32 %y) {
+; CHECK-LABEL: uadd_satqr_v4i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u32 q1, q1, r0
+; CHECK-NEXT: vcmp.i32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <4 x i32> %z, zeroinitializer
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %x, <4 x i32> %ys)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %z
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @uadd_satqr_v8i16(<8 x i16> %z, <8 x i16> %x, i16 %y) {
+; CHECK-LABEL: uadd_satqr_v8i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u16 q1, q1, r0
+; CHECK-NEXT: vcmp.i16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <8 x i16> %z, zeroinitializer
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %x, <8 x i16> %ys)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %z
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @uadd_satqr_v16i8(<16 x i8> %z, <16 x i8> %x, i8 %y) {
+; CHECK-LABEL: uadd_satqr_v16i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u8 q1, q1, r0
+; CHECK-NEXT: vcmp.i8 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %z, zeroinitializer
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %x, <16 x i8> %ys)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %z
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @ssub_satqr_v4i32(<4 x i32> %z, <4 x i32> %x, i32 %y) {
+; CHECK-LABEL: ssub_satqr_v4i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s32 q1, q1, r0
+; CHECK-NEXT: vcmp.i32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <4 x i32> %z, zeroinitializer
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %x, <4 x i32> %ys)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %z
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @ssub_satqr_v8i16(<8 x i16> %z, <8 x i16> %x, i16 %y) {
+; CHECK-LABEL: ssub_satqr_v8i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s16 q1, q1, r0
+; CHECK-NEXT: vcmp.i16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <8 x i16> %z, zeroinitializer
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %x, <8 x i16> %ys)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %z
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @ssub_satqr_v16i8(<16 x i8> %z, <16 x i8> %x, i8 %y) {
+; CHECK-LABEL: ssub_satqr_v16i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s8 q1, q1, r0
+; CHECK-NEXT: vcmp.i8 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %z, zeroinitializer
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %x, <16 x i8> %ys)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %z
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @usub_satqr_v4i32(<4 x i32> %z, <4 x i32> %x, i32 %y) {
+; CHECK-LABEL: usub_satqr_v4i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u32 q1, q1, r0
+; CHECK-NEXT: vcmp.i32 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <4 x i32> %z, zeroinitializer
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %x, <4 x i32> %ys)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %z
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @usub_satqr_v8i16(<8 x i16> %z, <8 x i16> %x, i16 %y) {
+; CHECK-LABEL: usub_satqr_v8i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u16 q1, q1, r0
+; CHECK-NEXT: vcmp.i16 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <8 x i16> %z, zeroinitializer
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %x, <8 x i16> %ys)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %z
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @usub_satqr_v16i8(<16 x i8> %z, <16 x i8> %x, i8 %y) {
+; CHECK-LABEL: usub_satqr_v16i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u8 q1, q1, r0
+; CHECK-NEXT: vcmp.i8 eq, q0, zr
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %z, zeroinitializer
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %x, <16 x i8> %ys)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %z
+ ret <16 x i8> %b
+}
+
+declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %src1, <16 x i8> %src2)
+declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %src1, <8 x i16> %src2)
+declare <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %src1, <4 x i32> %src2)
+declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %src1, <16 x i8> %src2)
+declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %src1, <8 x i16> %src2)
+declare <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %src1, <4 x i32> %src2)
+declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %src1, <16 x i8> %src2)
+declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %src1, <8 x i16> %src2)
+declare <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %src1, <4 x i32> %src2)
+declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %src1, <16 x i8> %src2)
+declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %src1, <8 x i16> %src2)
+declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %src1, <4 x i32> %src2)
diff --git a/llvm/test/CodeGen/Thumb2/mve-pred-selectop2.ll b/llvm/test/CodeGen/Thumb2/mve-pred-selectop2.ll
new file mode 100644
index 000000000000..58377ee61987
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-pred-selectop2.ll
@@ -0,0 +1,2590 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
+
+define arm_aapcs_vfpcc <4 x i32> @add_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: add_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.i32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = add <4 x i32> %x, %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @add_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: add_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.i16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = add <8 x i16> %x, %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @add_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: add_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.i8 q1, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = add <16 x i8> %x, %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sub_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: sub_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.i32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = sub <4 x i32> %x, %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @sub_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: sub_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.i16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = sub <8 x i16> %x, %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @sub_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: sub_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.i8 q1, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = sub <16 x i8> %x, %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mul_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: mul_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.i32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = mul <4 x i32> %x, %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @mul_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: mul_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.i16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = mul <8 x i16> %x, %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @mul_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: mul_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.i8 q1, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = mul <16 x i8> %x, %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @and_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: and_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vand q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = and <4 x i32> %x, %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @and_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: and_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vand q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = and <8 x i16> %x, %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @and_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: and_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vand q1, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = and <16 x i8> %x, %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @or_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: or_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vorr q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = or <4 x i32> %x, %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @or_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: or_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vorr q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = or <8 x i16> %x, %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @or_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: or_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vorr q1, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = or <16 x i8> %x, %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @xor_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: xor_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: veor q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = xor <4 x i32> %x, %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @xor_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: xor_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: veor q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = xor <8 x i16> %x, %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @xor_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: xor_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: veor q1, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = xor <16 x i8> %x, %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @andnot_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: andnot_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vbic q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %y1 = xor <4 x i32> %y, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %a = and <4 x i32> %x, %y1
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @andnot_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: andnot_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vbic q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %y1 = xor <8 x i16> %y, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ %a = and <8 x i16> %x, %y1
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @andnot_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: andnot_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vbic q1, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %y1 = xor <16 x i8> %y, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %a = and <16 x i8> %x, %y1
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @ornot_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: ornot_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vorn q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %y1 = xor <4 x i32> %y, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %a = or <4 x i32> %x, %y1
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @ornot_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: ornot_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vorn q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %y1 = xor <8 x i16> %y, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ %a = or <8 x i16> %x, %y1
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @ornot_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: ornot_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vorn q1, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %y1 = xor <16 x i8> %y, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %a = or <16 x i8> %x, %y1
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fadd_v4f32_x(<4 x float> %x, <4 x float> %y, i32 %n) {
+; CHECK-LABEL: fadd_v4f32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.f32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = fadd <4 x float> %x, %y
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %x
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fadd_v8f16_x(<8 x half> %x, <8 x half> %y, i32 %n) {
+; CHECK-LABEL: fadd_v8f16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.f16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = fadd <8 x half> %x, %y
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %x
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fsub_v4f32_x(<4 x float> %x, <4 x float> %y, i32 %n) {
+; CHECK-LABEL: fsub_v4f32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.f32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = fsub <4 x float> %x, %y
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %x
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fsub_v8f16_x(<8 x half> %x, <8 x half> %y, i32 %n) {
+; CHECK-LABEL: fsub_v8f16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.f16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = fsub <8 x half> %x, %y
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %x
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fmul_v4f32_x(<4 x float> %x, <4 x float> %y, i32 %n) {
+; CHECK-LABEL: fmul_v4f32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.f32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = fmul <4 x float> %x, %y
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %x
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fmul_v8f16_x(<8 x half> %x, <8 x half> %y, i32 %n) {
+; CHECK-LABEL: fmul_v8f16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.f16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = fmul <8 x half> %x, %y
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %x
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @icmp_slt_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: icmp_slt_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmin.s32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a1 = icmp slt <4 x i32> %x, %y
+ %a = select <4 x i1> %a1, <4 x i32> %x, <4 x i32> %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @icmp_slt_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: icmp_slt_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmin.s16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a1 = icmp slt <8 x i16> %x, %y
+ %a = select <8 x i1> %a1, <8 x i16> %x, <8 x i16> %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @icmp_slt_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: icmp_slt_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmin.s8 q1, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a1 = icmp slt <16 x i8> %x, %y
+ %a = select <16 x i1> %a1, <16 x i8> %x, <16 x i8> %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @icmp_sgt_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: icmp_sgt_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmax.s32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a1 = icmp sgt <4 x i32> %x, %y
+ %a = select <4 x i1> %a1, <4 x i32> %x, <4 x i32> %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @icmp_sgt_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: icmp_sgt_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmax.s16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a1 = icmp sgt <8 x i16> %x, %y
+ %a = select <8 x i1> %a1, <8 x i16> %x, <8 x i16> %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @icmp_sgt_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: icmp_sgt_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmax.s8 q1, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a1 = icmp sgt <16 x i8> %x, %y
+ %a = select <16 x i1> %a1, <16 x i8> %x, <16 x i8> %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @icmp_ult_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: icmp_ult_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmin.u32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a1 = icmp ult <4 x i32> %x, %y
+ %a = select <4 x i1> %a1, <4 x i32> %x, <4 x i32> %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @icmp_ult_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: icmp_ult_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmin.u16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a1 = icmp ult <8 x i16> %x, %y
+ %a = select <8 x i1> %a1, <8 x i16> %x, <8 x i16> %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @icmp_ult_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: icmp_ult_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmin.u8 q1, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a1 = icmp ult <16 x i8> %x, %y
+ %a = select <16 x i1> %a1, <16 x i8> %x, <16 x i8> %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @icmp_ugt_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: icmp_ugt_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmax.u32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a1 = icmp ugt <4 x i32> %x, %y
+ %a = select <4 x i1> %a1, <4 x i32> %x, <4 x i32> %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @icmp_ugt_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: icmp_ugt_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmax.u16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a1 = icmp ugt <8 x i16> %x, %y
+ %a = select <8 x i1> %a1, <8 x i16> %x, <8 x i16> %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @icmp_ugt_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: icmp_ugt_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmax.u8 q1, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a1 = icmp ugt <16 x i8> %x, %y
+ %a = select <16 x i1> %a1, <16 x i8> %x, <16 x i8> %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fcmp_fast_olt_v4f32_x(<4 x float> %x, <4 x float> %y, i32 %n) {
+; CHECK-LABEL: fcmp_fast_olt_v4f32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vminnm.f32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a1 = fcmp fast olt <4 x float> %x, %y
+ %a = select <4 x i1> %a1, <4 x float> %x, <4 x float> %y
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %x
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fcmp_fast_olt_v8f16_x(<8 x half> %x, <8 x half> %y, i32 %n) {
+; CHECK-LABEL: fcmp_fast_olt_v8f16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vminnm.f16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a1 = fcmp fast olt <8 x half> %x, %y
+ %a = select <8 x i1> %a1, <8 x half> %x, <8 x half> %y
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %x
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fcmp_fast_ogt_v4f32_x(<4 x float> %x, <4 x float> %y, i32 %n) {
+; CHECK-LABEL: fcmp_fast_ogt_v4f32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmaxnm.f32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a1 = fcmp fast ogt <4 x float> %x, %y
+ %a = select <4 x i1> %a1, <4 x float> %x, <4 x float> %y
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %x
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fcmp_fast_ogt_v8f16_x(<8 x half> %x, <8 x half> %y, i32 %n) {
+; CHECK-LABEL: fcmp_fast_ogt_v8f16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmaxnm.f16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a1 = fcmp fast ogt <8 x half> %x, %y
+ %a = select <8 x i1> %a1, <8 x half> %x, <8 x half> %y
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %x
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sadd_sat_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: sadd_sat_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @sadd_sat_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: sadd_sat_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @sadd_sat_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: sadd_sat_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s8 q1, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @uadd_sat_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: uadd_sat_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @uadd_sat_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: uadd_sat_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @uadd_sat_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: uadd_sat_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u8 q1, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @ssub_sat_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: ssub_sat_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @ssub_sat_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: ssub_sat_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @ssub_sat_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: ssub_sat_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s8 q1, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @usub_sat_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: usub_sat_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @usub_sat_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: usub_sat_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @usub_sat_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: usub_sat_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u8 q1, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @addqr_v4i32_x(<4 x i32> %x, i32 %y, i32 %n) {
+; CHECK-LABEL: addqr_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.i32 q1, q0, r0
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = add <4 x i32> %x, %ys
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @addqr_v8i16_x(<8 x i16> %x, i16 %y, i32 %n) {
+; CHECK-LABEL: addqr_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.i16 q1, q0, r0
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = add <8 x i16> %x, %ys
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @addqr_v16i8_x(<16 x i8> %x, i8 %y, i32 %n) {
+; CHECK-LABEL: addqr_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.i8 q1, q0, r0
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = add <16 x i8> %x, %ys
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @subqr_v4i32_x(<4 x i32> %x, i32 %y, i32 %n) {
+; CHECK-LABEL: subqr_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.i32 q1, q0, r0
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = sub <4 x i32> %x, %ys
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @subqr_v8i16_x(<8 x i16> %x, i16 %y, i32 %n) {
+; CHECK-LABEL: subqr_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.i16 q1, q0, r0
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = sub <8 x i16> %x, %ys
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @subqr_v16i8_x(<16 x i8> %x, i8 %y, i32 %n) {
+; CHECK-LABEL: subqr_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.i8 q1, q0, r0
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = sub <16 x i8> %x, %ys
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mulqr_v4i32_x(<4 x i32> %x, i32 %y, i32 %n) {
+; CHECK-LABEL: mulqr_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.i32 q1, q0, r0
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = mul <4 x i32> %x, %ys
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @mulqr_v8i16_x(<8 x i16> %x, i16 %y, i32 %n) {
+; CHECK-LABEL: mulqr_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.i16 q1, q0, r0
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = mul <8 x i16> %x, %ys
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @mulqr_v16i8_x(<16 x i8> %x, i8 %y, i32 %n) {
+; CHECK-LABEL: mulqr_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.i8 q1, q0, r0
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = mul <16 x i8> %x, %ys
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @faddqr_v4f32_x(<4 x float> %x, float %y, i32 %n) {
+; CHECK-LABEL: faddqr_v4f32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov r1, s4
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vadd.f32 q1, q0, r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x float> undef, float %y, i32 0
+ %ys = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
+ %a = fadd <4 x float> %x, %ys
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %x
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @faddqr_v8f16_x(<8 x half> %x, half %y, i32 %n) {
+; CHECK-LABEL: faddqr_v8f16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.f16 r1, s4
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vadd.f16 q1, q0, r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x half> undef, half %y, i32 0
+ %ys = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
+ %a = fadd <8 x half> %x, %ys
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %x
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fsubqr_v4f32_x(<4 x float> %x, float %y, i32 %n) {
+; CHECK-LABEL: fsubqr_v4f32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov r1, s4
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vsub.f32 q1, q0, r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x float> undef, float %y, i32 0
+ %ys = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
+ %a = fsub <4 x float> %x, %ys
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %x
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fsubqr_v8f16_x(<8 x half> %x, half %y, i32 %n) {
+; CHECK-LABEL: fsubqr_v8f16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.f16 r1, s4
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vsub.f16 q1, q0, r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x half> undef, half %y, i32 0
+ %ys = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
+ %a = fsub <8 x half> %x, %ys
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %x
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fmulqr_v4f32_x(<4 x float> %x, float %y, i32 %n) {
+; CHECK-LABEL: fmulqr_v4f32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov r1, s4
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vmul.f32 q1, q0, r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x float> undef, float %y, i32 0
+ %ys = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
+ %a = fmul <4 x float> %x, %ys
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %x
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fmulqr_v8f16_x(<8 x half> %x, half %y, i32 %n) {
+; CHECK-LABEL: fmulqr_v8f16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.f16 r1, s4
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vmul.f16 q1, q0, r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x half> undef, half %y, i32 0
+ %ys = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
+ %a = fmul <8 x half> %x, %ys
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %x
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sadd_satqr_v4i32_x(<4 x i32> %x, i32 %y, i32 %n) {
+; CHECK-LABEL: sadd_satqr_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s32 q1, q0, r0
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %x, <4 x i32> %ys)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @sadd_satqr_v8i16_x(<8 x i16> %x, i16 %y, i32 %n) {
+; CHECK-LABEL: sadd_satqr_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s16 q1, q0, r0
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %x, <8 x i16> %ys)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @sadd_satqr_v16i8_x(<16 x i8> %x, i8 %y, i32 %n) {
+; CHECK-LABEL: sadd_satqr_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s8 q1, q0, r0
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %x, <16 x i8> %ys)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @uadd_satqr_v4i32_x(<4 x i32> %x, i32 %y, i32 %n) {
+; CHECK-LABEL: uadd_satqr_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u32 q1, q0, r0
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %x, <4 x i32> %ys)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @uadd_satqr_v8i16_x(<8 x i16> %x, i16 %y, i32 %n) {
+; CHECK-LABEL: uadd_satqr_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u16 q1, q0, r0
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %x, <8 x i16> %ys)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @uadd_satqr_v16i8_x(<16 x i8> %x, i8 %y, i32 %n) {
+; CHECK-LABEL: uadd_satqr_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u8 q1, q0, r0
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %x, <16 x i8> %ys)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @ssub_satqr_v4i32_x(<4 x i32> %x, i32 %y, i32 %n) {
+; CHECK-LABEL: ssub_satqr_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s32 q1, q0, r0
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %x, <4 x i32> %ys)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @ssub_satqr_v8i16_x(<8 x i16> %x, i16 %y, i32 %n) {
+; CHECK-LABEL: ssub_satqr_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s16 q1, q0, r0
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %x, <8 x i16> %ys)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @ssub_satqr_v16i8_x(<16 x i8> %x, i8 %y, i32 %n) {
+; CHECK-LABEL: ssub_satqr_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s8 q1, q0, r0
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %x, <16 x i8> %ys)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @usub_satqr_v4i32_x(<4 x i32> %x, i32 %y, i32 %n) {
+; CHECK-LABEL: usub_satqr_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u32 q1, q0, r0
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %x, <4 x i32> %ys)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @usub_satqr_v8i16_x(<8 x i16> %x, i16 %y, i32 %n) {
+; CHECK-LABEL: usub_satqr_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u16 q1, q0, r0
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %x, <8 x i16> %ys)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @usub_satqr_v16i8_x(<16 x i8> %x, i8 %y, i32 %n) {
+; CHECK-LABEL: usub_satqr_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u8 q1, q0, r0
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %x, <16 x i8> %ys)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @add_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: add_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.i32 q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = add <4 x i32> %x, %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @add_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: add_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.i16 q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = add <8 x i16> %x, %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @add_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: add_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.i8 q0, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = add <16 x i8> %x, %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sub_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: sub_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.i32 q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = sub <4 x i32> %x, %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @sub_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: sub_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.i16 q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = sub <8 x i16> %x, %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @sub_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: sub_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.i8 q0, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = sub <16 x i8> %x, %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mul_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: mul_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.i32 q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = mul <4 x i32> %x, %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @mul_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: mul_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.i16 q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = mul <8 x i16> %x, %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @mul_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: mul_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.i8 q0, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = mul <16 x i8> %x, %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @and_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: and_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vand q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = and <4 x i32> %x, %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @and_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: and_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vand q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = and <8 x i16> %x, %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @and_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: and_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vand q0, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = and <16 x i8> %x, %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @or_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: or_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vorr q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = or <4 x i32> %x, %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @or_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: or_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vorr q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = or <8 x i16> %x, %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @or_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: or_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vorr q0, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = or <16 x i8> %x, %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @xor_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: xor_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: veor q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = xor <4 x i32> %x, %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @xor_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: xor_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: veor q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = xor <8 x i16> %x, %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @xor_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: xor_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: veor q0, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = xor <16 x i8> %x, %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @andnot_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) { ; select(vctp32(%n), x & ~y, y)
+; CHECK-LABEL: andnot_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vbic q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %y1 = xor <4 x i32> %y, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %a = and <4 x i32> %x, %y1
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @andnot_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) { ; select(vctp16(%n), x & ~y, y)
+; CHECK-LABEL: andnot_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vbic q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %y1 = xor <8 x i16> %y, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ %a = and <8 x i16> %x, %y1
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @andnot_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) { ; select(vctp8(%n), x & ~y, y)
+; CHECK-LABEL: andnot_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vbic q0, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %y1 = xor <16 x i8> %y, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %a = and <16 x i8> %x, %y1
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @ornot_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) { ; select(vctp32(%n), x | ~y, y)
+; CHECK-LABEL: ornot_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vorn q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %y1 = xor <4 x i32> %y, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %a = or <4 x i32> %x, %y1
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @ornot_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) { ; select(vctp16(%n), x | ~y, y)
+; CHECK-LABEL: ornot_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vorn q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %y1 = xor <8 x i16> %y, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ %a = or <8 x i16> %x, %y1
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @ornot_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) { ; select(vctp8(%n), x | ~y, y)
+; CHECK-LABEL: ornot_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vorn q0, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %y1 = xor <16 x i8> %y, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %a = or <16 x i8> %x, %y1
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fadd_v4f32_y(<4 x float> %x, <4 x float> %y, i32 %n) { ; select(vctp32(%n), x+y, y)
+; CHECK-LABEL: fadd_v4f32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.f32 q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = fadd <4 x float> %x, %y
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %y
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fadd_v8f16_y(<8 x half> %x, <8 x half> %y, i32 %n) { ; select(vctp16(%n), x+y, y)
+; CHECK-LABEL: fadd_v8f16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.f16 q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = fadd <8 x half> %x, %y
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %y
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fsub_v4f32_y(<4 x float> %x, <4 x float> %y, i32 %n) { ; select(vctp32(%n), x-y, y)
+; CHECK-LABEL: fsub_v4f32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.f32 q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = fsub <4 x float> %x, %y
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %y
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fsub_v8f16_y(<8 x half> %x, <8 x half> %y, i32 %n) { ; select(vctp16(%n), x-y, y)
+; CHECK-LABEL: fsub_v8f16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.f16 q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = fsub <8 x half> %x, %y
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %y
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fmul_v4f32_y(<4 x float> %x, <4 x float> %y, i32 %n) { ; select(vctp32(%n), x*y, y)
+; CHECK-LABEL: fmul_v4f32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.f32 q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = fmul <4 x float> %x, %y
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %y
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fmul_v8f16_y(<8 x half> %x, <8 x half> %y, i32 %n) { ; select(vctp16(%n), x*y, y)
+; CHECK-LABEL: fmul_v8f16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.f16 q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = fmul <8 x half> %x, %y
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %y
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @icmp_slt_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) { ; select(vctp32(%n), (x slt y) ? x : y, y)
+; CHECK-LABEL: icmp_slt_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmin.s32 q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a1 = icmp slt <4 x i32> %x, %y
+ %a = select <4 x i1> %a1, <4 x i32> %x, <4 x i32> %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @icmp_slt_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) { ; select(vctp16(%n), (x slt y) ? x : y, y)
+; CHECK-LABEL: icmp_slt_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmin.s16 q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a1 = icmp slt <8 x i16> %x, %y
+ %a = select <8 x i1> %a1, <8 x i16> %x, <8 x i16> %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @icmp_slt_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) { ; select(vctp8(%n), (x slt y) ? x : y, y)
+; CHECK-LABEL: icmp_slt_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmin.s8 q0, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a1 = icmp slt <16 x i8> %x, %y
+ %a = select <16 x i1> %a1, <16 x i8> %x, <16 x i8> %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @icmp_sgt_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) { ; select(vctp32(%n), (x sgt y) ? x : y, y)
+; CHECK-LABEL: icmp_sgt_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmax.s32 q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a1 = icmp sgt <4 x i32> %x, %y
+ %a = select <4 x i1> %a1, <4 x i32> %x, <4 x i32> %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @icmp_sgt_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) { ; select(vctp16(%n), (x sgt y) ? x : y, y)
+; CHECK-LABEL: icmp_sgt_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmax.s16 q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a1 = icmp sgt <8 x i16> %x, %y
+ %a = select <8 x i1> %a1, <8 x i16> %x, <8 x i16> %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @icmp_sgt_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) { ; select(vctp8(%n), (x sgt y) ? x : y, y)
+; CHECK-LABEL: icmp_sgt_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmax.s8 q0, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a1 = icmp sgt <16 x i8> %x, %y
+ %a = select <16 x i1> %a1, <16 x i8> %x, <16 x i8> %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @icmp_ult_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) { ; select(vctp32(%n), (x ult y) ? x : y, y)
+; CHECK-LABEL: icmp_ult_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmin.u32 q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a1 = icmp ult <4 x i32> %x, %y
+ %a = select <4 x i1> %a1, <4 x i32> %x, <4 x i32> %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @icmp_ult_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) { ; select(vctp16(%n), (x ult y) ? x : y, y)
+; CHECK-LABEL: icmp_ult_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmin.u16 q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a1 = icmp ult <8 x i16> %x, %y
+ %a = select <8 x i1> %a1, <8 x i16> %x, <8 x i16> %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @icmp_ult_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) { ; select(vctp8(%n), (x ult y) ? x : y, y)
+; CHECK-LABEL: icmp_ult_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmin.u8 q0, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a1 = icmp ult <16 x i8> %x, %y
+ %a = select <16 x i1> %a1, <16 x i8> %x, <16 x i8> %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @icmp_ugt_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) { ; select(vctp32(%n), (x ugt y) ? x : y, y)
+; CHECK-LABEL: icmp_ugt_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmax.u32 q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a1 = icmp ugt <4 x i32> %x, %y
+ %a = select <4 x i1> %a1, <4 x i32> %x, <4 x i32> %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @icmp_ugt_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) { ; select(vctp16(%n), (x ugt y) ? x : y, y)
+; CHECK-LABEL: icmp_ugt_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmax.u16 q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a1 = icmp ugt <8 x i16> %x, %y
+ %a = select <8 x i1> %a1, <8 x i16> %x, <8 x i16> %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @icmp_ugt_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) { ; select(vctp8(%n), (x ugt y) ? x : y, y)
+; CHECK-LABEL: icmp_ugt_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmax.u8 q0, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a1 = icmp ugt <16 x i8> %x, %y
+ %a = select <16 x i1> %a1, <16 x i8> %x, <16 x i8> %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fcmp_fast_olt_v4f32_y(<4 x float> %x, <4 x float> %y, i32 %n) { ; select(vctp32(%n), (x olt y) ? x : y, y), fast math
+; CHECK-LABEL: fcmp_fast_olt_v4f32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vminnm.f32 q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a1 = fcmp fast olt <4 x float> %x, %y
+ %a = select <4 x i1> %a1, <4 x float> %x, <4 x float> %y
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %y
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fcmp_fast_olt_v8f16_y(<8 x half> %x, <8 x half> %y, i32 %n) { ; select(vctp16(%n), (x olt y) ? x : y, y), fast math
+; CHECK-LABEL: fcmp_fast_olt_v8f16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vminnm.f16 q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a1 = fcmp fast olt <8 x half> %x, %y
+ %a = select <8 x i1> %a1, <8 x half> %x, <8 x half> %y
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %y
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fcmp_fast_ogt_v4f32_y(<4 x float> %x, <4 x float> %y, i32 %n) { ; select(vctp32(%n), (x ogt y) ? x : y, y), fast math
+; CHECK-LABEL: fcmp_fast_ogt_v4f32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmaxnm.f32 q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a1 = fcmp fast ogt <4 x float> %x, %y
+ %a = select <4 x i1> %a1, <4 x float> %x, <4 x float> %y
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %y
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fcmp_fast_ogt_v8f16_y(<8 x half> %x, <8 x half> %y, i32 %n) { ; select(vctp16(%n), (x ogt y) ? x : y, y), fast math
+; CHECK-LABEL: fcmp_fast_ogt_v8f16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmaxnm.f16 q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a1 = fcmp fast ogt <8 x half> %x, %y
+ %a = select <8 x i1> %a1, <8 x half> %x, <8 x half> %y
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %y
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sadd_sat_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) { ; select(vctp32(%n), sadd.sat(x, y), y)
+; CHECK-LABEL: sadd_sat_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s32 q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @sadd_sat_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) { ; select(vctp16(%n), sadd.sat(x, y), y)
+; CHECK-LABEL: sadd_sat_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s16 q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @sadd_sat_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) { ; select(vctp8(%n), sadd.sat(x, y), y)
+; CHECK-LABEL: sadd_sat_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s8 q0, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @uadd_sat_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) { ; select(vctp32(%n), uadd.sat(x, y), y)
+; CHECK-LABEL: uadd_sat_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u32 q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @uadd_sat_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) { ; select(vctp16(%n), uadd.sat(x, y), y)
+; CHECK-LABEL: uadd_sat_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u16 q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @uadd_sat_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) { ; select(vctp8(%n), uadd.sat(x, y), y)
+; CHECK-LABEL: uadd_sat_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u8 q0, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @ssub_sat_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) { ; select(vctp32(%n), ssub.sat(x, y), y)
+; CHECK-LABEL: ssub_sat_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s32 q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @ssub_sat_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) { ; select(vctp16(%n), ssub.sat(x, y), y)
+; CHECK-LABEL: ssub_sat_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s16 q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @ssub_sat_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) { ; select(vctp8(%n), ssub.sat(x, y), y)
+; CHECK-LABEL: ssub_sat_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s8 q0, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @usub_sat_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) { ; select(vctp32(%n), usub.sat(x, y), y)
+; CHECK-LABEL: usub_sat_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u32 q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @usub_sat_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) { ; select(vctp16(%n), usub.sat(x, y), y)
+; CHECK-LABEL: usub_sat_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u16 q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @usub_sat_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) { ; select(vctp8(%n), usub.sat(x, y), y)
+; CHECK-LABEL: usub_sat_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u8 q0, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @addqr_v4i32_y(<4 x i32> %x, i32 %y, i32 %n) { ; select(vctp32(%n), x + splat(y), splat(y))
+; CHECK-LABEL: addqr_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.i32 q0, q0, r0
+; CHECK-NEXT: vdup.32 q1, r0
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = add <4 x i32> %x, %ys
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %ys
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @addqr_v8i16_y(<8 x i16> %x, i16 %y, i32 %n) { ; select(vctp16(%n), x + splat(y), splat(y))
+; CHECK-LABEL: addqr_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.i16 q0, q0, r0
+; CHECK-NEXT: vdup.16 q1, r0
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = add <8 x i16> %x, %ys
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %ys
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @addqr_v16i8_y(<16 x i8> %x, i8 %y, i32 %n) { ; select(vctp8(%n), x + splat(y), splat(y))
+; CHECK-LABEL: addqr_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.i8 q0, q0, r0
+; CHECK-NEXT: vdup.8 q1, r0
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = add <16 x i8> %x, %ys
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %ys
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @subqr_v4i32_y(<4 x i32> %x, i32 %y, i32 %n) { ; select(vctp32(%n), x - splat(y), splat(y))
+; CHECK-LABEL: subqr_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.i32 q0, q0, r0
+; CHECK-NEXT: vdup.32 q1, r0
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = sub <4 x i32> %x, %ys
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %ys
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @subqr_v8i16_y(<8 x i16> %x, i16 %y, i32 %n) { ; select(vctp16(%n), x - splat(y), splat(y))
+; CHECK-LABEL: subqr_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.i16 q0, q0, r0
+; CHECK-NEXT: vdup.16 q1, r0
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = sub <8 x i16> %x, %ys
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %ys
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @subqr_v16i8_y(<16 x i8> %x, i8 %y, i32 %n) { ; select(vctp8(%n), x - splat(y), splat(y))
+; CHECK-LABEL: subqr_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.i8 q0, q0, r0
+; CHECK-NEXT: vdup.8 q1, r0
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = sub <16 x i8> %x, %ys
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %ys
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mulqr_v4i32_y(<4 x i32> %x, i32 %y, i32 %n) { ; select(vctp32(%n), x * splat(y), splat(y))
+; CHECK-LABEL: mulqr_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.i32 q0, q0, r0
+; CHECK-NEXT: vdup.32 q1, r0
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = mul <4 x i32> %x, %ys
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %ys
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @mulqr_v8i16_y(<8 x i16> %x, i16 %y, i32 %n) { ; select(vctp16(%n), x * splat(y), splat(y))
+; CHECK-LABEL: mulqr_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.i16 q0, q0, r0
+; CHECK-NEXT: vdup.16 q1, r0
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = mul <8 x i16> %x, %ys
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %ys
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @mulqr_v16i8_y(<16 x i8> %x, i8 %y, i32 %n) { ; select(vctp8(%n), x * splat(y), splat(y))
+; CHECK-LABEL: mulqr_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.i8 q0, q0, r0
+; CHECK-NEXT: vdup.8 q1, r0
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = mul <16 x i8> %x, %ys
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %ys
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @faddqr_v4f32_y(<4 x float> %x, float %y, i32 %n) { ; select(vctp32(%n), x + splat(y), splat(y))
+; CHECK-LABEL: faddqr_v4f32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov r1, s4
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vadd.f32 q0, q0, r1
+; CHECK-NEXT: vdup.32 q1, r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x float> undef, float %y, i32 0
+ %ys = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
+ %a = fadd <4 x float> %x, %ys
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %ys
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @faddqr_v8f16_y(<8 x half> %x, half %y, i32 %n) { ; select(vctp16(%n), x + splat(y), splat(y))
+; CHECK-LABEL: faddqr_v8f16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.f16 r1, s4
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vadd.f16 q0, q0, r1
+; CHECK-NEXT: vdup.16 q1, r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x half> undef, half %y, i32 0
+ %ys = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
+ %a = fadd <8 x half> %x, %ys
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %ys
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fsubqr_v4f32_y(<4 x float> %x, float %y, i32 %n) { ; select(vctp32(%n), x - splat(y), splat(y))
+; CHECK-LABEL: fsubqr_v4f32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov r1, s4
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vsub.f32 q0, q0, r1
+; CHECK-NEXT: vdup.32 q1, r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x float> undef, float %y, i32 0
+ %ys = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
+ %a = fsub <4 x float> %x, %ys
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %ys
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fsubqr_v8f16_y(<8 x half> %x, half %y, i32 %n) { ; select(vctp16(%n), x - splat(y), splat(y))
+; CHECK-LABEL: fsubqr_v8f16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.f16 r1, s4
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vsub.f16 q0, q0, r1
+; CHECK-NEXT: vdup.16 q1, r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x half> undef, half %y, i32 0
+ %ys = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
+ %a = fsub <8 x half> %x, %ys
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %ys
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fmulqr_v4f32_y(<4 x float> %x, float %y, i32 %n) { ; select(vctp32(%n), x * splat(y), splat(y))
+; CHECK-LABEL: fmulqr_v4f32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov r1, s4
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vmul.f32 q0, q0, r1
+; CHECK-NEXT: vdup.32 q1, r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x float> undef, float %y, i32 0
+ %ys = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
+ %a = fmul <4 x float> %x, %ys
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %ys
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fmulqr_v8f16_y(<8 x half> %x, half %y, i32 %n) { ; select(vctp16(%n), x * splat(y), splat(y))
+; CHECK-LABEL: fmulqr_v8f16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.f16 r1, s4
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vmul.f16 q0, q0, r1
+; CHECK-NEXT: vdup.16 q1, r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x half> undef, half %y, i32 0
+ %ys = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
+ %a = fmul <8 x half> %x, %ys
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %ys
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sadd_satqr_v4i32_y(<4 x i32> %x, i32 %y, i32 %n) { ; select(vctp32(%n), sadd.sat(x, splat(y)), splat(y))
+; CHECK-LABEL: sadd_satqr_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s32 q0, q0, r0
+; CHECK-NEXT: vdup.32 q1, r0
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %x, <4 x i32> %ys)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %ys
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @sadd_satqr_v8i16_y(<8 x i16> %x, i16 %y, i32 %n) { ; select(vctp16(%n), sadd.sat(x, splat(y)), splat(y))
+; CHECK-LABEL: sadd_satqr_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s16 q0, q0, r0
+; CHECK-NEXT: vdup.16 q1, r0
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %x, <8 x i16> %ys)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %ys
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @sadd_satqr_v16i8_y(<16 x i8> %x, i8 %y, i32 %n) { ; select(vctp8(%n), sadd.sat(x, splat(y)), splat(y))
+; CHECK-LABEL: sadd_satqr_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s8 q0, q0, r0
+; CHECK-NEXT: vdup.8 q1, r0
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %x, <16 x i8> %ys)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %ys
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @uadd_satqr_v4i32_y(<4 x i32> %x, i32 %y, i32 %n) { ; select(vctp32(%n), uadd.sat(x, splat(y)), splat(y))
+; CHECK-LABEL: uadd_satqr_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u32 q0, q0, r0
+; CHECK-NEXT: vdup.32 q1, r0
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %x, <4 x i32> %ys)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %ys
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @uadd_satqr_v8i16_y(<8 x i16> %x, i16 %y, i32 %n) { ; select(vctp16(%n), uadd.sat(x, splat(y)), splat(y))
+; CHECK-LABEL: uadd_satqr_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u16 q0, q0, r0
+; CHECK-NEXT: vdup.16 q1, r0
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %x, <8 x i16> %ys)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %ys
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @uadd_satqr_v16i8_y(<16 x i8> %x, i8 %y, i32 %n) { ; select(vctp8(%n), uadd.sat(x, splat(y)), splat(y))
+; CHECK-LABEL: uadd_satqr_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u8 q0, q0, r0
+; CHECK-NEXT: vdup.8 q1, r0
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %x, <16 x i8> %ys)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %ys
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @ssub_satqr_v4i32_y(<4 x i32> %x, i32 %y, i32 %n) {
+; CHECK-LABEL: ssub_satqr_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s32 q0, q0, r0
+; CHECK-NEXT: vdup.32 q1, r0
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %x, <4 x i32> %ys)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %ys
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @ssub_satqr_v8i16_y(<8 x i16> %x, i16 %y, i32 %n) {
+; CHECK-LABEL: ssub_satqr_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s16 q0, q0, r0
+; CHECK-NEXT: vdup.16 q1, r0
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %x, <8 x i16> %ys)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %ys
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @ssub_satqr_v16i8_y(<16 x i8> %x, i8 %y, i32 %n) {
+; CHECK-LABEL: ssub_satqr_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s8 q0, q0, r0
+; CHECK-NEXT: vdup.8 q1, r0
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %x, <16 x i8> %ys)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %ys
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @usub_satqr_v4i32_y(<4 x i32> %x, i32 %y, i32 %n) {
+; CHECK-LABEL: usub_satqr_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u32 q0, q0, r0
+; CHECK-NEXT: vdup.32 q1, r0
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %x, <4 x i32> %ys)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %ys
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @usub_satqr_v8i16_y(<8 x i16> %x, i16 %y, i32 %n) {
+; CHECK-LABEL: usub_satqr_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u16 q0, q0, r0
+; CHECK-NEXT: vdup.16 q1, r0
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %x, <8 x i16> %ys)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %ys
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @usub_satqr_v16i8_y(<16 x i8> %x, i8 %y, i32 %n) {
+; CHECK-LABEL: usub_satqr_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u8 q0, q0, r0
+; CHECK-NEXT: vdup.8 q1, r0
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %x, <16 x i8> %ys)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %ys
+ ret <16 x i8> %b
+}
+
+declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32>, <4 x i32>)
+declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32>, <4 x i32>)
+declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32>, <4 x i32>)
+declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>)
+
+declare <16 x i1> @llvm.arm.mve.vctp8(i32)
+declare <8 x i1> @llvm.arm.mve.vctp16(i32)
+declare <4 x i1> @llvm.arm.mve.vctp32(i32)
diff --git a/llvm/test/CodeGen/Thumb2/mve-pred-selectop3.ll b/llvm/test/CodeGen/Thumb2/mve-pred-selectop3.ll
new file mode 100644
index 000000000000..e856e5d3f9e2
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-pred-selectop3.ll
@@ -0,0 +1,2669 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
+
+define arm_aapcs_vfpcc <4 x i32> @add_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: add_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q1, q1, q2
+; CHECK-NEXT: vadd.i32 q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = select <4 x i1> %c, <4 x i32> %y, <4 x i32> zeroinitializer
+ %b = add <4 x i32> %a, %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @add_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: add_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q1, q1, q2
+; CHECK-NEXT: vadd.i16 q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = select <8 x i1> %c, <8 x i16> %y, <8 x i16> zeroinitializer
+ %b = add <8 x i16> %a, %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @add_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: add_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q1, q1, q2
+; CHECK-NEXT: vadd.i8 q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = select <16 x i1> %c, <16 x i8> %y, <16 x i8> zeroinitializer
+ %b = add <16 x i8> %a, %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sub_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: sub_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q1, q1, q2
+; CHECK-NEXT: vsub.i32 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = select <4 x i1> %c, <4 x i32> %y, <4 x i32> zeroinitializer
+ %b = sub <4 x i32> %x, %a
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @sub_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: sub_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q1, q1, q2
+; CHECK-NEXT: vsub.i16 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = select <8 x i1> %c, <8 x i16> %y, <8 x i16> zeroinitializer
+ %b = sub <8 x i16> %x, %a
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @sub_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: sub_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q1, q1, q2
+; CHECK-NEXT: vsub.i8 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = select <16 x i1> %c, <16 x i8> %y, <16 x i8> zeroinitializer
+ %b = sub <16 x i8> %x, %a
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mul_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: mul_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q2, #0x1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q1, q1, q2
+; CHECK-NEXT: vmul.i32 q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = select <4 x i1> %c, <4 x i32> %y, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %b = mul <4 x i32> %a, %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @mul_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: mul_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i16 q2, #0x1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q1, q1, q2
+; CHECK-NEXT: vmul.i16 q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = select <8 x i1> %c, <8 x i16> %y, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %b = mul <8 x i16> %a, %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @mul_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: mul_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i8 q2, #0x1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q1, q1, q2
+; CHECK-NEXT: vmul.i8 q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = select <16 x i1> %c, <16 x i8> %y, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %b = mul <16 x i8> %a, %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @and_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: and_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i8 q2, #0xff
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q1, q1, q2
+; CHECK-NEXT: vand q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = select <4 x i1> %c, <4 x i32> %y, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
+ %b = and <4 x i32> %a, %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @and_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: and_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i8 q2, #0xff
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q1, q1, q2
+; CHECK-NEXT: vand q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = select <8 x i1> %c, <8 x i16> %y, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ %b = and <8 x i16> %a, %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @and_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: and_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i8 q2, #0xff
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q1, q1, q2
+; CHECK-NEXT: vand q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = select <16 x i1> %c, <16 x i8> %y, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %b = and <16 x i8> %a, %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @or_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: or_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q1, q1, q2
+; CHECK-NEXT: vorr q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = select <4 x i1> %c, <4 x i32> %y, <4 x i32> zeroinitializer
+ %b = or <4 x i32> %a, %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @or_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: or_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q1, q1, q2
+; CHECK-NEXT: vorr q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = select <8 x i1> %c, <8 x i16> %y, <8 x i16> zeroinitializer
+ %b = or <8 x i16> %a, %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @or_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: or_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q1, q1, q2
+; CHECK-NEXT: vorr q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = select <16 x i1> %c, <16 x i8> %y, <16 x i8> zeroinitializer
+ %b = or <16 x i8> %a, %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @xor_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: xor_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q1, q1, q2
+; CHECK-NEXT: veor q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = select <4 x i1> %c, <4 x i32> %y, <4 x i32> zeroinitializer
+ %b = xor <4 x i32> %a, %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @xor_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: xor_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q1, q1, q2
+; CHECK-NEXT: veor q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = select <8 x i1> %c, <8 x i16> %y, <8 x i16> zeroinitializer
+ %b = xor <8 x i16> %a, %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @xor_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: xor_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q1, q1, q2
+; CHECK-NEXT: veor q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = select <16 x i1> %c, <16 x i8> %y, <16 x i8> zeroinitializer
+ %b = xor <16 x i8> %a, %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @andnot_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: andnot_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i8 q2, #0xff
+; CHECK-NEXT: vmvn q1, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q1, q1, q2
+; CHECK-NEXT: vand q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %y1 = xor <4 x i32> %y, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %a = select <4 x i1> %c, <4 x i32> %y1, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
+ %b = and <4 x i32> %a, %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @andnot_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: andnot_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i8 q2, #0xff
+; CHECK-NEXT: vmvn q1, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q1, q1, q2
+; CHECK-NEXT: vand q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %y1 = xor <8 x i16> %y, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ %a = select <8 x i1> %c, <8 x i16> %y1, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ %b = and <8 x i16> %a, %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @andnot_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: andnot_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i8 q2, #0xff
+; CHECK-NEXT: vmvn q1, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q1, q1, q2
+; CHECK-NEXT: vand q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %y1 = xor <16 x i8> %y, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %a = select <16 x i1> %c, <16 x i8> %y1, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %b = and <16 x i8> %a, %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @ornot_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: ornot_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vmvn q1, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q1, q1, q2
+; CHECK-NEXT: vorr q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %y1 = xor <4 x i32> %y, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %a = select <4 x i1> %c, <4 x i32> %y1, <4 x i32> zeroinitializer
+ %b = or <4 x i32> %a, %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @ornot_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: ornot_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vmvn q1, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q1, q1, q2
+; CHECK-NEXT: vorr q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %y1 = xor <8 x i16> %y, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ %a = select <8 x i1> %c, <8 x i16> %y1, <8 x i16> zeroinitializer
+ %b = or <8 x i16> %a, %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @ornot_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: ornot_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vmvn q1, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q1, q1, q2
+; CHECK-NEXT: vorr q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %y1 = xor <16 x i8> %y, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %a = select <16 x i1> %c, <16 x i8> %y1, <16 x i8> zeroinitializer
+ %b = or <16 x i8> %a, %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fadd_v4f32_x(<4 x float> %x, <4 x float> %y, i32 %n) {
+; CHECK-LABEL: fadd_v4f32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.f32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = fadd <4 x float> %x, %y
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %x
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fadd_v8f16_x(<8 x half> %x, <8 x half> %y, i32 %n) {
+; CHECK-LABEL: fadd_v8f16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.f16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = fadd <8 x half> %x, %y
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %x
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fsub_v4f32_x(<4 x float> %x, <4 x float> %y, i32 %n) {
+; CHECK-LABEL: fsub_v4f32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.f32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = fsub <4 x float> %x, %y
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %x
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fsub_v8f16_x(<8 x half> %x, <8 x half> %y, i32 %n) {
+; CHECK-LABEL: fsub_v8f16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.f16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = fsub <8 x half> %x, %y
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %x
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fmul_v4f32_x(<4 x float> %x, <4 x float> %y, i32 %n) {
+; CHECK-LABEL: fmul_v4f32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.f32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = fmul <4 x float> %x, %y
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %x
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fmul_v8f16_x(<8 x half> %x, <8 x half> %y, i32 %n) {
+; CHECK-LABEL: fmul_v8f16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.f16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = fmul <8 x half> %x, %y
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %x
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @icmp_slt_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: icmp_slt_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmin.s32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a1 = icmp slt <4 x i32> %x, %y
+ %a = select <4 x i1> %a1, <4 x i32> %x, <4 x i32> %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @icmp_slt_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: icmp_slt_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmin.s16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a1 = icmp slt <8 x i16> %x, %y
+ %a = select <8 x i1> %a1, <8 x i16> %x, <8 x i16> %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @icmp_slt_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: icmp_slt_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmin.s8 q1, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a1 = icmp slt <16 x i8> %x, %y
+ %a = select <16 x i1> %a1, <16 x i8> %x, <16 x i8> %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @icmp_sgt_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: icmp_sgt_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmax.s32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a1 = icmp sgt <4 x i32> %x, %y
+ %a = select <4 x i1> %a1, <4 x i32> %x, <4 x i32> %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @icmp_sgt_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: icmp_sgt_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmax.s16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a1 = icmp sgt <8 x i16> %x, %y
+ %a = select <8 x i1> %a1, <8 x i16> %x, <8 x i16> %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @icmp_sgt_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: icmp_sgt_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmax.s8 q1, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a1 = icmp sgt <16 x i8> %x, %y
+ %a = select <16 x i1> %a1, <16 x i8> %x, <16 x i8> %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @icmp_ult_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: icmp_ult_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmin.u32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a1 = icmp ult <4 x i32> %x, %y
+ %a = select <4 x i1> %a1, <4 x i32> %x, <4 x i32> %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @icmp_ult_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: icmp_ult_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmin.u16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a1 = icmp ult <8 x i16> %x, %y
+ %a = select <8 x i1> %a1, <8 x i16> %x, <8 x i16> %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @icmp_ult_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: icmp_ult_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmin.u8 q1, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a1 = icmp ult <16 x i8> %x, %y
+ %a = select <16 x i1> %a1, <16 x i8> %x, <16 x i8> %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @icmp_ugt_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: icmp_ugt_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmax.u32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a1 = icmp ugt <4 x i32> %x, %y
+ %a = select <4 x i1> %a1, <4 x i32> %x, <4 x i32> %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @icmp_ugt_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: icmp_ugt_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmax.u16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a1 = icmp ugt <8 x i16> %x, %y
+ %a = select <8 x i1> %a1, <8 x i16> %x, <8 x i16> %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @icmp_ugt_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: icmp_ugt_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmax.u8 q1, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a1 = icmp ugt <16 x i8> %x, %y
+ %a = select <16 x i1> %a1, <16 x i8> %x, <16 x i8> %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fcmp_fast_olt_v4f32_x(<4 x float> %x, <4 x float> %y, i32 %n) {
+; CHECK-LABEL: fcmp_fast_olt_v4f32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vminnm.f32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a1 = fcmp fast olt <4 x float> %x, %y
+ %a = select <4 x i1> %a1, <4 x float> %x, <4 x float> %y
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %x
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fcmp_fast_olt_v8f16_x(<8 x half> %x, <8 x half> %y, i32 %n) {
+; CHECK-LABEL: fcmp_fast_olt_v8f16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vminnm.f16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a1 = fcmp fast olt <8 x half> %x, %y
+ %a = select <8 x i1> %a1, <8 x half> %x, <8 x half> %y
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %x
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fcmp_fast_ogt_v4f32_x(<4 x float> %x, <4 x float> %y, i32 %n) {
+; CHECK-LABEL: fcmp_fast_ogt_v4f32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmaxnm.f32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a1 = fcmp fast ogt <4 x float> %x, %y
+ %a = select <4 x i1> %a1, <4 x float> %x, <4 x float> %y
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %x
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fcmp_fast_ogt_v8f16_x(<8 x half> %x, <8 x half> %y, i32 %n) {
+; CHECK-LABEL: fcmp_fast_ogt_v8f16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmaxnm.f16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a1 = fcmp fast ogt <8 x half> %x, %y
+ %a = select <8 x i1> %a1, <8 x half> %x, <8 x half> %y
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %x
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sadd_sat_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: sadd_sat_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @sadd_sat_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: sadd_sat_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @sadd_sat_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: sadd_sat_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s8 q1, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @uadd_sat_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: uadd_sat_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+; Saturating add/sub, "_x" form: the saturating intrinsic result is selected
+; against the first operand (%x) under a VCTP tail predicate. Expected
+; lowering (per the autogenerated CHECK lines): an unpredicated vqadd/vqsub
+; into a scratch register followed by a vpsel against the original input.
+define arm_aapcs_vfpcc <8 x i16> @uadd_sat_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: uadd_sat_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @uadd_sat_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: uadd_sat_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u8 q1, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @ssub_sat_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: ssub_sat_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @ssub_sat_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: ssub_sat_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @ssub_sat_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: ssub_sat_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s8 q1, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @usub_sat_v4i32_x(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: usub_sat_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u32 q1, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @usub_sat_v8i16_x(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: usub_sat_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u16 q1, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @usub_sat_v16i8_x(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: usub_sat_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u8 q1, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+; Integer "qr" forms: a scalar %y is splatted (insertelement + shufflevector)
+; and the splat is selected against the operation's identity value (0 for
+; add/sub, 1 for mul) under the VCTP predicate before the binary op with %x.
+; Current codegen (see CHECK lines) materialises the identity, does a
+; predicated vdup of the scalar, then an ordinary vector add/sub/mul.
+define arm_aapcs_vfpcc <4 x i32> @addqr_v4i32_x(<4 x i32> %x, i32 %y, i32 %n) {
+; CHECK-LABEL: addqr_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q1, #0x0
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vdupt.32 q1, r0
+; CHECK-NEXT: vadd.i32 q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = select <4 x i1> %c, <4 x i32> %ys, <4 x i32> zeroinitializer
+ %b = add <4 x i32> %a, %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @addqr_v8i16_x(<8 x i16> %x, i16 %y, i32 %n) {
+; CHECK-LABEL: addqr_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q1, #0x0
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vdupt.16 q1, r0
+; CHECK-NEXT: vadd.i16 q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = select <8 x i1> %c, <8 x i16> %ys, <8 x i16> zeroinitializer
+ %b = add <8 x i16> %a, %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @addqr_v16i8_x(<16 x i8> %x, i8 %y, i32 %n) {
+; CHECK-LABEL: addqr_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q1, #0x0
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vdupt.8 q1, r0
+; CHECK-NEXT: vadd.i8 q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = select <16 x i1> %c, <16 x i8> %ys, <16 x i8> zeroinitializer
+ %b = add <16 x i8> %a, %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @subqr_v4i32_x(<4 x i32> %x, i32 %y, i32 %n) {
+; CHECK-LABEL: subqr_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q1, #0x0
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vdupt.32 q1, r0
+; CHECK-NEXT: vsub.i32 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = select <4 x i1> %c, <4 x i32> %ys, <4 x i32> zeroinitializer
+ %b = sub <4 x i32> %x, %a
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @subqr_v8i16_x(<8 x i16> %x, i16 %y, i32 %n) {
+; CHECK-LABEL: subqr_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q1, #0x0
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vdupt.16 q1, r0
+; CHECK-NEXT: vsub.i16 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = select <8 x i1> %c, <8 x i16> %ys, <8 x i16> zeroinitializer
+ %b = sub <8 x i16> %x, %a
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @subqr_v16i8_x(<16 x i8> %x, i8 %y, i32 %n) {
+; CHECK-LABEL: subqr_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q1, #0x0
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vdupt.8 q1, r0
+; CHECK-NEXT: vsub.i8 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = select <16 x i1> %c, <16 x i8> %ys, <16 x i8> zeroinitializer
+ %b = sub <16 x i8> %x, %a
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mulqr_v4i32_x(<4 x i32> %x, i32 %y, i32 %n) {
+; CHECK-LABEL: mulqr_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q1, #0x1
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vdupt.32 q1, r0
+; CHECK-NEXT: vmul.i32 q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = select <4 x i1> %c, <4 x i32> %ys, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %b = mul <4 x i32> %a, %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @mulqr_v8i16_x(<8 x i16> %x, i16 %y, i32 %n) {
+; CHECK-LABEL: mulqr_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i16 q1, #0x1
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vdupt.16 q1, r0
+; CHECK-NEXT: vmul.i16 q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = select <8 x i1> %c, <8 x i16> %ys, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %b = mul <8 x i16> %a, %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @mulqr_v16i8_x(<16 x i8> %x, i8 %y, i32 %n) {
+; CHECK-LABEL: mulqr_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i8 q1, #0x1
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vdupt.8 q1, r0
+; CHECK-NEXT: vmul.i8 q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = select <16 x i1> %c, <16 x i8> %ys, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %b = mul <16 x i8> %a, %x
+ ret <16 x i8> %b
+}
+
+; Float "qr" forms: a scalar is splatted, the fadd/fsub/fmul is done
+; unpredicated, and the result is selected against %x under the VCTP
+; predicate. Codegen uses the vector-by-scalar instruction form
+; (e.g. vadd.f32 q, q, r) followed by a vpsel.
+define arm_aapcs_vfpcc <4 x float> @faddqr_v4f32_x(<4 x float> %x, float %y, i32 %n) {
+; CHECK-LABEL: faddqr_v4f32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov r1, s4
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vadd.f32 q1, q0, r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x float> undef, float %y, i32 0
+ %ys = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
+ %a = fadd <4 x float> %ys, %x
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %x
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @faddqr_v8f16_x(<8 x half> %x, half %y, i32 %n) {
+; CHECK-LABEL: faddqr_v8f16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.f16 r1, s4
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vadd.f16 q1, q0, r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x half> undef, half %y, i32 0
+ %ys = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
+ %a = fadd <8 x half> %ys, %x
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %x
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fsubqr_v4f32_x(<4 x float> %x, float %y, i32 %n) {
+; CHECK-LABEL: fsubqr_v4f32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov r1, s4
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vsub.f32 q1, q0, r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x float> undef, float %y, i32 0
+ %ys = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
+ %a = fsub <4 x float> %x, %ys
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %x
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fsubqr_v8f16_x(<8 x half> %x, half %y, i32 %n) {
+; CHECK-LABEL: fsubqr_v8f16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.f16 r1, s4
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vsub.f16 q1, q0, r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x half> undef, half %y, i32 0
+ %ys = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
+ %a = fsub <8 x half> %x, %ys
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %x
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fmulqr_v4f32_x(<4 x float> %x, float %y, i32 %n) {
+; CHECK-LABEL: fmulqr_v4f32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov r1, s4
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vmul.f32 q1, q0, r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x float> undef, float %y, i32 0
+ %ys = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
+ %a = fmul <4 x float> %ys, %x
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %x
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fmulqr_v8f16_x(<8 x half> %x, half %y, i32 %n) {
+; CHECK-LABEL: fmulqr_v8f16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.f16 r1, s4
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vmul.f16 q1, q0, r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x half> undef, half %y, i32 0
+ %ys = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
+ %a = fmul <8 x half> %ys, %x
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %x
+ ret <8 x half> %b
+}
+
+; Saturating "qr" forms: a scalar is splatted, fed to the signed/unsigned
+; saturating add/sub intrinsic with %x, and the result selected against %x
+; under the VCTP predicate. Codegen uses the scalar-operand vqadd/vqsub
+; (q, q, r) followed by a vpsel.
+define arm_aapcs_vfpcc <4 x i32> @sadd_satqr_v4i32_x(<4 x i32> %x, i32 %y, i32 %n) {
+; CHECK-LABEL: sadd_satqr_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s32 q1, q0, r0
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %x, <4 x i32> %ys)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @sadd_satqr_v8i16_x(<8 x i16> %x, i16 %y, i32 %n) {
+; CHECK-LABEL: sadd_satqr_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s16 q1, q0, r0
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %x, <8 x i16> %ys)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @sadd_satqr_v16i8_x(<16 x i8> %x, i8 %y, i32 %n) {
+; CHECK-LABEL: sadd_satqr_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s8 q1, q0, r0
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %x, <16 x i8> %ys)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @uadd_satqr_v4i32_x(<4 x i32> %x, i32 %y, i32 %n) {
+; CHECK-LABEL: uadd_satqr_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u32 q1, q0, r0
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %x, <4 x i32> %ys)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @uadd_satqr_v8i16_x(<8 x i16> %x, i16 %y, i32 %n) {
+; CHECK-LABEL: uadd_satqr_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u16 q1, q0, r0
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %x, <8 x i16> %ys)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @uadd_satqr_v16i8_x(<16 x i8> %x, i8 %y, i32 %n) {
+; CHECK-LABEL: uadd_satqr_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u8 q1, q0, r0
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %x, <16 x i8> %ys)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @ssub_satqr_v4i32_x(<4 x i32> %x, i32 %y, i32 %n) {
+; CHECK-LABEL: ssub_satqr_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s32 q1, q0, r0
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %x, <4 x i32> %ys)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @ssub_satqr_v8i16_x(<8 x i16> %x, i16 %y, i32 %n) {
+; CHECK-LABEL: ssub_satqr_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s16 q1, q0, r0
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %x, <8 x i16> %ys)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @ssub_satqr_v16i8_x(<16 x i8> %x, i8 %y, i32 %n) {
+; CHECK-LABEL: ssub_satqr_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s8 q1, q0, r0
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %x, <16 x i8> %ys)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @usub_satqr_v4i32_x(<4 x i32> %x, i32 %y, i32 %n) {
+; CHECK-LABEL: usub_satqr_v4i32_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u32 q1, q0, r0
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %x, <4 x i32> %ys)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %x
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @usub_satqr_v8i16_x(<8 x i16> %x, i16 %y, i32 %n) {
+; CHECK-LABEL: usub_satqr_v8i16_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u16 q1, q0, r0
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %x, <8 x i16> %ys)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %x
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @usub_satqr_v16i8_x(<16 x i8> %x, i8 %y, i32 %n) {
+; CHECK-LABEL: usub_satqr_v16i8_x:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u8 q1, q0, r0
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %x, <16 x i8> %ys)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %x
+ ret <16 x i8> %b
+}
+
+; "_y" variants of add/mul select %x against the operation's identity
+; (0 resp. 1) under the predicate before the binop with %y; the sub "_y"
+; variant instead does the sub first and selects the result against %y.
+define arm_aapcs_vfpcc <4 x i32> @add_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: add_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q2
+; CHECK-NEXT: vadd.i32 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = select <4 x i1> %c, <4 x i32> %x, <4 x i32> zeroinitializer
+ %b = add <4 x i32> %a, %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @add_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: add_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q2
+; CHECK-NEXT: vadd.i16 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = select <8 x i1> %c, <8 x i16> %x, <8 x i16> zeroinitializer
+ %b = add <8 x i16> %a, %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @add_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: add_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q2
+; CHECK-NEXT: vadd.i8 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = select <16 x i1> %c, <16 x i8> %x, <16 x i8> zeroinitializer
+ %b = add <16 x i8> %a, %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sub_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: sub_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.i32 q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = sub <4 x i32> %x, %y
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @sub_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: sub_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.i16 q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = sub <8 x i16> %x, %y
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @sub_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: sub_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.i8 q0, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = sub <16 x i8> %x, %y
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mul_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: mul_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q2, #0x1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q2
+; CHECK-NEXT: vmul.i32 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = select <4 x i1> %c, <4 x i32> %x, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %b = mul <4 x i32> %a, %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @mul_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: mul_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i16 q2, #0x1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q2
+; CHECK-NEXT: vmul.i16 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = select <8 x i1> %c, <8 x i16> %x, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %b = mul <8 x i16> %a, %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @mul_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: mul_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i8 q2, #0x1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q2
+; CHECK-NEXT: vmul.i8 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = select <16 x i1> %c, <16 x i8> %x, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %b = mul <16 x i8> %a, %y
+ ret <16 x i8> %b
+}
+
+; Bitwise "_y" variants: %x is selected against the op's identity
+; (all-ones for and, zero for or/xor) under the predicate, then combined
+; with %y using vand/vorr/veor.
+define arm_aapcs_vfpcc <4 x i32> @and_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: and_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i8 q2, #0xff
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q2
+; CHECK-NEXT: vand q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = select <4 x i1> %c, <4 x i32> %x, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
+ %b = and <4 x i32> %a, %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @and_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: and_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i8 q2, #0xff
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q2
+; CHECK-NEXT: vand q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = select <8 x i1> %c, <8 x i16> %x, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ %b = and <8 x i16> %a, %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @and_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: and_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i8 q2, #0xff
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q2
+; CHECK-NEXT: vand q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = select <16 x i1> %c, <16 x i8> %x, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %b = and <16 x i8> %a, %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @or_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: or_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q2
+; CHECK-NEXT: vorr q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = select <4 x i1> %c, <4 x i32> %x, <4 x i32> zeroinitializer
+ %b = or <4 x i32> %a, %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @or_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: or_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q2
+; CHECK-NEXT: vorr q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = select <8 x i1> %c, <8 x i16> %x, <8 x i16> zeroinitializer
+ %b = or <8 x i16> %a, %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @or_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: or_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q2
+; CHECK-NEXT: vorr q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = select <16 x i1> %c, <16 x i8> %x, <16 x i8> zeroinitializer
+ %b = or <16 x i8> %a, %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @xor_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: xor_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q2
+; CHECK-NEXT: veor q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = select <4 x i1> %c, <4 x i32> %x, <4 x i32> zeroinitializer
+ %b = xor <4 x i32> %a, %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @xor_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: xor_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q2
+; CHECK-NEXT: veor q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = select <8 x i1> %c, <8 x i16> %x, <8 x i16> zeroinitializer
+ %b = xor <8 x i16> %a, %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @xor_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: xor_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q2, #0x0
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q2
+; CHECK-NEXT: veor q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = select <16 x i1> %c, <16 x i8> %x, <16 x i8> zeroinitializer
+ %b = xor <16 x i8> %a, %y
+ ret <16 x i8> %b
+}
+
+; and-not / or-not "_y" variants: (~%y) op %x is computed unpredicated
+; (matching MVE's vbic/vorn), then the result is selected against %y.
+define arm_aapcs_vfpcc <4 x i32> @andnot_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: andnot_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vbic q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %y1 = xor <4 x i32> %y, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %a = and <4 x i32> %y1, %x
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @andnot_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: andnot_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vbic q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %y1 = xor <8 x i16> %y, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ %a = and <8 x i16> %y1, %x
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @andnot_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: andnot_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vbic q0, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %y1 = xor <16 x i8> %y, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %a = and <16 x i8> %y1, %x
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @ornot_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: ornot_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vorn q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %y1 = xor <4 x i32> %y, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %a = or <4 x i32> %y1, %x
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @ornot_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: ornot_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vorn q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %y1 = xor <8 x i16> %y, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ %a = or <8 x i16> %y1, %x
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @ornot_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: ornot_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vorn q0, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %y1 = xor <16 x i8> %y, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %a = or <16 x i8> %y1, %x
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+; Float "_y" variants: the unpredicated fadd/fsub/fmul result is selected
+; against the second operand (%y) under the VCTP predicate, lowering to the
+; plain vector op followed by a vpsel.
+define arm_aapcs_vfpcc <4 x float> @fadd_v4f32_y(<4 x float> %x, <4 x float> %y, i32 %n) {
+; CHECK-LABEL: fadd_v4f32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.f32 q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = fadd <4 x float> %x, %y
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %y
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fadd_v8f16_y(<8 x half> %x, <8 x half> %y, i32 %n) {
+; CHECK-LABEL: fadd_v8f16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vadd.f16 q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = fadd <8 x half> %x, %y
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %y
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fsub_v4f32_y(<4 x float> %x, <4 x float> %y, i32 %n) {
+; CHECK-LABEL: fsub_v4f32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.f32 q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = fsub <4 x float> %x, %y
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %y
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fsub_v8f16_y(<8 x half> %x, <8 x half> %y, i32 %n) {
+; CHECK-LABEL: fsub_v8f16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.f16 q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = fsub <8 x half> %x, %y
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %y
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fmul_v4f32_y(<4 x float> %x, <4 x float> %y, i32 %n) {
+; CHECK-LABEL: fmul_v4f32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.f32 q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = fmul <4 x float> %x, %y
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %y
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fmul_v8f16_y(<8 x half> %x, <8 x half> %y, i32 %n) {
+; CHECK-LABEL: fmul_v8f16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmul.f16 q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = fmul <8 x half> %x, %y
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %y
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @icmp_slt_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: icmp_slt_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vcmpt.s32 gt, q1, q0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a1 = icmp slt <4 x i32> %x, %y
+ %0 = and <4 x i1> %c, %a1
+ %b = select <4 x i1> %0, <4 x i32> %x, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @icmp_slt_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: icmp_slt_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vcmpt.s16 gt, q1, q0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a1 = icmp slt <8 x i16> %x, %y
+ %0 = and <8 x i1> %c, %a1
+ %b = select <8 x i1> %0, <8 x i16> %x, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @icmp_slt_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: icmp_slt_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vcmpt.s8 gt, q1, q0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a1 = icmp slt <16 x i8> %x, %y
+ %0 = and <16 x i1> %c, %a1
+ %b = select <16 x i1> %0, <16 x i8> %x, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @icmp_sgt_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: icmp_sgt_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vcmpt.s32 gt, q0, q1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a1 = icmp sgt <4 x i32> %x, %y
+ %0 = and <4 x i1> %c, %a1
+ %b = select <4 x i1> %0, <4 x i32> %x, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @icmp_sgt_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: icmp_sgt_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vcmpt.s16 gt, q0, q1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a1 = icmp sgt <8 x i16> %x, %y
+ %0 = and <8 x i1> %c, %a1
+ %b = select <8 x i1> %0, <8 x i16> %x, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @icmp_sgt_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: icmp_sgt_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vcmpt.s8 gt, q0, q1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a1 = icmp sgt <16 x i8> %x, %y
+ %0 = and <16 x i1> %c, %a1
+ %b = select <16 x i1> %0, <16 x i8> %x, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @icmp_ult_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: icmp_ult_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vcmpt.u32 hi, q1, q0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a1 = icmp ult <4 x i32> %x, %y
+ %0 = and <4 x i1> %c, %a1
+ %b = select <4 x i1> %0, <4 x i32> %x, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @icmp_ult_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: icmp_ult_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vcmpt.u16 hi, q1, q0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a1 = icmp ult <8 x i16> %x, %y
+ %0 = and <8 x i1> %c, %a1
+ %b = select <8 x i1> %0, <8 x i16> %x, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @icmp_ult_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: icmp_ult_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vcmpt.u8 hi, q1, q0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a1 = icmp ult <16 x i8> %x, %y
+ %0 = and <16 x i1> %c, %a1
+ %b = select <16 x i1> %0, <16 x i8> %x, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @icmp_ugt_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: icmp_ugt_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vcmpt.u32 hi, q0, q1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a1 = icmp ugt <4 x i32> %x, %y
+ %0 = and <4 x i1> %c, %a1
+ %b = select <4 x i1> %0, <4 x i32> %x, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @icmp_ugt_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: icmp_ugt_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vcmpt.u16 hi, q0, q1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a1 = icmp ugt <8 x i16> %x, %y
+ %0 = and <8 x i1> %c, %a1
+ %b = select <8 x i1> %0, <8 x i16> %x, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @icmp_ugt_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: icmp_ugt_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vcmpt.u8 hi, q0, q1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a1 = icmp ugt <16 x i8> %x, %y
+ %0 = and <16 x i1> %c, %a1
+ %b = select <16 x i1> %0, <16 x i8> %x, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fcmp_fast_olt_v4f32_y(<4 x float> %x, <4 x float> %y, i32 %n) {
+; CHECK-LABEL: fcmp_fast_olt_v4f32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vcmpt.f32 gt, q1, q0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a1 = fcmp fast olt <4 x float> %x, %y
+ %0 = and <4 x i1> %c, %a1
+ %b = select <4 x i1> %0, <4 x float> %x, <4 x float> %y
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fcmp_fast_olt_v8f16_y(<8 x half> %x, <8 x half> %y, i32 %n) {
+; CHECK-LABEL: fcmp_fast_olt_v8f16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vcmpt.f16 gt, q1, q0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a1 = fcmp fast olt <8 x half> %x, %y
+ %0 = and <8 x i1> %c, %a1
+ %b = select <8 x i1> %0, <8 x half> %x, <8 x half> %y
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fcmp_fast_ogt_v4f32_y(<4 x float> %x, <4 x float> %y, i32 %n) {
+; CHECK-LABEL: fcmp_fast_ogt_v4f32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vcmpt.f32 gt, q0, q1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a1 = fcmp fast ogt <4 x float> %x, %y
+ %0 = and <4 x i1> %c, %a1
+ %b = select <4 x i1> %0, <4 x float> %x, <4 x float> %y
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fcmp_fast_ogt_v8f16_y(<8 x half> %x, <8 x half> %y, i32 %n) {
+; CHECK-LABEL: fcmp_fast_ogt_v8f16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vcmpt.f16 gt, q0, q1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a1 = fcmp fast ogt <8 x half> %x, %y
+ %0 = and <8 x i1> %c, %a1
+ %b = select <8 x i1> %0, <8 x half> %x, <8 x half> %y
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sadd_sat_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: sadd_sat_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s32 q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @sadd_sat_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: sadd_sat_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s16 q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @sadd_sat_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: sadd_sat_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s8 q0, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @uadd_sat_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: uadd_sat_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u32 q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @uadd_sat_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: uadd_sat_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u16 q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @uadd_sat_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: uadd_sat_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u8 q0, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @ssub_sat_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: ssub_sat_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s32 q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @ssub_sat_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: ssub_sat_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s16 q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @ssub_sat_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: ssub_sat_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s8 q0, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @usub_sat_v4i32_y(<4 x i32> %x, <4 x i32> %y, i32 %n) {
+; CHECK-LABEL: usub_sat_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u32 q0, q0, q1
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %a = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %y
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @usub_sat_v8i16_y(<8 x i16> %x, <8 x i16> %y, i32 %n) {
+; CHECK-LABEL: usub_sat_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u16 q0, q0, q1
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %a = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %y
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @usub_sat_v16i8_y(<16 x i8> %x, <16 x i8> %y, i32 %n) {
+; CHECK-LABEL: usub_sat_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u8 q0, q0, q1
+; CHECK-NEXT: vctp.8 r0
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %a = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %y
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @addqr_v4i32_y(<4 x i32> %x, i32 %y, i32 %n) {
+; CHECK-LABEL: addqr_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q1, #0x0
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: vadd.i32 q0, q0, r0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = select <4 x i1> %c, <4 x i32> %x, <4 x i32> zeroinitializer
+ %b = add <4 x i32> %ys, %a
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @addqr_v8i16_y(<8 x i16> %x, i16 %y, i32 %n) {
+; CHECK-LABEL: addqr_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q1, #0x0
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: vadd.i16 q0, q0, r0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = select <8 x i1> %c, <8 x i16> %x, <8 x i16> zeroinitializer
+ %b = add <8 x i16> %ys, %a
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @addqr_v16i8_y(<16 x i8> %x, i8 %y, i32 %n) {
+; CHECK-LABEL: addqr_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q1, #0x0
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: vadd.i8 q0, q0, r0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = select <16 x i1> %c, <16 x i8> %x, <16 x i8> zeroinitializer
+ %b = add <16 x i8> %ys, %a
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @subqr_v4i32_y(<4 x i32> %x, i32 %y, i32 %n) {
+; CHECK-LABEL: subqr_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.i32 q0, q0, r0
+; CHECK-NEXT: vdup.32 q1, r0
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = sub <4 x i32> %x, %ys
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %ys
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @subqr_v8i16_y(<8 x i16> %x, i16 %y, i32 %n) {
+; CHECK-LABEL: subqr_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.i16 q0, q0, r0
+; CHECK-NEXT: vdup.16 q1, r0
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = sub <8 x i16> %x, %ys
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %ys
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @subqr_v16i8_y(<16 x i8> %x, i8 %y, i32 %n) {
+; CHECK-LABEL: subqr_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vsub.i8 q0, q0, r0
+; CHECK-NEXT: vdup.8 q1, r0
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = sub <16 x i8> %x, %ys
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %ys
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mulqr_v4i32_y(<4 x i32> %x, i32 %y, i32 %n) {
+; CHECK-LABEL: mulqr_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q1, #0x1
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: vmul.i32 q0, q0, r0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = select <4 x i1> %c, <4 x i32> %x, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %b = mul <4 x i32> %ys, %a
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @mulqr_v8i16_y(<8 x i16> %x, i16 %y, i32 %n) {
+; CHECK-LABEL: mulqr_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i16 q1, #0x1
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: vmul.i16 q0, q0, r0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = select <8 x i1> %c, <8 x i16> %x, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %b = mul <8 x i16> %ys, %a
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @mulqr_v16i8_y(<16 x i8> %x, i8 %y, i32 %n) {
+; CHECK-LABEL: mulqr_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i8 q1, #0x1
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: vmul.i8 q0, q0, r0
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = select <16 x i1> %c, <16 x i8> %x, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %b = mul <16 x i8> %ys, %a
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @faddqr_v4f32_y(<4 x float> %x, float %y, i32 %n) {
+; CHECK-LABEL: faddqr_v4f32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov r1, s4
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vadd.f32 q0, q0, r1
+; CHECK-NEXT: vdup.32 q1, r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x float> undef, float %y, i32 0
+ %ys = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
+ %a = fadd <4 x float> %ys, %x
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %ys
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @faddqr_v8f16_y(<8 x half> %x, half %y, i32 %n) {
+; CHECK-LABEL: faddqr_v8f16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.f16 r1, s4
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vadd.f16 q0, q0, r1
+; CHECK-NEXT: vdup.16 q1, r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x half> undef, half %y, i32 0
+ %ys = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
+ %a = fadd <8 x half> %ys, %x
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %ys
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fsubqr_v4f32_y(<4 x float> %x, float %y, i32 %n) {
+; CHECK-LABEL: fsubqr_v4f32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov r1, s4
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vsub.f32 q0, q0, r1
+; CHECK-NEXT: vdup.32 q1, r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x float> undef, float %y, i32 0
+ %ys = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
+ %a = fsub <4 x float> %x, %ys
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %ys
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fsubqr_v8f16_y(<8 x half> %x, half %y, i32 %n) {
+; CHECK-LABEL: fsubqr_v8f16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.f16 r1, s4
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vsub.f16 q0, q0, r1
+; CHECK-NEXT: vdup.16 q1, r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x half> undef, half %y, i32 0
+ %ys = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
+ %a = fsub <8 x half> %x, %ys
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %ys
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x float> @fmulqr_v4f32_y(<4 x float> %x, float %y, i32 %n) {
+; CHECK-LABEL: fmulqr_v4f32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov r1, s4
+; CHECK-NEXT: vctp.32 r0
+; CHECK-NEXT: vmul.f32 q0, q0, r1
+; CHECK-NEXT: vdup.32 q1, r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x float> undef, float %y, i32 0
+ %ys = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
+ %a = fmul <4 x float> %ys, %x
+ %b = select <4 x i1> %c, <4 x float> %a, <4 x float> %ys
+ ret <4 x float> %b
+}
+
+define arm_aapcs_vfpcc <8 x half> @fmulqr_v8f16_y(<8 x half> %x, half %y, i32 %n) {
+; CHECK-LABEL: fmulqr_v8f16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.f16 r1, s4
+; CHECK-NEXT: vctp.16 r0
+; CHECK-NEXT: vmul.f16 q0, q0, r1
+; CHECK-NEXT: vdup.16 q1, r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x half> undef, half %y, i32 0
+ %ys = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
+ %a = fmul <8 x half> %ys, %x
+ %b = select <8 x i1> %c, <8 x half> %a, <8 x half> %ys
+ ret <8 x half> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sadd_satqr_v4i32_y(<4 x i32> %x, i32 %y, i32 %n) {
+; CHECK-LABEL: sadd_satqr_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s32 q0, q0, r0
+; CHECK-NEXT: vdup.32 q1, r0
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %x, <4 x i32> %ys)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %ys
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @sadd_satqr_v8i16_y(<8 x i16> %x, i16 %y, i32 %n) {
+; CHECK-LABEL: sadd_satqr_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s16 q0, q0, r0
+; CHECK-NEXT: vdup.16 q1, r0
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %x, <8 x i16> %ys)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %ys
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @sadd_satqr_v16i8_y(<16 x i8> %x, i8 %y, i32 %n) {
+; CHECK-LABEL: sadd_satqr_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.s8 q0, q0, r0
+; CHECK-NEXT: vdup.8 q1, r0
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %x, <16 x i8> %ys)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %ys
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @uadd_satqr_v4i32_y(<4 x i32> %x, i32 %y, i32 %n) {
+; CHECK-LABEL: uadd_satqr_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u32 q0, q0, r0
+; CHECK-NEXT: vdup.32 q1, r0
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %x, <4 x i32> %ys)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %ys
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @uadd_satqr_v8i16_y(<8 x i16> %x, i16 %y, i32 %n) {
+; CHECK-LABEL: uadd_satqr_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u16 q0, q0, r0
+; CHECK-NEXT: vdup.16 q1, r0
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %x, <8 x i16> %ys)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %ys
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @uadd_satqr_v16i8_y(<16 x i8> %x, i8 %y, i32 %n) {
+; CHECK-LABEL: uadd_satqr_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqadd.u8 q0, q0, r0
+; CHECK-NEXT: vdup.8 q1, r0
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %x, <16 x i8> %ys)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %ys
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @ssub_satqr_v4i32_y(<4 x i32> %x, i32 %y, i32 %n) {
+; CHECK-LABEL: ssub_satqr_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s32 q0, q0, r0
+; CHECK-NEXT: vdup.32 q1, r0
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %x, <4 x i32> %ys)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %ys
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @ssub_satqr_v8i16_y(<8 x i16> %x, i16 %y, i32 %n) {
+; CHECK-LABEL: ssub_satqr_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s16 q0, q0, r0
+; CHECK-NEXT: vdup.16 q1, r0
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %x, <8 x i16> %ys)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %ys
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @ssub_satqr_v16i8_y(<16 x i8> %x, i8 %y, i32 %n) {
+; CHECK-LABEL: ssub_satqr_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.s8 q0, q0, r0
+; CHECK-NEXT: vdup.8 q1, r0
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %x, <16 x i8> %ys)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %ys
+ ret <16 x i8> %b
+}
+
+define arm_aapcs_vfpcc <4 x i32> @usub_satqr_v4i32_y(<4 x i32> %x, i32 %y, i32 %n) {
+; CHECK-LABEL: usub_satqr_v4i32_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u32 q0, q0, r0
+; CHECK-NEXT: vdup.32 q1, r0
+; CHECK-NEXT: vctp.32 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <4 x i1> @llvm.arm.mve.vctp32(i32 %n)
+ %i = insertelement <4 x i32> undef, i32 %y, i32 0
+ %ys = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
+ %a = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %x, <4 x i32> %ys)
+ %b = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %ys
+ ret <4 x i32> %b
+}
+
+define arm_aapcs_vfpcc <8 x i16> @usub_satqr_v8i16_y(<8 x i16> %x, i16 %y, i32 %n) {
+; CHECK-LABEL: usub_satqr_v8i16_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u16 q0, q0, r0
+; CHECK-NEXT: vdup.16 q1, r0
+; CHECK-NEXT: vctp.16 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
+ %i = insertelement <8 x i16> undef, i16 %y, i32 0
+ %ys = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
+ %a = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %x, <8 x i16> %ys)
+ %b = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %ys
+ ret <8 x i16> %b
+}
+
+define arm_aapcs_vfpcc <16 x i8> @usub_satqr_v16i8_y(<16 x i8> %x, i8 %y, i32 %n) {
+; CHECK-LABEL: usub_satqr_v16i8_y:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqsub.u8 q0, q0, r0
+; CHECK-NEXT: vdup.8 q1, r0
+; CHECK-NEXT: vctp.8 r1
+; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = call <16 x i1> @llvm.arm.mve.vctp8(i32 %n)
+ %i = insertelement <16 x i8> undef, i8 %y, i32 0
+ %ys = shufflevector <16 x i8> %i, <16 x i8> undef, <16 x i32> zeroinitializer
+ %a = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %x, <16 x i8> %ys)
+ %b = select <16 x i1> %c, <16 x i8> %a, <16 x i8> %ys
+ ret <16 x i8> %b
+}
+
+declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32>, <4 x i32>)
+declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32>, <4 x i32>)
+declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32>, <4 x i32>)
+declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>)
+
+declare <16 x i1> @llvm.arm.mve.vctp8(i32)
+declare <8 x i1> @llvm.arm.mve.vctp16(i32)
+declare <4 x i1> @llvm.arm.mve.vctp32(i32)
More information about the llvm-commits
mailing list