[clang] e13d153 - [ARM,MVE] Add intrinsics for the VQDMLAD family.

Simon Tatham via cfe-commits cfe-commits at lists.llvm.org
Wed Mar 18 10:13:06 PDT 2020


Author: Simon Tatham
Date: 2020-03-18T17:11:22Z
New Revision: e13d153c1b59a11185bf6a1aa8853c9e14d556a5

URL: https://github.com/llvm/llvm-project/commit/e13d153c1b59a11185bf6a1aa8853c9e14d556a5
DIFF: https://github.com/llvm/llvm-project/commit/e13d153c1b59a11185bf6a1aa8853c9e14d556a5.diff

LOG: [ARM,MVE] Add intrinsics for the VQDMLAD family.

Summary:
This is another set of instructions too complicated to be sensibly
expressed in IR by anything short of a target-specific intrinsic.
Given input vectors a,b, the instruction generates intermediate values
2*(a[0]*b[0]+a[1]*b[1]), 2*(a[2]*b[2]+a[3]*b[3]), etc; takes the high
half of each double-width value; and overwrites half the lanes in the
output vector c, whose input value you therefore have to provide.
Optionally you can swap the elements of b, so that the products become
things like a[0]*b[1]+a[1]*b[0]; optionally you can round to nearest
when taking the high half; and optionally you can take the difference
rather than the sum of the two products. Finally, saturation is
applied when converting back to a single-width vector lane.
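For reference, a minimal C-level usage sketch of the new intrinsics
(the wrapper function names here are invented for illustration; the
intrinsic names and signatures are the ones exercised by the test file
below, and the build is assumed to target MVE with something along the
lines of --target=arm-none-eabi -march=armv8.1-m.main+mve):

    #include <arm_mve.h>

    // Unpredicated: form 2*(a[2i]*b[2i] + a[2i+1]*b[2i+1]) for each lane
    // pair, take the saturated high half, and overwrite half the lanes of
    // acc; the remaining lanes of acc pass through unchanged.
    int16x8_t accumulate(int16x8_t acc, int16x8_t a, int16x8_t b) {
      return vqdmladhq_s16(acc, a, b);   // or polymorphic vqdmladhq(...)
    }

    // Predicated (_m, merging) variant: lanes whose predicate bit is clear
    // keep their value from the first argument. This one also selects the
    // rounding (vqrd...) and exchanged (...x...) form.
    int16x8_t accumulate_masked(int16x8_t acc, int16x8_t a, int16x8_t b,
                                mve_pred16_t p) {
      return vqrdmladhxq_m_s16(acc, a, b, p);
    }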

Reviewers: dmgreen, MarkMurrayARM, miyuki, ostannard

Reviewed By: miyuki

Subscribers: kristof.beyls, hiraditya, cfe-commits

Tags: #clang

Differential Revision: https://reviews.llvm.org/D76359

Added: 
    clang/test/CodeGen/arm-mve-intrinsics/vqdmlad.c
    llvm/test/CodeGen/Thumb2/mve-intrinsics/vqdmlad.ll

Modified: 
    clang/include/clang/Basic/arm_mve.td
    llvm/include/llvm/IR/IntrinsicsARM.td
    llvm/lib/Target/ARM/ARMInstrMVE.td

Removed: 
    


################################################################################
diff  --git a/clang/include/clang/Basic/arm_mve.td b/clang/include/clang/Basic/arm_mve.td
index ae6ce4837d76..45e45899de5f 100644
--- a/clang/include/clang/Basic/arm_mve.td
+++ b/clang/include/clang/Basic/arm_mve.td
@@ -243,6 +243,26 @@ let params = T.Signed, pnt = PNT_NType in {
   defm vqrdmla: VQDMLA;
 }
 
+multiclass VQDMLAD<int exchange, int round, int subtract> {
+  def "": Intrinsic<Vector, (args Vector:$a, Vector:$b, Vector:$c),
+       (IRInt<"vqdmlad", [Vector]> $a, $b, $c,
+            (u32 exchange), (u32 round), (u32 subtract))>;
+  def _m: Intrinsic<Vector, (args Vector:$a, Vector:$b, Vector:$c,
+                                  Predicate:$pred),
+       (IRInt<"vqdmlad_predicated", [Vector, Predicate]> $a, $b, $c,
+            (u32 exchange), (u32 round), (u32 subtract), $pred)>;
+}
+let params = T.Signed in {
+  defm vqdmladhq:   VQDMLAD<0, 0, 0>;
+  defm vqdmladhxq:  VQDMLAD<1, 0, 0>;
+  defm vqdmlsdhq:   VQDMLAD<0, 0, 1>;
+  defm vqdmlsdhxq:  VQDMLAD<1, 0, 1>;
+  defm vqrdmladhq:  VQDMLAD<0, 1, 0>;
+  defm vqrdmladhxq: VQDMLAD<1, 1, 0>;
+  defm vqrdmlsdhq:  VQDMLAD<0, 1, 1>;
+  defm vqrdmlsdhxq: VQDMLAD<1, 1, 1>;
+}
+
 let params = !listconcat(T.Int16, T.Int32) in {
   let pnt = PNT_None in {
     def vmvnq_n: Intrinsic<Vector, (args imm_simd_vmvn:$imm),

diff  --git a/clang/test/CodeGen/arm-mve-intrinsics/vqdmlad.c b/clang/test/CodeGen/arm-mve-intrinsics/vqdmlad.c
new file mode 100644
index 000000000000..cd59e6ccdfb7
--- /dev/null
+++ b/clang/test/CodeGen/arm-mve-intrinsics/vqdmlad.c
@@ -0,0 +1,677 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
+// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -DPOLYMORPHIC -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
+
+#include <arm_mve.h>
+
+// CHECK-LABEL: @test_vqdmladhq_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vqdmlad.v16i8(<16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 0, i32 0, i32 0)
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+int8x16_t test_vqdmladhq_s8(int8x16_t inactive, int8x16_t a, int8x16_t b) {
+#ifdef POLYMORPHIC
+  return vqdmladhq(inactive, a, b);
+#else  /* POLYMORPHIC */
+  return vqdmladhq_s8(inactive, a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqdmladhq_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vqdmlad.v8i16(<8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 0, i32 0)
+// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
+//
+int16x8_t test_vqdmladhq_s16(int16x8_t inactive, int16x8_t a, int16x8_t b) {
+#ifdef POLYMORPHIC
+  return vqdmladhq(inactive, a, b);
+#else  /* POLYMORPHIC */
+  return vqdmladhq_s16(inactive, a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqdmladhq_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vqdmlad.v4i32(<4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 0, i32 0)
+// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
+//
+int32x4_t test_vqdmladhq_s32(int32x4_t inactive, int32x4_t a, int32x4_t b) {
+#ifdef POLYMORPHIC
+  return vqdmladhq(inactive, a, b);
+#else  /* POLYMORPHIC */
+  return vqdmladhq_s32(inactive, a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqdmladhxq_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vqdmlad.v16i8(<16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 1, i32 0, i32 0)
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+int8x16_t test_vqdmladhxq_s8(int8x16_t inactive, int8x16_t a, int8x16_t b) {
+#ifdef POLYMORPHIC
+  return vqdmladhxq(inactive, a, b);
+#else  /* POLYMORPHIC */
+  return vqdmladhxq_s8(inactive, a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqdmladhxq_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vqdmlad.v8i16(<8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 0, i32 0)
+// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
+//
+int16x8_t test_vqdmladhxq_s16(int16x8_t inactive, int16x8_t a, int16x8_t b) {
+#ifdef POLYMORPHIC
+  return vqdmladhxq(inactive, a, b);
+#else  /* POLYMORPHIC */
+  return vqdmladhxq_s16(inactive, a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqdmladhxq_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vqdmlad.v4i32(<4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 0, i32 0)
+// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
+//
+int32x4_t test_vqdmladhxq_s32(int32x4_t inactive, int32x4_t a, int32x4_t b) {
+#ifdef POLYMORPHIC
+  return vqdmladhxq(inactive, a, b);
+#else  /* POLYMORPHIC */
+  return vqdmladhxq_s32(inactive, a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqdmlsdhq_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vqdmlad.v16i8(<16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 0, i32 0, i32 1)
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+int8x16_t test_vqdmlsdhq_s8(int8x16_t inactive, int8x16_t a, int8x16_t b) {
+#ifdef POLYMORPHIC
+  return vqdmlsdhq(inactive, a, b);
+#else  /* POLYMORPHIC */
+  return vqdmlsdhq_s8(inactive, a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqdmlsdhq_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vqdmlad.v8i16(<8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 0, i32 1)
+// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
+//
+int16x8_t test_vqdmlsdhq_s16(int16x8_t inactive, int16x8_t a, int16x8_t b) {
+#ifdef POLYMORPHIC
+  return vqdmlsdhq(inactive, a, b);
+#else  /* POLYMORPHIC */
+  return vqdmlsdhq_s16(inactive, a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqdmlsdhq_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vqdmlad.v4i32(<4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 0, i32 1)
+// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
+//
+int32x4_t test_vqdmlsdhq_s32(int32x4_t inactive, int32x4_t a, int32x4_t b) {
+#ifdef POLYMORPHIC
+  return vqdmlsdhq(inactive, a, b);
+#else  /* POLYMORPHIC */
+  return vqdmlsdhq_s32(inactive, a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqdmlsdhxq_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vqdmlad.v16i8(<16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 1, i32 0, i32 1)
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+int8x16_t test_vqdmlsdhxq_s8(int8x16_t inactive, int8x16_t a, int8x16_t b) {
+#ifdef POLYMORPHIC
+  return vqdmlsdhxq(inactive, a, b);
+#else  /* POLYMORPHIC */
+  return vqdmlsdhxq_s8(inactive, a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqdmlsdhxq_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vqdmlad.v8i16(<8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 0, i32 1)
+// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
+//
+int16x8_t test_vqdmlsdhxq_s16(int16x8_t inactive, int16x8_t a, int16x8_t b) {
+#ifdef POLYMORPHIC
+  return vqdmlsdhxq(inactive, a, b);
+#else  /* POLYMORPHIC */
+  return vqdmlsdhxq_s16(inactive, a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqdmlsdhxq_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vqdmlad.v4i32(<4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 0, i32 1)
+// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
+//
+int32x4_t test_vqdmlsdhxq_s32(int32x4_t inactive, int32x4_t a, int32x4_t b) {
+#ifdef POLYMORPHIC
+  return vqdmlsdhxq(inactive, a, b);
+#else  /* POLYMORPHIC */
+  return vqdmlsdhxq_s32(inactive, a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqrdmladhq_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vqdmlad.v16i8(<16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 0, i32 1, i32 0)
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+int8x16_t test_vqrdmladhq_s8(int8x16_t inactive, int8x16_t a, int8x16_t b) {
+#ifdef POLYMORPHIC
+  return vqrdmladhq(inactive, a, b);
+#else  /* POLYMORPHIC */
+  return vqrdmladhq_s8(inactive, a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqrdmladhq_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vqdmlad.v8i16(<8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 1, i32 0)
+// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
+//
+int16x8_t test_vqrdmladhq_s16(int16x8_t inactive, int16x8_t a, int16x8_t b) {
+#ifdef POLYMORPHIC
+  return vqrdmladhq(inactive, a, b);
+#else  /* POLYMORPHIC */
+  return vqrdmladhq_s16(inactive, a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqrdmladhq_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vqdmlad.v4i32(<4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 1, i32 0)
+// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
+//
+int32x4_t test_vqrdmladhq_s32(int32x4_t inactive, int32x4_t a, int32x4_t b) {
+#ifdef POLYMORPHIC
+  return vqrdmladhq(inactive, a, b);
+#else  /* POLYMORPHIC */
+  return vqrdmladhq_s32(inactive, a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqrdmladhxq_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vqdmlad.v16i8(<16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 1, i32 1, i32 0)
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+int8x16_t test_vqrdmladhxq_s8(int8x16_t inactive, int8x16_t a, int8x16_t b) {
+#ifdef POLYMORPHIC
+  return vqrdmladhxq(inactive, a, b);
+#else  /* POLYMORPHIC */
+  return vqrdmladhxq_s8(inactive, a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqrdmladhxq_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vqdmlad.v8i16(<8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 1, i32 0)
+// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
+//
+int16x8_t test_vqrdmladhxq_s16(int16x8_t inactive, int16x8_t a, int16x8_t b) {
+#ifdef POLYMORPHIC
+  return vqrdmladhxq(inactive, a, b);
+#else  /* POLYMORPHIC */
+  return vqrdmladhxq_s16(inactive, a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqrdmladhxq_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vqdmlad.v4i32(<4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 1, i32 0)
+// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
+//
+int32x4_t test_vqrdmladhxq_s32(int32x4_t inactive, int32x4_t a, int32x4_t b) {
+#ifdef POLYMORPHIC
+  return vqrdmladhxq(inactive, a, b);
+#else  /* POLYMORPHIC */
+  return vqrdmladhxq_s32(inactive, a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqrdmlsdhq_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vqdmlad.v16i8(<16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 0, i32 1, i32 1)
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+int8x16_t test_vqrdmlsdhq_s8(int8x16_t inactive, int8x16_t a, int8x16_t b) {
+#ifdef POLYMORPHIC
+  return vqrdmlsdhq(inactive, a, b);
+#else  /* POLYMORPHIC */
+  return vqrdmlsdhq_s8(inactive, a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqrdmlsdhq_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vqdmlad.v8i16(<8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 1, i32 1)
+// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
+//
+int16x8_t test_vqrdmlsdhq_s16(int16x8_t inactive, int16x8_t a, int16x8_t b) {
+#ifdef POLYMORPHIC
+  return vqrdmlsdhq(inactive, a, b);
+#else  /* POLYMORPHIC */
+  return vqrdmlsdhq_s16(inactive, a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqrdmlsdhq_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vqdmlad.v4i32(<4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 1, i32 1)
+// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
+//
+int32x4_t test_vqrdmlsdhq_s32(int32x4_t inactive, int32x4_t a, int32x4_t b) {
+#ifdef POLYMORPHIC
+  return vqrdmlsdhq(inactive, a, b);
+#else  /* POLYMORPHIC */
+  return vqrdmlsdhq_s32(inactive, a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqrdmlsdhxq_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vqdmlad.v16i8(<16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 1, i32 1, i32 1)
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+int8x16_t test_vqrdmlsdhxq_s8(int8x16_t inactive, int8x16_t a, int8x16_t b) {
+#ifdef POLYMORPHIC
+  return vqrdmlsdhxq(inactive, a, b);
+#else  /* POLYMORPHIC */
+  return vqrdmlsdhxq_s8(inactive, a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqrdmlsdhxq_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vqdmlad.v8i16(<8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 1, i32 1)
+// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
+//
+int16x8_t test_vqrdmlsdhxq_s16(int16x8_t inactive, int16x8_t a, int16x8_t b) {
+#ifdef POLYMORPHIC
+  return vqrdmlsdhxq(inactive, a, b);
+#else  /* POLYMORPHIC */
+  return vqrdmlsdhxq_s16(inactive, a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqrdmlsdhxq_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vqdmlad.v4i32(<4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 1, i32 1)
+// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
+//
+int32x4_t test_vqrdmlsdhxq_s32(int32x4_t inactive, int32x4_t a, int32x4_t b) {
+#ifdef POLYMORPHIC
+  return vqrdmlsdhxq(inactive, a, b);
+#else  /* POLYMORPHIC */
+  return vqrdmlsdhxq_s32(inactive, a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqdmladhq_m_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vqdmlad.predicated.v16i8.v16i1(<16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 0, i32 0, i32 0, <16 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <16 x i8> [[TMP2]]
+//
+int8x16_t test_vqdmladhq_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p) {
+#ifdef POLYMORPHIC
+  return vqdmladhq_m(inactive, a, b, p);
+#else  /* POLYMORPHIC */
+  return vqdmladhq_m_s8(inactive, a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqdmladhq_m_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vqdmlad.predicated.v8i16.v8i1(<8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 0, i32 0, <8 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vqdmladhq_m_s16(int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p) {
+#ifdef POLYMORPHIC
+  return vqdmladhq_m(inactive, a, b, p);
+#else  /* POLYMORPHIC */
+  return vqdmladhq_m_s16(inactive, a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqdmladhq_m_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vqdmlad.predicated.v4i32.v4i1(<4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 0, i32 0, <4 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vqdmladhq_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p) {
+#ifdef POLYMORPHIC
+  return vqdmladhq_m(inactive, a, b, p);
+#else  /* POLYMORPHIC */
+  return vqdmladhq_m_s32(inactive, a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqdmladhxq_m_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vqdmlad.predicated.v16i8.v16i1(<16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 1, i32 0, i32 0, <16 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <16 x i8> [[TMP2]]
+//
+int8x16_t test_vqdmladhxq_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p) {
+#ifdef POLYMORPHIC
+  return vqdmladhxq_m(inactive, a, b, p);
+#else  /* POLYMORPHIC */
+  return vqdmladhxq_m_s8(inactive, a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqdmladhxq_m_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vqdmlad.predicated.v8i16.v8i1(<8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 0, i32 0, <8 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vqdmladhxq_m_s16(int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p) {
+#ifdef POLYMORPHIC
+  return vqdmladhxq_m(inactive, a, b, p);
+#else  /* POLYMORPHIC */
+  return vqdmladhxq_m_s16(inactive, a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqdmladhxq_m_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vqdmlad.predicated.v4i32.v4i1(<4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 0, i32 0, <4 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vqdmladhxq_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p) {
+#ifdef POLYMORPHIC
+  return vqdmladhxq_m(inactive, a, b, p);
+#else  /* POLYMORPHIC */
+  return vqdmladhxq_m_s32(inactive, a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqdmlsdhq_m_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vqdmlad.predicated.v16i8.v16i1(<16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 0, i32 0, i32 1, <16 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <16 x i8> [[TMP2]]
+//
+int8x16_t test_vqdmlsdhq_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p) {
+#ifdef POLYMORPHIC
+  return vqdmlsdhq_m(inactive, a, b, p);
+#else  /* POLYMORPHIC */
+  return vqdmlsdhq_m_s8(inactive, a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqdmlsdhq_m_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vqdmlad.predicated.v8i16.v8i1(<8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 0, i32 1, <8 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vqdmlsdhq_m_s16(int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p) {
+#ifdef POLYMORPHIC
+  return vqdmlsdhq_m(inactive, a, b, p);
+#else  /* POLYMORPHIC */
+  return vqdmlsdhq_m_s16(inactive, a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqdmlsdhq_m_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vqdmlad.predicated.v4i32.v4i1(<4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 0, i32 1, <4 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vqdmlsdhq_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p) {
+#ifdef POLYMORPHIC
+  return vqdmlsdhq_m(inactive, a, b, p);
+#else  /* POLYMORPHIC */
+  return vqdmlsdhq_m_s32(inactive, a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqdmlsdhxq_m_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vqdmlad.predicated.v16i8.v16i1(<16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 1, i32 0, i32 1, <16 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <16 x i8> [[TMP2]]
+//
+int8x16_t test_vqdmlsdhxq_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p) {
+#ifdef POLYMORPHIC
+  return vqdmlsdhxq_m(inactive, a, b, p);
+#else  /* POLYMORPHIC */
+  return vqdmlsdhxq_m_s8(inactive, a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqdmlsdhxq_m_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vqdmlad.predicated.v8i16.v8i1(<8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 0, i32 1, <8 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vqdmlsdhxq_m_s16(int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p) {
+#ifdef POLYMORPHIC
+  return vqdmlsdhxq_m(inactive, a, b, p);
+#else  /* POLYMORPHIC */
+  return vqdmlsdhxq_m_s16(inactive, a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqdmlsdhxq_m_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vqdmlad.predicated.v4i32.v4i1(<4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 0, i32 1, <4 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vqdmlsdhxq_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p) {
+#ifdef POLYMORPHIC
+  return vqdmlsdhxq_m(inactive, a, b, p);
+#else  /* POLYMORPHIC */
+  return vqdmlsdhxq_m_s32(inactive, a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqrdmladhq_m_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vqdmlad.predicated.v16i8.v16i1(<16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 0, i32 1, i32 0, <16 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <16 x i8> [[TMP2]]
+//
+int8x16_t test_vqrdmladhq_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p) {
+#ifdef POLYMORPHIC
+  return vqrdmladhq_m(inactive, a, b, p);
+#else  /* POLYMORPHIC */
+  return vqrdmladhq_m_s8(inactive, a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqrdmladhq_m_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vqdmlad.predicated.v8i16.v8i1(<8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 1, i32 0, <8 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vqrdmladhq_m_s16(int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p) {
+#ifdef POLYMORPHIC
+  return vqrdmladhq_m(inactive, a, b, p);
+#else  /* POLYMORPHIC */
+  return vqrdmladhq_m_s16(inactive, a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqrdmladhq_m_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vqdmlad.predicated.v4i32.v4i1(<4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 1, i32 0, <4 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vqrdmladhq_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p) {
+#ifdef POLYMORPHIC
+  return vqrdmladhq_m(inactive, a, b, p);
+#else  /* POLYMORPHIC */
+  return vqrdmladhq_m_s32(inactive, a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqrdmladhxq_m_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vqdmlad.predicated.v16i8.v16i1(<16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 1, i32 1, i32 0, <16 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <16 x i8> [[TMP2]]
+//
+int8x16_t test_vqrdmladhxq_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p) {
+#ifdef POLYMORPHIC
+  return vqrdmladhxq_m(inactive, a, b, p);
+#else  /* POLYMORPHIC */
+  return vqrdmladhxq_m_s8(inactive, a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqrdmladhxq_m_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vqdmlad.predicated.v8i16.v8i1(<8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 1, i32 0, <8 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vqrdmladhxq_m_s16(int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p) {
+#ifdef POLYMORPHIC
+  return vqrdmladhxq_m(inactive, a, b, p);
+#else  /* POLYMORPHIC */
+  return vqrdmladhxq_m_s16(inactive, a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqrdmladhxq_m_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vqdmlad.predicated.v4i32.v4i1(<4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 1, i32 0, <4 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vqrdmladhxq_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p) {
+#ifdef POLYMORPHIC
+  return vqrdmladhxq_m(inactive, a, b, p);
+#else  /* POLYMORPHIC */
+  return vqrdmladhxq_m_s32(inactive, a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqrdmlsdhq_m_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vqdmlad.predicated.v16i8.v16i1(<16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 0, i32 1, i32 1, <16 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <16 x i8> [[TMP2]]
+//
+int8x16_t test_vqrdmlsdhq_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p) {
+#ifdef POLYMORPHIC
+  return vqrdmlsdhq_m(inactive, a, b, p);
+#else  /* POLYMORPHIC */
+  return vqrdmlsdhq_m_s8(inactive, a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqrdmlsdhq_m_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vqdmlad.predicated.v8i16.v8i1(<8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 1, i32 1, <8 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vqrdmlsdhq_m_s16(int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p) {
+#ifdef POLYMORPHIC
+  return vqrdmlsdhq_m(inactive, a, b, p);
+#else  /* POLYMORPHIC */
+  return vqrdmlsdhq_m_s16(inactive, a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqrdmlsdhq_m_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vqdmlad.predicated.v4i32.v4i1(<4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 1, i32 1, <4 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vqrdmlsdhq_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p) {
+#ifdef POLYMORPHIC
+  return vqrdmlsdhq_m(inactive, a, b, p);
+#else  /* POLYMORPHIC */
+  return vqrdmlsdhq_m_s32(inactive, a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqrdmlsdhxq_m_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vqdmlad.predicated.v16i8.v16i1(<16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 1, i32 1, i32 1, <16 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <16 x i8> [[TMP2]]
+//
+int8x16_t test_vqrdmlsdhxq_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p) {
+#ifdef POLYMORPHIC
+  return vqrdmlsdhxq_m(inactive, a, b, p);
+#else  /* POLYMORPHIC */
+  return vqrdmlsdhxq_m_s8(inactive, a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqrdmlsdhxq_m_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vqdmlad.predicated.v8i16.v8i1(<8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 1, i32 1, <8 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vqrdmlsdhxq_m_s16(int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p) {
+#ifdef POLYMORPHIC
+  return vqrdmlsdhxq_m(inactive, a, b, p);
+#else  /* POLYMORPHIC */
+  return vqrdmlsdhxq_m_s16(inactive, a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqrdmlsdhxq_m_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vqdmlad.predicated.v4i32.v4i1(<4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 1, i32 1, <4 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vqrdmlsdhxq_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p) {
+#ifdef POLYMORPHIC
+  return vqrdmlsdhxq_m(inactive, a, b, p);
+#else  /* POLYMORPHIC */
+  return vqrdmlsdhxq_m_s32(inactive, a, b, p);
+#endif /* POLYMORPHIC */
+}

diff  --git a/llvm/include/llvm/IR/IntrinsicsARM.td b/llvm/include/llvm/IR/IntrinsicsARM.td
index 00251e52ca24..80ed0792a209 100644
--- a/llvm/include/llvm/IR/IntrinsicsARM.td
+++ b/llvm/include/llvm/IR/IntrinsicsARM.td
@@ -1268,6 +1268,11 @@ defm int_arm_mve_vqrdmlash: MVEPredicated<[llvm_anyvector_ty],
   [LLVMMatchType<0> /* mult op #1 */, LLVMMatchType<0> /* mult op #2 */,
    llvm_i32_ty /* addend (scalar) */]>;
 
+defm int_arm_mve_vqdmlad: MVEPredicated<[llvm_anyvector_ty],
+  [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
+   llvm_i32_ty /* exchange */, llvm_i32_ty /* round */,
+   llvm_i32_ty /* subtract */]>;
+
 // CDE (Custom Datapath Extension)
 
 def int_arm_cde_cx1: Intrinsic<

diff  --git a/llvm/lib/Target/ARM/ARMInstrMVE.td b/llvm/lib/Target/ARM/ARMInstrMVE.td
index d51807501d0d..58164e57ab81 100644
--- a/llvm/lib/Target/ARM/ARMInstrMVE.td
+++ b/llvm/lib/Target/ARM/ARMInstrMVE.td
@@ -4210,11 +4210,32 @@ class MVE_VQxDMLxDH<string iname, bit exch, bit round, bit subtract,
   let Inst{0} = round;
 }
 
+multiclass MVE_VQxDMLxDH_p<string iname, bit exch, bit round, bit subtract,
+                           MVEVectorVTInfo VTI> {
+  def "": MVE_VQxDMLxDH<iname, exch, round, subtract, VTI.Suffix, VTI.Size,
+                        !if(!eq(VTI.LaneBits, 32), ",@earlyclobber $Qd", "")>;
+  defvar Inst = !cast<Instruction>(NAME);
+  defvar ConstParams = (? (i32 exch), (i32 round), (i32 subtract));
+  defvar unpred_intr = int_arm_mve_vqdmlad;
+  defvar pred_intr = int_arm_mve_vqdmlad_predicated;
+
+  def : Pat<(VTI.Vec !con((unpred_intr (VTI.Vec MQPR:$a), (VTI.Vec MQPR:$b),
+                                       (VTI.Vec MQPR:$c)), ConstParams)),
+            (VTI.Vec (Inst (VTI.Vec MQPR:$a), (VTI.Vec MQPR:$b),
+                           (VTI.Vec MQPR:$c)))>;
+  def : Pat<(VTI.Vec !con((pred_intr (VTI.Vec MQPR:$a), (VTI.Vec MQPR:$b),
+                                     (VTI.Vec MQPR:$c)), ConstParams,
+                          (? (VTI.Pred VCCR:$pred)))),
+            (VTI.Vec (Inst (VTI.Vec MQPR:$a), (VTI.Vec MQPR:$b),
+                           (VTI.Vec MQPR:$c),
+                           ARMVCCThen, (VTI.Pred VCCR:$pred)))>;
+}
+
 multiclass MVE_VQxDMLxDH_multi<string iname, bit exch,
                                bit round, bit subtract> {
-  def s8  : MVE_VQxDMLxDH<iname, exch, round, subtract, "s8",  0b00>;
-  def s16 : MVE_VQxDMLxDH<iname, exch, round, subtract, "s16", 0b01>;
-  def s32 : MVE_VQxDMLxDH<iname, exch, round, subtract, "s32", 0b10, ",@earlyclobber $Qd">;
+  defm s8  : MVE_VQxDMLxDH_p<iname, exch, round, subtract, MVE_v16s8>;
+  defm s16 : MVE_VQxDMLxDH_p<iname, exch, round, subtract, MVE_v8s16>;
+  defm s32 : MVE_VQxDMLxDH_p<iname, exch, round, subtract, MVE_v4s32>;
 }
 
 defm MVE_VQDMLADH   : MVE_VQxDMLxDH_multi<"vqdmladh",   0b0, 0b0, 0b0>;

diff  --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vqdmlad.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vqdmlad.ll
new file mode 100644
index 000000000000..0396f96457f4
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vqdmlad.ll
@@ -0,0 +1,589 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqdmladhq_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vqdmladhq_s8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqdmladh.s8 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <16 x i8> @llvm.arm.mve.vqdmlad.v16i8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i32 0, i32 0, i32 0)
+  ret <16 x i8> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqdmladhq_s16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vqdmladhq_s16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqdmladh.s16 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <8 x i16> @llvm.arm.mve.vqdmlad.v8i16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i32 0, i32 0, i32 0)
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vqdmladhq_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vqdmladhq_s32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqdmladh.s32 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <4 x i32> @llvm.arm.mve.vqdmlad.v4i32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 0, i32 0, i32 0)
+  ret <4 x i32> %0
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqdmladhxq_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vqdmladhxq_s8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqdmladhx.s8 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <16 x i8> @llvm.arm.mve.vqdmlad.v16i8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i32 1, i32 0, i32 0)
+  ret <16 x i8> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqdmladhxq_s16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vqdmladhxq_s16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqdmladhx.s16 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <8 x i16> @llvm.arm.mve.vqdmlad.v8i16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i32 1, i32 0, i32 0)
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vqdmladhxq_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vqdmladhxq_s32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqdmladhx.s32 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <4 x i32> @llvm.arm.mve.vqdmlad.v4i32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 1, i32 0, i32 0)
+  ret <4 x i32> %0
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqdmlsdhq_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vqdmlsdhq_s8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqdmlsdh.s8 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <16 x i8> @llvm.arm.mve.vqdmlad.v16i8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i32 0, i32 0, i32 1)
+  ret <16 x i8> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqdmlsdhq_s16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vqdmlsdhq_s16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqdmlsdh.s16 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <8 x i16> @llvm.arm.mve.vqdmlad.v8i16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i32 0, i32 0, i32 1)
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vqdmlsdhq_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vqdmlsdhq_s32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqdmlsdh.s32 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <4 x i32> @llvm.arm.mve.vqdmlad.v4i32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 0, i32 0, i32 1)
+  ret <4 x i32> %0
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqdmlsdhxq_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vqdmlsdhxq_s8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqdmlsdhx.s8 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <16 x i8> @llvm.arm.mve.vqdmlad.v16i8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i32 1, i32 0, i32 1)
+  ret <16 x i8> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqdmlsdhxq_s16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vqdmlsdhxq_s16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqdmlsdhx.s16 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <8 x i16> @llvm.arm.mve.vqdmlad.v8i16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i32 1, i32 0, i32 1)
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vqdmlsdhxq_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vqdmlsdhxq_s32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqdmlsdhx.s32 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <4 x i32> @llvm.arm.mve.vqdmlad.v4i32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 1, i32 0, i32 1)
+  ret <4 x i32> %0
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqrdmladhq_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vqrdmladhq_s8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqrdmladh.s8 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <16 x i8> @llvm.arm.mve.vqdmlad.v16i8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i32 0, i32 1, i32 0)
+  ret <16 x i8> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqrdmladhq_s16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vqrdmladhq_s16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqrdmladh.s16 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <8 x i16> @llvm.arm.mve.vqdmlad.v8i16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i32 0, i32 1, i32 0)
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vqrdmladhq_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vqrdmladhq_s32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqrdmladh.s32 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <4 x i32> @llvm.arm.mve.vqdmlad.v4i32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 0, i32 1, i32 0)
+  ret <4 x i32> %0
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqrdmladhxq_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vqrdmladhxq_s8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqrdmladhx.s8 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <16 x i8> @llvm.arm.mve.vqdmlad.v16i8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i32 1, i32 1, i32 0)
+  ret <16 x i8> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqrdmladhxq_s16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vqrdmladhxq_s16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqrdmladhx.s16 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <8 x i16> @llvm.arm.mve.vqdmlad.v8i16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i32 1, i32 1, i32 0)
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vqrdmladhxq_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vqrdmladhxq_s32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqrdmladhx.s32 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <4 x i32> @llvm.arm.mve.vqdmlad.v4i32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 1, i32 1, i32 0)
+  ret <4 x i32> %0
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqrdmlsdhq_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vqrdmlsdhq_s8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqrdmlsdh.s8 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <16 x i8> @llvm.arm.mve.vqdmlad.v16i8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i32 0, i32 1, i32 1)
+  ret <16 x i8> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqrdmlsdhq_s16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vqrdmlsdhq_s16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqrdmlsdh.s16 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <8 x i16> @llvm.arm.mve.vqdmlad.v8i16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i32 0, i32 1, i32 1)
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vqrdmlsdhq_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vqrdmlsdhq_s32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqrdmlsdh.s32 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <4 x i32> @llvm.arm.mve.vqdmlad.v4i32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 0, i32 1, i32 1)
+  ret <4 x i32> %0
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqrdmlsdhxq_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vqrdmlsdhxq_s8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqrdmlsdhx.s8 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <16 x i8> @llvm.arm.mve.vqdmlad.v16i8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i32 1, i32 1, i32 1)
+  ret <16 x i8> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqrdmlsdhxq_s16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vqrdmlsdhxq_s16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqrdmlsdhx.s16 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <8 x i16> @llvm.arm.mve.vqdmlad.v8i16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i32 1, i32 1, i32 1)
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vqrdmlsdhxq_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vqrdmlsdhxq_s32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vqrdmlsdhx.s32 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <4 x i32> @llvm.arm.mve.vqdmlad.v4i32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 1, i32 1, i32 1)
+  ret <4 x i32> %0
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqdmladhq_m_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqdmladhq_m_s8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vqdmladht.s8 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
+  %2 = tail call <16 x i8> @llvm.arm.mve.vqdmlad.predicated.v16i8.v16i1(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i32 0, i32 0, i32 0, <16 x i1> %1)
+  ret <16 x i8> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqdmladhq_m_s16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqdmladhq_m_s16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vqdmladht.s16 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = tail call <8 x i16> @llvm.arm.mve.vqdmlad.predicated.v8i16.v8i1(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i32 0, i32 0, i32 0, <8 x i1> %1)
+  ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vqdmladhq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqdmladhq_m_s32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vqdmladht.s32 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = tail call <4 x i32> @llvm.arm.mve.vqdmlad.predicated.v4i32.v4i1(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 0, i32 0, i32 0, <4 x i1> %1)
+  ret <4 x i32> %2
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqdmladhxq_m_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqdmladhxq_m_s8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vqdmladhxt.s8 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
+  %2 = tail call <16 x i8> @llvm.arm.mve.vqdmlad.predicated.v16i8.v16i1(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i32 1, i32 0, i32 0, <16 x i1> %1)
+  ret <16 x i8> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqdmladhxq_m_s16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqdmladhxq_m_s16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vqdmladhxt.s16 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = tail call <8 x i16> @llvm.arm.mve.vqdmlad.predicated.v8i16.v8i1(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i32 1, i32 0, i32 0, <8 x i1> %1)
+  ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vqdmladhxq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqdmladhxq_m_s32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vqdmladhxt.s32 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = tail call <4 x i32> @llvm.arm.mve.vqdmlad.predicated.v4i32.v4i1(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 1, i32 0, i32 0, <4 x i1> %1)
+  ret <4 x i32> %2
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqdmlsdhq_m_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqdmlsdhq_m_s8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vqdmlsdht.s8 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
+  %2 = tail call <16 x i8> @llvm.arm.mve.vqdmlad.predicated.v16i8.v16i1(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i32 0, i32 0, i32 1, <16 x i1> %1)
+  ret <16 x i8> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqdmlsdhq_m_s16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqdmlsdhq_m_s16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vqdmlsdht.s16 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = tail call <8 x i16> @llvm.arm.mve.vqdmlad.predicated.v8i16.v8i1(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i32 0, i32 0, i32 1, <8 x i1> %1)
+  ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vqdmlsdhq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqdmlsdhq_m_s32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vqdmlsdht.s32 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = tail call <4 x i32> @llvm.arm.mve.vqdmlad.predicated.v4i32.v4i1(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 0, i32 0, i32 1, <4 x i1> %1)
+  ret <4 x i32> %2
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqdmlsdhxq_m_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqdmlsdhxq_m_s8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vqdmlsdhxt.s8 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
+  %2 = tail call <16 x i8> @llvm.arm.mve.vqdmlad.predicated.v16i8.v16i1(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i32 1, i32 0, i32 1, <16 x i1> %1)
+  ret <16 x i8> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqdmlsdhxq_m_s16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqdmlsdhxq_m_s16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vqdmlsdhxt.s16 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = tail call <8 x i16> @llvm.arm.mve.vqdmlad.predicated.v8i16.v8i1(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i32 1, i32 0, i32 1, <8 x i1> %1)
+  ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vqdmlsdhxq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqdmlsdhxq_m_s32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vqdmlsdhxt.s32 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = tail call <4 x i32> @llvm.arm.mve.vqdmlad.predicated.v4i32.v4i1(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 1, i32 0, i32 1, <4 x i1> %1)
+  ret <4 x i32> %2
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqrdmladhq_m_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqrdmladhq_m_s8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vqrdmladht.s8 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
+  %2 = tail call <16 x i8> @llvm.arm.mve.vqdmlad.predicated.v16i8.v16i1(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i32 0, i32 1, i32 0, <16 x i1> %1)
+  ret <16 x i8> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqrdmladhq_m_s16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqrdmladhq_m_s16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vqrdmladht.s16 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = tail call <8 x i16> @llvm.arm.mve.vqdmlad.predicated.v8i16.v8i1(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i32 0, i32 1, i32 0, <8 x i1> %1)
+  ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vqrdmladhq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqrdmladhq_m_s32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vqrdmladht.s32 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = tail call <4 x i32> @llvm.arm.mve.vqdmlad.predicated.v4i32.v4i1(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 0, i32 1, i32 0, <4 x i1> %1)
+  ret <4 x i32> %2
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqrdmladhxq_m_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqrdmladhxq_m_s8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vqrdmladhxt.s8 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
+  %2 = tail call <16 x i8> @llvm.arm.mve.vqdmlad.predicated.v16i8.v16i1(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i32 1, i32 1, i32 0, <16 x i1> %1)
+  ret <16 x i8> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqrdmladhxq_m_s16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqrdmladhxq_m_s16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vqrdmladhxt.s16 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = tail call <8 x i16> @llvm.arm.mve.vqdmlad.predicated.v8i16.v8i1(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i32 1, i32 1, i32 0, <8 x i1> %1)
+  ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vqrdmladhxq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqrdmladhxq_m_s32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vqrdmladhxt.s32 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = tail call <4 x i32> @llvm.arm.mve.vqdmlad.predicated.v4i32.v4i1(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 1, i32 1, i32 0, <4 x i1> %1)
+  ret <4 x i32> %2
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqrdmlsdhq_m_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqrdmlsdhq_m_s8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vqrdmlsdht.s8 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
+  %2 = tail call <16 x i8> @llvm.arm.mve.vqdmlad.predicated.v16i8.v16i1(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i32 0, i32 1, i32 1, <16 x i1> %1)
+  ret <16 x i8> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqrdmlsdhq_m_s16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqrdmlsdhq_m_s16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vqrdmlsdht.s16 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = tail call <8 x i16> @llvm.arm.mve.vqdmlad.predicated.v8i16.v8i1(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i32 0, i32 1, i32 1, <8 x i1> %1)
+  ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vqrdmlsdhq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqrdmlsdhq_m_s32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vqrdmlsdht.s32 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = tail call <4 x i32> @llvm.arm.mve.vqdmlad.predicated.v4i32.v4i1(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 0, i32 1, i32 1, <4 x i1> %1)
+  ret <4 x i32> %2
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqrdmlsdhxq_m_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqrdmlsdhxq_m_s8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vqrdmlsdhxt.s8 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
+  %2 = tail call <16 x i8> @llvm.arm.mve.vqdmlad.predicated.v16i8.v16i1(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i32 1, i32 1, i32 1, <16 x i1> %1)
+  ret <16 x i8> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqrdmlsdhxq_m_s16(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqrdmlsdhxq_m_s16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vqrdmlsdhxt.s16 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = tail call <8 x i16> @llvm.arm.mve.vqdmlad.predicated.v8i16.v8i1(<8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, i32 1, i32 1, i32 1, <8 x i1> %1)
+  ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vqrdmlsdhxq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqrdmlsdhxq_m_s32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vqrdmlsdhxt.s32 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = tail call <4 x i32> @llvm.arm.mve.vqdmlad.predicated.v4i32.v4i1(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 1, i32 1, i32 1, <4 x i1> %1)
+  ret <4 x i32> %2
+}
+
+declare <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32)
+declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32)
+declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32)
+
+declare <16 x i8> @llvm.arm.mve.vqdmlad.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, i32, i32, i32)
+declare <8 x i16> @llvm.arm.mve.vqdmlad.v8i16(<8 x i16>, <8 x i16>, <8 x i16>, i32, i32, i32)
+declare <4 x i32> @llvm.arm.mve.vqdmlad.v4i32(<4 x i32>, <4 x i32>, <4 x i32>, i32, i32, i32)
+declare <16 x i8> @llvm.arm.mve.vqdmlad.predicated.v16i8.v16i1(<16 x i8>, <16 x i8>, <16 x i8>, i32, i32, i32, <16 x i1>)
+declare <8 x i16> @llvm.arm.mve.vqdmlad.predicated.v8i16.v8i1(<8 x i16>, <8 x i16>, <8 x i16>, i32, i32, i32, <8 x i1>)
+declare <4 x i32> @llvm.arm.mve.vqdmlad.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i32>, i32, i32, i32, <4 x i1>)


        

