[clang] 1a8cbfa - [ARM,MVE] Add ACLE intrinsics for VCVT[ANPM] family.

Simon Tatham via cfe-commits cfe-commits at lists.llvm.org
Mon Mar 2 02:33:57 PST 2020


Author: Simon Tatham
Date: 2020-03-02T10:33:30Z
New Revision: 1a8cbfa514ff83ac62c20deec0d9ea2c6606bbdf

URL: https://github.com/llvm/llvm-project/commit/1a8cbfa514ff83ac62c20deec0d9ea2c6606bbdf
DIFF: https://github.com/llvm/llvm-project/commit/1a8cbfa514ff83ac62c20deec0d9ea2c6606bbdf.diff

LOG: [ARM,MVE] Add ACLE intrinsics for VCVT[ANPM] family.

Summary:
These instructions convert a vector of floats to a vector of integers
of the same size, using assorted non-default rounding modes. They are
implemented in IR as target-specific intrinsics because, as far as I
can see, the standard IR intrinsics list has no match for that
functionality.
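
As a quick illustration of the user-facing API (an editorial usage
sketch, not part of the commit itself), the new intrinsics follow the
usual ACLE MVE naming scheme, with _m (merging) and _x (don't-care)
predicated forms alongside the unpredicated ones; the names below all
appear in the tests added here:

    #include <arm_mve.h>

    /* VCVTA: round to nearest, ties away from zero; unpredicated. */
    int16x8_t cvt_away(float16x8_t f)
    {
        return vcvtaq_s16_f16(f);
    }

    /* VCVTM: round toward minus infinity; merging predication, so
     * lanes with a false predicate bit keep the value of 'inactive'. */
    uint32x4_t cvt_floor_masked(uint32x4_t inactive, float32x4_t f,
                                mve_pred16_t p)
    {
        return vcvtmq_m_u32_f32(inactive, f, p);
    }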

Reviewers: MarkMurrayARM, dmgreen, miyuki, ostannard

Reviewed By: dmgreen

Subscribers: kristof.beyls, hiraditya, cfe-commits, llvm-commits

Tags: #clang, #llvm

Differential Revision: https://reviews.llvm.org/D75255

Added: 
    clang/test/CodeGen/arm-mve-intrinsics/vcvt_anpm.c
    llvm/test/CodeGen/Thumb2/mve-intrinsics/vcvt_anpm.ll

Modified: 
    clang/include/clang/Basic/arm_mve.td
    llvm/include/llvm/IR/IntrinsicsARM.td
    llvm/lib/Target/ARM/ARMInstrMVE.td

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/arm_mve.td b/clang/include/clang/Basic/arm_mve.td
index dfdb101d587f..c1cc10b09dc6 100644
--- a/clang/include/clang/Basic/arm_mve.td
+++ b/clang/include/clang/Basic/arm_mve.td
@@ -482,11 +482,25 @@ multiclass float_int_conversions<Type FScalar, Type IScalar, IRBuilderBase ftoi,
     let pnt = PNT_None in {
       def : Intrinsic<IVector, (args FVector:$a), (ftoi $a, IVector)>,
             NameOverride<"vcvtq_" # IScalar>;
+
+      foreach suffix = ["a","n","p","m"] in
+        def : Intrinsic<IVector, (args FVector:$a),
+                        (IRInt<"vcvt"#suffix, [IVector, FVector]>
+                            (unsignedflag IScalar), $a)>,
+              NameOverride<"vcvt"#suffix#"q_" # IScalar>;
     }
     defm vcvtq: IntrinsicMX<IVector, (args FVector:$a, Predicate:$pred),
         (IRInt<"vcvt_fp_int_predicated", [IVector, FVector, Predicate]>
             $a, (unsignedflag IScalar), $pred, $inactive),
         1, "_" # IScalar, PNT_2Type, PNT_None>;
+
+    foreach suffix = ["a","n","p","m"] in {
+      defm "vcvt"#suffix#"q" : IntrinsicMX<
+          IVector, (args FVector:$a, Predicate:$pred),
+          (IRInt<"vcvt"#suffix#"_predicated", [IVector, FVector, Predicate]>
+              (unsignedflag IScalar), $inactive, $a, $pred),
+          1, "_" # IScalar, PNT_2Type, PNT_None>;
+    }
   }
 }
 

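For reference (an editorial note, not part of the commit), the four
suffixes select the rounding-mode encodings used further down in the
ARMInstrMVE.td hunk (a=0b00, n=0b01, p=0b10, m=0b11). A scalar C model
of one lane of each conversion, ignoring any saturation behaviour of
the real instructions:

    #include <math.h>
    #include <stdint.h>

    /* VCVTA: round to nearest, ties away from zero. */
    static int32_t cvt_a(float x) { return (int32_t)lroundf(x); }

    /* VCVTN: round to nearest, ties to even -- assuming the default
     * FE_TONEAREST floating-point environment. */
    static int32_t cvt_n(float x) { return (int32_t)nearbyintf(x); }

    /* VCVTP: round toward plus infinity. */
    static int32_t cvt_p(float x) { return (int32_t)ceilf(x); }

    /* VCVTM: round toward minus infinity. */
    static int32_t cvt_m(float x) { return (int32_t)floorf(x); }
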
diff --git a/clang/test/CodeGen/arm-mve-intrinsics/vcvt_anpm.c b/clang/test/CodeGen/arm-mve-intrinsics/vcvt_anpm.c
new file mode 100644
index 000000000000..e5dbd4c8f68b
--- /dev/null
+++ b/clang/test/CodeGen/arm-mve-intrinsics/vcvt_anpm.c
@@ -0,0 +1,614 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
+// RUN: %clang_cc1 -DPOLYMORPHIC -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
+
+#include <arm_mve.h>
+
+// CHECK-LABEL: @test_vcvtaq_s16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvta.v8i16.v8f16(i32 0, <8 x half> [[A:%.*]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
+//
+int16x8_t test_vcvtaq_s16_f16(float16x8_t a)
+{
+    return vcvtaq_s16_f16(a);
+}
+
+// CHECK-LABEL: @test_vcvtaq_s32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvta.v4i32.v4f32(i32 0, <4 x float> [[A:%.*]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
+//
+int32x4_t test_vcvtaq_s32_f32(float32x4_t a)
+{
+    return vcvtaq_s32_f32(a);
+}
+
+// CHECK-LABEL: @test_vcvtaq_u16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvta.v8i16.v8f16(i32 1, <8 x half> [[A:%.*]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
+//
+uint16x8_t test_vcvtaq_u16_f16(float16x8_t a)
+{
+    return vcvtaq_u16_f16(a);
+}
+
+// CHECK-LABEL: @test_vcvtaq_u32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvta.v4i32.v4f32(i32 1, <4 x float> [[A:%.*]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
+//
+uint32x4_t test_vcvtaq_u32_f32(float32x4_t a)
+{
+    return vcvtaq_u32_f32(a);
+}
+
+// CHECK-LABEL: @test_vcvtmq_s16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtm.v8i16.v8f16(i32 0, <8 x half> [[A:%.*]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
+//
+int16x8_t test_vcvtmq_s16_f16(float16x8_t a)
+{
+    return vcvtmq_s16_f16(a);
+}
+
+// CHECK-LABEL: @test_vcvtmq_s32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtm.v4i32.v4f32(i32 0, <4 x float> [[A:%.*]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
+//
+int32x4_t test_vcvtmq_s32_f32(float32x4_t a)
+{
+    return vcvtmq_s32_f32(a);
+}
+
+// CHECK-LABEL: @test_vcvtmq_u16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtm.v8i16.v8f16(i32 1, <8 x half> [[A:%.*]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
+//
+uint16x8_t test_vcvtmq_u16_f16(float16x8_t a)
+{
+    return vcvtmq_u16_f16(a);
+}
+
+// CHECK-LABEL: @test_vcvtmq_u32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtm.v4i32.v4f32(i32 1, <4 x float> [[A:%.*]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
+//
+uint32x4_t test_vcvtmq_u32_f32(float32x4_t a)
+{
+    return vcvtmq_u32_f32(a);
+}
+
+// CHECK-LABEL: @test_vcvtnq_s16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtn.v8i16.v8f16(i32 0, <8 x half> [[A:%.*]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
+//
+int16x8_t test_vcvtnq_s16_f16(float16x8_t a)
+{
+    return vcvtnq_s16_f16(a);
+}
+
+// CHECK-LABEL: @test_vcvtnq_s32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtn.v4i32.v4f32(i32 0, <4 x float> [[A:%.*]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
+//
+int32x4_t test_vcvtnq_s32_f32(float32x4_t a)
+{
+    return vcvtnq_s32_f32(a);
+}
+
+// CHECK-LABEL: @test_vcvtnq_u16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtn.v8i16.v8f16(i32 1, <8 x half> [[A:%.*]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
+//
+uint16x8_t test_vcvtnq_u16_f16(float16x8_t a)
+{
+    return vcvtnq_u16_f16(a);
+}
+
+// CHECK-LABEL: @test_vcvtnq_u32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtn.v4i32.v4f32(i32 1, <4 x float> [[A:%.*]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
+//
+uint32x4_t test_vcvtnq_u32_f32(float32x4_t a)
+{
+    return vcvtnq_u32_f32(a);
+}
+
+// CHECK-LABEL: @test_vcvtpq_s16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtp.v8i16.v8f16(i32 0, <8 x half> [[A:%.*]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
+//
+int16x8_t test_vcvtpq_s16_f16(float16x8_t a)
+{
+    return vcvtpq_s16_f16(a);
+}
+
+// CHECK-LABEL: @test_vcvtpq_s32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtp.v4i32.v4f32(i32 0, <4 x float> [[A:%.*]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
+//
+int32x4_t test_vcvtpq_s32_f32(float32x4_t a)
+{
+    return vcvtpq_s32_f32(a);
+}
+
+// CHECK-LABEL: @test_vcvtpq_u16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtp.v8i16.v8f16(i32 1, <8 x half> [[A:%.*]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
+//
+uint16x8_t test_vcvtpq_u16_f16(float16x8_t a)
+{
+    return vcvtpq_u16_f16(a);
+}
+
+// CHECK-LABEL: @test_vcvtpq_u32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtp.v4i32.v4f32(i32 1, <4 x float> [[A:%.*]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
+//
+uint32x4_t test_vcvtpq_u32_f32(float32x4_t a)
+{
+    return vcvtpq_u32_f32(a);
+}
+
+// CHECK-LABEL: @test_vcvtaq_m_s16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvta.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vcvtaq_m_s16_f16(int16x8_t inactive, float16x8_t a, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+    return vcvtaq_m(inactive, a, p);
+#else /* POLYMORPHIC */
+    return vcvtaq_m_s16_f16(inactive, a, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vcvtaq_m_s32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvta.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vcvtaq_m_s32_f32(int32x4_t inactive, float32x4_t a, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+    return vcvtaq_m(inactive, a, p);
+#else /* POLYMORPHIC */
+    return vcvtaq_m_s32_f32(inactive, a, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vcvtaq_m_u16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvta.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+uint16x8_t test_vcvtaq_m_u16_f16(uint16x8_t inactive, float16x8_t a, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+    return vcvtaq_m(inactive, a, p);
+#else /* POLYMORPHIC */
+    return vcvtaq_m_u16_f16(inactive, a, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vcvtaq_m_u32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvta.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+uint32x4_t test_vcvtaq_m_u32_f32(uint32x4_t inactive, float32x4_t a, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+    return vcvtaq_m(inactive, a, p);
+#else /* POLYMORPHIC */
+    return vcvtaq_m_u32_f32(inactive, a, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vcvtmq_m_s16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtm.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vcvtmq_m_s16_f16(int16x8_t inactive, float16x8_t a, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+    return vcvtmq_m(inactive, a, p);
+#else /* POLYMORPHIC */
+    return vcvtmq_m_s16_f16(inactive, a, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vcvtmq_m_s32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtm.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vcvtmq_m_s32_f32(int32x4_t inactive, float32x4_t a, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+    return vcvtmq_m(inactive, a, p);
+#else /* POLYMORPHIC */
+    return vcvtmq_m_s32_f32(inactive, a, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vcvtmq_m_u16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtm.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+uint16x8_t test_vcvtmq_m_u16_f16(uint16x8_t inactive, float16x8_t a, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+    return vcvtmq_m(inactive, a, p);
+#else /* POLYMORPHIC */
+    return vcvtmq_m_u16_f16(inactive, a, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vcvtmq_m_u32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtm.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+uint32x4_t test_vcvtmq_m_u32_f32(uint32x4_t inactive, float32x4_t a, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+    return vcvtmq_m(inactive, a, p);
+#else /* POLYMORPHIC */
+    return vcvtmq_m_u32_f32(inactive, a, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vcvtnq_m_s16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtn.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vcvtnq_m_s16_f16(int16x8_t inactive, float16x8_t a, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+    return vcvtnq_m(inactive, a, p);
+#else /* POLYMORPHIC */
+    return vcvtnq_m_s16_f16(inactive, a, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vcvtnq_m_s32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtn.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vcvtnq_m_s32_f32(int32x4_t inactive, float32x4_t a, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+    return vcvtnq_m(inactive, a, p);
+#else /* POLYMORPHIC */
+    return vcvtnq_m_s32_f32(inactive, a, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vcvtnq_m_u16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtn.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+uint16x8_t test_vcvtnq_m_u16_f16(uint16x8_t inactive, float16x8_t a, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+    return vcvtnq_m(inactive, a, p);
+#else /* POLYMORPHIC */
+    return vcvtnq_m_u16_f16(inactive, a, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vcvtnq_m_u32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtn.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+uint32x4_t test_vcvtnq_m_u32_f32(uint32x4_t inactive, float32x4_t a, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+    return vcvtnq_m(inactive, a, p);
+#else /* POLYMORPHIC */
+    return vcvtnq_m_u32_f32(inactive, a, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vcvtpq_m_s16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtp.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vcvtpq_m_s16_f16(int16x8_t inactive, float16x8_t a, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+    return vcvtpq_m(inactive, a, p);
+#else /* POLYMORPHIC */
+    return vcvtpq_m_s16_f16(inactive, a, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vcvtpq_m_s32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtp.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vcvtpq_m_s32_f32(int32x4_t inactive, float32x4_t a, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+    return vcvtpq_m(inactive, a, p);
+#else /* POLYMORPHIC */
+    return vcvtpq_m_s32_f32(inactive, a, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vcvtpq_m_u16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtp.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+uint16x8_t test_vcvtpq_m_u16_f16(uint16x8_t inactive, float16x8_t a, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+    return vcvtpq_m(inactive, a, p);
+#else /* POLYMORPHIC */
+    return vcvtpq_m_u16_f16(inactive, a, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vcvtpq_m_u32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtp.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+uint32x4_t test_vcvtpq_m_u32_f32(uint32x4_t inactive, float32x4_t a, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+    return vcvtpq_m(inactive, a, p);
+#else /* POLYMORPHIC */
+    return vcvtpq_m_u32_f32(inactive, a, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vcvtaq_x_s16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvta.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> undef, <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vcvtaq_x_s16_f16(float16x8_t a, mve_pred16_t p)
+{
+    return vcvtaq_x_s16_f16(a, p);
+}
+
+// CHECK-LABEL: @test_vcvtaq_x_s32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvta.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> undef, <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vcvtaq_x_s32_f32(float32x4_t a, mve_pred16_t p)
+{
+    return vcvtaq_x_s32_f32(a, p);
+}
+
+// CHECK-LABEL: @test_vcvtaq_x_u16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvta.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> undef, <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+uint16x8_t test_vcvtaq_x_u16_f16(float16x8_t a, mve_pred16_t p)
+{
+    return vcvtaq_x_u16_f16(a, p);
+}
+
+// CHECK-LABEL: @test_vcvtaq_x_u32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvta.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> undef, <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+uint32x4_t test_vcvtaq_x_u32_f32(float32x4_t a, mve_pred16_t p)
+{
+    return vcvtaq_x_u32_f32(a, p);
+}
+
+// CHECK-LABEL: @test_vcvtmq_x_s16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtm.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> undef, <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vcvtmq_x_s16_f16(float16x8_t a, mve_pred16_t p)
+{
+    return vcvtmq_x_s16_f16(a, p);
+}
+
+// CHECK-LABEL: @test_vcvtmq_x_s32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtm.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> undef, <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vcvtmq_x_s32_f32(float32x4_t a, mve_pred16_t p)
+{
+    return vcvtmq_x_s32_f32(a, p);
+}
+
+// CHECK-LABEL: @test_vcvtmq_x_u16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtm.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> undef, <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+uint16x8_t test_vcvtmq_x_u16_f16(float16x8_t a, mve_pred16_t p)
+{
+    return vcvtmq_x_u16_f16(a, p);
+}
+
+// CHECK-LABEL: @test_vcvtmq_x_u32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtm.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> undef, <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+uint32x4_t test_vcvtmq_x_u32_f32(float32x4_t a, mve_pred16_t p)
+{
+    return vcvtmq_x_u32_f32(a, p);
+}
+
+// CHECK-LABEL: @test_vcvtnq_x_s16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtn.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> undef, <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vcvtnq_x_s16_f16(float16x8_t a, mve_pred16_t p)
+{
+    return vcvtnq_x_s16_f16(a, p);
+}
+
+// CHECK-LABEL: @test_vcvtnq_x_s32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtn.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> undef, <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vcvtnq_x_s32_f32(float32x4_t a, mve_pred16_t p)
+{
+    return vcvtnq_x_s32_f32(a, p);
+}
+
+// CHECK-LABEL: @test_vcvtnq_x_u16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtn.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> undef, <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+uint16x8_t test_vcvtnq_x_u16_f16(float16x8_t a, mve_pred16_t p)
+{
+    return vcvtnq_x_u16_f16(a, p);
+}
+
+// CHECK-LABEL: @test_vcvtnq_x_u32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtn.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> undef, <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+uint32x4_t test_vcvtnq_x_u32_f32(float32x4_t a, mve_pred16_t p)
+{
+    return vcvtnq_x_u32_f32(a, p);
+}
+
+// CHECK-LABEL: @test_vcvtpq_x_s16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtp.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> undef, <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vcvtpq_x_s16_f16(float16x8_t a, mve_pred16_t p)
+{
+    return vcvtpq_x_s16_f16(a, p);
+}
+
+// CHECK-LABEL: @test_vcvtpq_x_s32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtp.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> undef, <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vcvtpq_x_s32_f32(float32x4_t a, mve_pred16_t p)
+{
+    return vcvtpq_x_s32_f32(a, p);
+}
+
+// CHECK-LABEL: @test_vcvtpq_x_u16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtp.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> undef, <8 x half> [[A:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+uint16x8_t test_vcvtpq_x_u16_f16(float16x8_t a, mve_pred16_t p)
+{
+    return vcvtpq_x_u16_f16(a, p);
+}
+
+// CHECK-LABEL: @test_vcvtpq_x_u32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtp.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> undef, <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+uint32x4_t test_vcvtpq_x_u32_f32(float32x4_t a, mve_pred16_t p)
+{
+    return vcvtpq_x_u32_f32(a, p);
+}
+

diff --git a/llvm/include/llvm/IR/IntrinsicsARM.td b/llvm/include/llvm/IR/IntrinsicsARM.td
index 64c1faaa68fc..945b67038bb2 100644
--- a/llvm/include/llvm/IR/IntrinsicsARM.td
+++ b/llvm/include/llvm/IR/IntrinsicsARM.td
@@ -1166,6 +1166,12 @@ def int_arm_mve_vcvt_fp_int_predicated: Intrinsic<
    llvm_anyvector_ty /* predicate */, LLVMMatchType<0> /* inactive */],
   [IntrNoMem]>;
 
+foreach suffix = ["a","n","p","m"] in {
+  defm "int_arm_mve_vcvt"#suffix: MVEMXPredicated<
+    [llvm_anyvector_ty /* output */], [llvm_i32_ty /* unsigned */],
+    [llvm_anyvector_ty /* input */], LLVMMatchType<0>, llvm_anyvector_ty>;
+}
+
 def int_arm_mve_vrintn: Intrinsic<
   [llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
 def int_arm_mve_vcls: Intrinsic<

diff --git a/llvm/lib/Target/ARM/ARMInstrMVE.td b/llvm/lib/Target/ARM/ARMInstrMVE.td
index b4b3c68c227e..968387207d04 100644
--- a/llvm/lib/Target/ARM/ARMInstrMVE.td
+++ b/llvm/lib/Target/ARM/ARMInstrMVE.td
@@ -3659,21 +3659,42 @@ class MVE_VCVT_fp_int_anpm<string suffix, bits<2> size, bit op, string anpm,
   let validForTailPredication = 1;
 }
 
-multiclass MVE_VCVT_fp_int_anpm_multi<string suffix, bits<2> size, bit op,
-                                list<dag> pattern=[]> {
-  def a : MVE_VCVT_fp_int_anpm<suffix, size, op, "a", 0b00>;
-  def n : MVE_VCVT_fp_int_anpm<suffix, size, op, "n", 0b01>;
-  def p : MVE_VCVT_fp_int_anpm<suffix, size, op, "p", 0b10>;
-  def m : MVE_VCVT_fp_int_anpm<suffix, size, op, "m", 0b11>;
+multiclass MVE_VCVT_fp_int_anpm_inner<MVEVectorVTInfo Int, MVEVectorVTInfo Flt,
+                                      string anpm, bits<2> rm> {
+  def "": MVE_VCVT_fp_int_anpm<Int.Suffix # "." # Flt.Suffix, Int.Size,
+                               Int.Unsigned, anpm, rm>;
+
+  defvar Inst         = !cast<Instruction>(NAME);
+  defvar IntrBaseName = "int_arm_mve_vcvt" # anpm;
+  defvar UnpredIntr   = !cast<Intrinsic>(IntrBaseName);
+  defvar PredIntr     = !cast<Intrinsic>(IntrBaseName # "_predicated");
+
+  let Predicates = [HasMVEFloat] in {
+    def : Pat<(Int.Vec (UnpredIntr (i32 Int.Unsigned), (Flt.Vec MQPR:$in))),
+              (Int.Vec (Inst (Flt.Vec MQPR:$in)))>;
+
+    def : Pat<(Int.Vec (PredIntr (i32 Int.Unsigned), (Int.Vec MQPR:$inactive),
+                                 (Flt.Vec MQPR:$in), (Flt.Pred VCCR:$pred))),
+              (Int.Vec (Inst (Flt.Vec MQPR:$in), ARMVCCThen,
+                             (Flt.Pred VCCR:$pred), (Int.Vec MQPR:$inactive)))>;
+  }
+}
+
+multiclass MVE_VCVT_fp_int_anpm_outer<MVEVectorVTInfo Int,
+                                      MVEVectorVTInfo Flt> {
+  defm a : MVE_VCVT_fp_int_anpm_inner<Int, Flt, "a", 0b00>;
+  defm n : MVE_VCVT_fp_int_anpm_inner<Int, Flt, "n", 0b01>;
+  defm p : MVE_VCVT_fp_int_anpm_inner<Int, Flt, "p", 0b10>;
+  defm m : MVE_VCVT_fp_int_anpm_inner<Int, Flt, "m", 0b11>;
 }
 
 // This defines instructions such as MVE_VCVTu16f16a, with an explicit
 // rounding-mode suffix on the mnemonic. The class below will define
 // the bare MVE_VCVTu16f16 (with implied rounding toward zero).
-defm MVE_VCVTs16f16 : MVE_VCVT_fp_int_anpm_multi<"s16.f16", 0b01, 0b0>;
-defm MVE_VCVTu16f16 : MVE_VCVT_fp_int_anpm_multi<"u16.f16", 0b01, 0b1>;
-defm MVE_VCVTs32f32 : MVE_VCVT_fp_int_anpm_multi<"s32.f32", 0b10, 0b0>;
-defm MVE_VCVTu32f32 : MVE_VCVT_fp_int_anpm_multi<"u32.f32", 0b10, 0b1>;
+defm MVE_VCVTs16f16 : MVE_VCVT_fp_int_anpm_outer<MVE_v8s16, MVE_v8f16>;
+defm MVE_VCVTu16f16 : MVE_VCVT_fp_int_anpm_outer<MVE_v8u16, MVE_v8f16>;
+defm MVE_VCVTs32f32 : MVE_VCVT_fp_int_anpm_outer<MVE_v4s32, MVE_v4f32>;
+defm MVE_VCVTu32f32 : MVE_VCVT_fp_int_anpm_outer<MVE_v4u32, MVE_v4f32>;
 
 class MVE_VCVT_fp_int<string suffix, bits<2> size, bit toint, bit unsigned,
                       list<dag> pattern=[]>

diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vcvt_anpm.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vcvt_anpm.ll
new file mode 100644
index 000000000000..ab0b11716c3c
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vcvt_anpm.ll
@@ -0,0 +1,631 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtaq_s16_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcvtaq_s16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvta.s16.f16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <8 x i16> @llvm.arm.mve.vcvta.v8i16.v8f16(i32 0, <8 x half> %a)
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtaq_s32_f32(<4 x float> %a) {
+; CHECK-LABEL: test_vcvtaq_s32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvta.s32.f32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <4 x i32> @llvm.arm.mve.vcvta.v4i32.v4f32(i32 0, <4 x float> %a)
+  ret <4 x i32> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtaq_u16_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcvtaq_u16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvta.u16.f16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <8 x i16> @llvm.arm.mve.vcvta.v8i16.v8f16(i32 1, <8 x half> %a)
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtaq_u32_f32(<4 x float> %a) {
+; CHECK-LABEL: test_vcvtaq_u32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvta.u32.f32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <4 x i32> @llvm.arm.mve.vcvta.v4i32.v4f32(i32 1, <4 x float> %a)
+  ret <4 x i32> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtmq_s16_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcvtmq_s16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvtm.s16.f16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <8 x i16> @llvm.arm.mve.vcvtm.v8i16.v8f16(i32 0, <8 x half> %a)
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtmq_s32_f32(<4 x float> %a) {
+; CHECK-LABEL: test_vcvtmq_s32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvtm.s32.f32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <4 x i32> @llvm.arm.mve.vcvtm.v4i32.v4f32(i32 0, <4 x float> %a)
+  ret <4 x i32> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtmq_u16_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcvtmq_u16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvtm.u16.f16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <8 x i16> @llvm.arm.mve.vcvtm.v8i16.v8f16(i32 1, <8 x half> %a)
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtmq_u32_f32(<4 x float> %a) {
+; CHECK-LABEL: test_vcvtmq_u32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvtm.u32.f32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <4 x i32> @llvm.arm.mve.vcvtm.v4i32.v4f32(i32 1, <4 x float> %a)
+  ret <4 x i32> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtnq_s16_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcvtnq_s16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvtn.s16.f16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <8 x i16> @llvm.arm.mve.vcvtn.v8i16.v8f16(i32 0, <8 x half> %a)
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtnq_s32_f32(<4 x float> %a) {
+; CHECK-LABEL: test_vcvtnq_s32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvtn.s32.f32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <4 x i32> @llvm.arm.mve.vcvtn.v4i32.v4f32(i32 0, <4 x float> %a)
+  ret <4 x i32> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtnq_u16_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcvtnq_u16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvtn.u16.f16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <8 x i16> @llvm.arm.mve.vcvtn.v8i16.v8f16(i32 1, <8 x half> %a)
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtnq_u32_f32(<4 x float> %a) {
+; CHECK-LABEL: test_vcvtnq_u32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvtn.u32.f32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <4 x i32> @llvm.arm.mve.vcvtn.v4i32.v4f32(i32 1, <4 x float> %a)
+  ret <4 x i32> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtpq_s16_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcvtpq_s16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvtp.s16.f16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <8 x i16> @llvm.arm.mve.vcvtp.v8i16.v8f16(i32 0, <8 x half> %a)
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtpq_s32_f32(<4 x float> %a) {
+; CHECK-LABEL: test_vcvtpq_s32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvtp.s32.f32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <4 x i32> @llvm.arm.mve.vcvtp.v4i32.v4f32(i32 0, <4 x float> %a)
+  ret <4 x i32> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtpq_u16_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcvtpq_u16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvtp.u16.f16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <8 x i16> @llvm.arm.mve.vcvtp.v8i16.v8f16(i32 1, <8 x half> %a)
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtpq_u32_f32(<4 x float> %a) {
+; CHECK-LABEL: test_vcvtpq_u32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvtp.u32.f32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <4 x i32> @llvm.arm.mve.vcvtp.v4i32.v4f32(i32 1, <4 x float> %a)
+  ret <4 x i32> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtaq_m_s16_f16(<8 x i16> %inactive, <8 x half> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtaq_m_s16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtat.s16.f16 q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = tail call <8 x i16> @llvm.arm.mve.vcvta.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> %inactive, <8 x half> %a, <8 x i1> %1)
+  ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtaq_m_s32_f32(<4 x i32> %inactive, <4 x float> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtaq_m_s32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtat.s32.f32 q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = tail call <4 x i32> @llvm.arm.mve.vcvta.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> %inactive, <4 x float> %a, <4 x i1> %1)
+  ret <4 x i32> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtaq_m_u16_f16(<8 x i16> %inactive, <8 x half> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtaq_m_u16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtat.u16.f16 q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = tail call <8 x i16> @llvm.arm.mve.vcvta.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> %inactive, <8 x half> %a, <8 x i1> %1)
+  ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtaq_m_u32_f32(<4 x i32> %inactive, <4 x float> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtaq_m_u32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtat.u32.f32 q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = tail call <4 x i32> @llvm.arm.mve.vcvta.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> %inactive, <4 x float> %a, <4 x i1> %1)
+  ret <4 x i32> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtmq_m_s16_f16(<8 x i16> %inactive, <8 x half> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtmq_m_s16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtmt.s16.f16 q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = tail call <8 x i16> @llvm.arm.mve.vcvtm.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> %inactive, <8 x half> %a, <8 x i1> %1)
+  ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtmq_m_s32_f32(<4 x i32> %inactive, <4 x float> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtmq_m_s32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtmt.s32.f32 q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = tail call <4 x i32> @llvm.arm.mve.vcvtm.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> %inactive, <4 x float> %a, <4 x i1> %1)
+  ret <4 x i32> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtmq_m_u16_f16(<8 x i16> %inactive, <8 x half> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtmq_m_u16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtmt.u16.f16 q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = tail call <8 x i16> @llvm.arm.mve.vcvtm.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> %inactive, <8 x half> %a, <8 x i1> %1)
+  ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtmq_m_u32_f32(<4 x i32> %inactive, <4 x float> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtmq_m_u32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtmt.u32.f32 q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = tail call <4 x i32> @llvm.arm.mve.vcvtm.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> %inactive, <4 x float> %a, <4 x i1> %1)
+  ret <4 x i32> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtnq_m_s16_f16(<8 x i16> %inactive, <8 x half> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtnq_m_s16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtnt.s16.f16 q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = tail call <8 x i16> @llvm.arm.mve.vcvtn.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> %inactive, <8 x half> %a, <8 x i1> %1)
+  ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtnq_m_s32_f32(<4 x i32> %inactive, <4 x float> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtnq_m_s32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtnt.s32.f32 q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = tail call <4 x i32> @llvm.arm.mve.vcvtn.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> %inactive, <4 x float> %a, <4 x i1> %1)
+  ret <4 x i32> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtnq_m_u16_f16(<8 x i16> %inactive, <8 x half> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtnq_m_u16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtnt.u16.f16 q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = tail call <8 x i16> @llvm.arm.mve.vcvtn.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> %inactive, <8 x half> %a, <8 x i1> %1)
+  ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtnq_m_u32_f32(<4 x i32> %inactive, <4 x float> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtnq_m_u32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtnt.u32.f32 q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = tail call <4 x i32> @llvm.arm.mve.vcvtn.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> %inactive, <4 x float> %a, <4 x i1> %1)
+  ret <4 x i32> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtpq_m_s16_f16(<8 x i16> %inactive, <8 x half> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtpq_m_s16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtpt.s16.f16 q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = tail call <8 x i16> @llvm.arm.mve.vcvtp.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> %inactive, <8 x half> %a, <8 x i1> %1)
+  ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtpq_m_s32_f32(<4 x i32> %inactive, <4 x float> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtpq_m_s32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtpt.s32.f32 q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = tail call <4 x i32> @llvm.arm.mve.vcvtp.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> %inactive, <4 x float> %a, <4 x i1> %1)
+  ret <4 x i32> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtpq_m_u16_f16(<8 x i16> %inactive, <8 x half> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtpq_m_u16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtpt.u16.f16 q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = tail call <8 x i16> @llvm.arm.mve.vcvtp.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> %inactive, <8 x half> %a, <8 x i1> %1)
+  ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtpq_m_u32_f32(<4 x i32> %inactive, <4 x float> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtpq_m_u32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtpt.u32.f32 q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = tail call <4 x i32> @llvm.arm.mve.vcvtp.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> %inactive, <4 x float> %a, <4 x i1> %1)
+  ret <4 x i32> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtaq_x_s16_f16(<8 x half> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtaq_x_s16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtat.s16.f16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = tail call <8 x i16> @llvm.arm.mve.vcvta.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> undef, <8 x half> %a, <8 x i1> %1)
+  ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtaq_x_s32_f32(<4 x float> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtaq_x_s32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtat.s32.f32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = tail call <4 x i32> @llvm.arm.mve.vcvta.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> undef, <4 x float> %a, <4 x i1> %1)
+  ret <4 x i32> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtaq_x_u16_f16(<8 x half> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtaq_x_u16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtat.u16.f16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = tail call <8 x i16> @llvm.arm.mve.vcvta.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> undef, <8 x half> %a, <8 x i1> %1)
+  ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtaq_x_u32_f32(<4 x float> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtaq_x_u32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtat.u32.f32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = tail call <4 x i32> @llvm.arm.mve.vcvta.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> undef, <4 x float> %a, <4 x i1> %1)
+  ret <4 x i32> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtmq_x_s16_f16(<8 x half> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtmq_x_s16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtmt.s16.f16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = tail call <8 x i16> @llvm.arm.mve.vcvtm.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> undef, <8 x half> %a, <8 x i1> %1)
+  ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtmq_x_s32_f32(<4 x float> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtmq_x_s32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtmt.s32.f32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = tail call <4 x i32> @llvm.arm.mve.vcvtm.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> undef, <4 x float> %a, <4 x i1> %1)
+  ret <4 x i32> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtmq_x_u16_f16(<8 x half> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtmq_x_u16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtmt.u16.f16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = tail call <8 x i16> @llvm.arm.mve.vcvtm.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> undef, <8 x half> %a, <8 x i1> %1)
+  ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtmq_x_u32_f32(<4 x float> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtmq_x_u32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtmt.u32.f32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = tail call <4 x i32> @llvm.arm.mve.vcvtm.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> undef, <4 x float> %a, <4 x i1> %1)
+  ret <4 x i32> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtnq_x_s16_f16(<8 x half> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtnq_x_s16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtnt.s16.f16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = tail call <8 x i16> @llvm.arm.mve.vcvtn.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> undef, <8 x half> %a, <8 x i1> %1)
+  ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtnq_x_s32_f32(<4 x float> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtnq_x_s32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtnt.s32.f32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = tail call <4 x i32> @llvm.arm.mve.vcvtn.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> undef, <4 x float> %a, <4 x i1> %1)
+  ret <4 x i32> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtnq_x_u16_f16(<8 x half> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtnq_x_u16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtnt.u16.f16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = tail call <8 x i16> @llvm.arm.mve.vcvtn.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> undef, <8 x half> %a, <8 x i1> %1)
+  ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtnq_x_u32_f32(<4 x float> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtnq_x_u32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtnt.u32.f32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = tail call <4 x i32> @llvm.arm.mve.vcvtn.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> undef, <4 x float> %a, <4 x i1> %1)
+  ret <4 x i32> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtpq_x_s16_f16(<8 x half> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtpq_x_s16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtpt.s16.f16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = tail call <8 x i16> @llvm.arm.mve.vcvtp.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> undef, <8 x half> %a, <8 x i1> %1)
+  ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtpq_x_s32_f32(<4 x float> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtpq_x_s32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtpt.s32.f32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = tail call <4 x i32> @llvm.arm.mve.vcvtp.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> undef, <4 x float> %a, <4 x i1> %1)
+  ret <4 x i32> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtpq_x_u16_f16(<8 x half> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtpq_x_u16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtpt.u16.f16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = tail call <8 x i16> @llvm.arm.mve.vcvtp.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> undef, <8 x half> %a, <8 x i1> %1)
+  ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtpq_x_u32_f32(<4 x float> %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vcvtpq_x_u32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcvtpt.u32.f32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = tail call <4 x i32> @llvm.arm.mve.vcvtp.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> undef, <4 x float> %a, <4 x i1> %1)
+  ret <4 x i32> %2
+}
+
+declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32)
+declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32)
+
+declare <8 x i16> @llvm.arm.mve.vcvta.v8i16.v8f16(i32, <8 x half>)
+declare <4 x i32> @llvm.arm.mve.vcvta.v4i32.v4f32(i32, <4 x float>)
+declare <8 x i16> @llvm.arm.mve.vcvtm.v8i16.v8f16(i32, <8 x half>)
+declare <4 x i32> @llvm.arm.mve.vcvtm.v4i32.v4f32(i32, <4 x float>)
+declare <8 x i16> @llvm.arm.mve.vcvtn.v8i16.v8f16(i32, <8 x half>)
+declare <4 x i32> @llvm.arm.mve.vcvtn.v4i32.v4f32(i32, <4 x float>)
+declare <8 x i16> @llvm.arm.mve.vcvtp.v8i16.v8f16(i32, <8 x half>)
+declare <4 x i32> @llvm.arm.mve.vcvtp.v4i32.v4f32(i32, <4 x float>)
+
+declare <8 x i16> @llvm.arm.mve.vcvta.predicated.v8i16.v8f16.v8i1(i32, <8 x i16>, <8 x half>, <8 x i1>)
+declare <4 x i32> @llvm.arm.mve.vcvta.predicated.v4i32.v4f32.v4i1(i32, <4 x i32>, <4 x float>, <4 x i1>)
+declare <8 x i16> @llvm.arm.mve.vcvtm.predicated.v8i16.v8f16.v8i1(i32, <8 x i16>, <8 x half>, <8 x i1>)
+declare <4 x i32> @llvm.arm.mve.vcvtm.predicated.v4i32.v4f32.v4i1(i32, <4 x i32>, <4 x float>, <4 x i1>)
+declare <8 x i16> @llvm.arm.mve.vcvtn.predicated.v8i16.v8f16.v8i1(i32, <8 x i16>, <8 x half>, <8 x i1>)
+declare <4 x i32> @llvm.arm.mve.vcvtn.predicated.v4i32.v4f32.v4i1(i32, <4 x i32>, <4 x float>, <4 x i1>)
+declare <8 x i16> @llvm.arm.mve.vcvtp.predicated.v8i16.v8f16.v8i1(i32, <8 x i16>, <8 x half>, <8 x i1>)
+declare <4 x i32> @llvm.arm.mve.vcvtp.predicated.v4i32.v4f32.v4i1(i32, <4 x i32>, <4 x float>, <4 x i1>)
