[llvm] a41ecf0 - [ARM,MVE] Add ACLE intrinsics for the VQMOV[U]N family.
Simon Tatham via llvm-commits
llvm-commits at lists.llvm.org
Mon Mar 2 02:33:50 PST 2020
Author: Simon Tatham
Date: 2020-03-02T10:33:30Z
New Revision: a41ecf0eb05190c8597f98b8d41d7a6e678aec0b
URL: https://github.com/llvm/llvm-project/commit/a41ecf0eb05190c8597f98b8d41d7a6e678aec0b
DIFF: https://github.com/llvm/llvm-project/commit/a41ecf0eb05190c8597f98b8d41d7a6e678aec0b.diff
LOG: [ARM,MVE] Add ACLE intrinsics for the VQMOV[U]N family.
Summary:
These instructions work like VMOVN (narrowing a vector of wide values
to half size, and overwriting every other lane of the output register
with the result), except that the narrowing conversion is saturating.
They come in three signedness flavours: signed to signed, unsigned to
unsigned, and signed to unsigned. All three are represented in IR by a
single target-specific intrinsic that takes two separate 'unsigned'
flags, one for the output type and one for the input.
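For illustration, here is a minimal sketch (not part of the commit; the
wrapper function names are invented) of how the three flavours map onto
the same underlying IR intrinsic, matching the flag values in the tests
added below:

  #include <arm_mve.h>

  /* s16 -> s8, bottom (even) lanes: lowers to @llvm.arm.mve.vqmovn with
     (unsigned output, unsigned input, top) = (0, 0, 0). */
  int8x16_t narrow_ss(int8x16_t inactive, int16x8_t a)
  { return vqmovnbq_s16(inactive, a); }

  /* u16 -> u8, bottom lanes: flags (1, 1, 0). */
  uint8x16_t narrow_uu(uint8x16_t inactive, uint16x8_t a)
  { return vqmovnbq_u16(inactive, a); }

  /* s16 -> u8, top (odd) lanes: flags (1, 0, 1). */
  uint8x16_t narrow_su(uint8x16_t inactive, int16x8_t a)
  { return vqmovuntq_s16(inactive, a); }

The saturation is the point: an input lane holding 300 becomes 127 in
narrow_ss, where the non-saturating vmovnbq_s16 would truncate it to 44.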
Reviewers: MarkMurrayARM, dmgreen, miyuki, ostannard
Reviewed By: dmgreen
Subscribers: kristof.beyls, hiraditya, cfe-commits, llvm-commits
Tags: #clang, #llvm
Differential Revision: https://reviews.llvm.org/D75252
Added:
clang/test/CodeGen/arm-mve-intrinsics/vqmovn.c
llvm/test/CodeGen/Thumb2/mve-intrinsics/vqmovn.ll
Modified:
clang/include/clang/Basic/arm_mve.td
clang/include/clang/Basic/arm_mve_defs.td
llvm/include/llvm/IR/IntrinsicsARM.td
llvm/lib/Target/ARM/ARMInstrMVE.td
Removed:
################################################################################
diff --git a/clang/include/clang/Basic/arm_mve.td b/clang/include/clang/Basic/arm_mve.td
index efc6be1158b8..c64a75ffeb8e 100644
--- a/clang/include/clang/Basic/arm_mve.td
+++ b/clang/include/clang/Basic/arm_mve.td
@@ -514,6 +514,33 @@ defm vmovntq: vmovn<1, (zip (vreinterpret $inactive, Vector), $a)>;
defm vmovnbq: vmovn<0,
(zip $a, (vreinterpret (vrev $inactive, (bitsize Scalar)), Vector))>;
+multiclass vqmovn<bit top, Type RetScalar> {
+ defvar RetVector = VecOf<RetScalar>;
+
+ let params = [s16, u16, s32, u32] in {
+ def : Intrinsic<
+ RetVector, (args RetVector:$inactive, Vector:$a),
+ (IRInt<"vqmovn", [RetVector, Vector]>
+ $inactive, $a, (unsignedflag RetScalar), (unsignedflag Scalar), top)>,
+ NameOverride<NAME>;
+ def: Intrinsic<
+ RetVector, (args RetVector:$inactive, Vector:$a, Predicate:$pred),
+ (IRInt<"vqmovn_predicated", [RetVector, Vector, Predicate]>
+ $inactive, $a, (unsignedflag RetScalar), (unsignedflag Scalar),
+ top, $pred)>,
+ NameOverride<NAME # "_m">;
+ }
+}
+
+let params = [s16, s32, u16, u32] in {
+ defm vqmovntq: vqmovn<1, HalfScalar>;
+ defm vqmovnbq: vqmovn<0, HalfScalar>;
+}
+let params = [s16, s32] in {
+ defm vqmovuntq: vqmovn<1, UHalfScalar>;
+ defm vqmovunbq: vqmovn<0, UHalfScalar>;
+}
+
multiclass vrnd<IRIntBase ir_int, string suffix> {
let params = T.Float in {
def "": Intrinsic<Vector, (args Vector:$a), (ir_int $a)>;
diff --git a/clang/include/clang/Basic/arm_mve_defs.td b/clang/include/clang/Basic/arm_mve_defs.td
index dbcad78cce75..daf73871f052 100644
--- a/clang/include/clang/Basic/arm_mve_defs.td
+++ b/clang/include/clang/Basic/arm_mve_defs.td
@@ -323,8 +323,10 @@ def SVector: VecOf<SScalar>;
// UHalfVector is a vector of half-sized _unsigned integers_.
def DblVector: VecOf<DoubleSize<Scalar>>;
def DblPredicate: PredOf<DoubleSize<Scalar>>;
-def HalfVector: VecOf<HalfSize<Scalar>>;
-def UHalfVector: VecOf<Unsigned<HalfSize<Scalar>>>;
+def HalfScalar: HalfSize<Scalar>;
+def HalfVector: VecOf<HalfScalar>;
+def UHalfScalar: Unsigned<HalfSize<Scalar>>;
+def UHalfVector: VecOf<UHalfScalar>;
// Expands to the 32-bit integer of the same signedness as Scalar.
def Scalar32: CopyKind<u32, Scalar>;
diff --git a/clang/test/CodeGen/arm-mve-intrinsics/vqmovn.c b/clang/test/CodeGen/arm-mve-intrinsics/vqmovn.c
new file mode 100644
index 000000000000..24c3fd550bf4
--- /dev/null
+++ b/clang/test/CodeGen/arm-mve-intrinsics/vqmovn.c
@@ -0,0 +1,366 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
+// RUN: %clang_cc1 -DPOLYMORPHIC -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
+
+#include <arm_mve.h>
+
+// CHECK-LABEL: @test_vqmovnbq_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 0, i32 0)
+// CHECK-NEXT: ret <16 x i8> [[TMP0]]
+//
+int8x16_t test_vqmovnbq_s16(int8x16_t a, int16x8_t b)
+{
+#ifdef POLYMORPHIC
+ return vqmovnbq(a, b);
+#else /* POLYMORPHIC */
+ return vqmovnbq_s16(a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqmovnbq_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vqmovn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 0, i32 0)
+// CHECK-NEXT: ret <8 x i16> [[TMP0]]
+//
+int16x8_t test_vqmovnbq_s32(int16x8_t a, int32x4_t b)
+{
+#ifdef POLYMORPHIC
+ return vqmovnbq(a, b);
+#else /* POLYMORPHIC */
+ return vqmovnbq_s32(a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqmovnbq_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 1, i32 0)
+// CHECK-NEXT: ret <16 x i8> [[TMP0]]
+//
+uint8x16_t test_vqmovnbq_u16(uint8x16_t a, uint16x8_t b)
+{
+#ifdef POLYMORPHIC
+ return vqmovnbq(a, b);
+#else /* POLYMORPHIC */
+ return vqmovnbq_u16(a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqmovnbq_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vqmovn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 1, i32 0)
+// CHECK-NEXT: ret <8 x i16> [[TMP0]]
+//
+uint16x8_t test_vqmovnbq_u32(uint16x8_t a, uint32x4_t b)
+{
+#ifdef POLYMORPHIC
+ return vqmovnbq(a, b);
+#else /* POLYMORPHIC */
+ return vqmovnbq_u32(a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqmovntq_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 0, i32 1)
+// CHECK-NEXT: ret <16 x i8> [[TMP0]]
+//
+int8x16_t test_vqmovntq_s16(int8x16_t a, int16x8_t b)
+{
+#ifdef POLYMORPHIC
+ return vqmovntq(a, b);
+#else /* POLYMORPHIC */
+ return vqmovntq_s16(a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqmovntq_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vqmovn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 0, i32 1)
+// CHECK-NEXT: ret <8 x i16> [[TMP0]]
+//
+int16x8_t test_vqmovntq_s32(int16x8_t a, int32x4_t b)
+{
+#ifdef POLYMORPHIC
+ return vqmovntq(a, b);
+#else /* POLYMORPHIC */
+ return vqmovntq_s32(a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqmovntq_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 1, i32 1)
+// CHECK-NEXT: ret <16 x i8> [[TMP0]]
+//
+uint8x16_t test_vqmovntq_u16(uint8x16_t a, uint16x8_t b)
+{
+#ifdef POLYMORPHIC
+ return vqmovntq(a, b);
+#else /* POLYMORPHIC */
+ return vqmovntq_u16(a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqmovntq_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vqmovn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 1, i32 1)
+// CHECK-NEXT: ret <8 x i16> [[TMP0]]
+//
+uint16x8_t test_vqmovntq_u32(uint16x8_t a, uint32x4_t b)
+{
+#ifdef POLYMORPHIC
+ return vqmovntq(a, b);
+#else /* POLYMORPHIC */
+ return vqmovntq_u32(a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqmovunbq_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 0, i32 0)
+// CHECK-NEXT: ret <16 x i8> [[TMP0]]
+//
+uint8x16_t test_vqmovunbq_s16(uint8x16_t a, int16x8_t b)
+{
+#ifdef POLYMORPHIC
+ return vqmovunbq(a, b);
+#else /* POLYMORPHIC */
+ return vqmovunbq_s16(a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqmovunbq_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vqmovn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 0, i32 0)
+// CHECK-NEXT: ret <8 x i16> [[TMP0]]
+//
+uint16x8_t test_vqmovunbq_s32(uint16x8_t a, int32x4_t b)
+{
+#ifdef POLYMORPHIC
+ return vqmovunbq(a, b);
+#else /* POLYMORPHIC */
+ return vqmovunbq_s32(a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqmovuntq_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 0, i32 1)
+// CHECK-NEXT: ret <16 x i8> [[TMP0]]
+//
+uint8x16_t test_vqmovuntq_s16(uint8x16_t a, int16x8_t b)
+{
+#ifdef POLYMORPHIC
+ return vqmovuntq(a, b);
+#else /* POLYMORPHIC */
+ return vqmovuntq_s16(a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqmovuntq_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vqmovn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 0, i32 1)
+// CHECK-NEXT: ret <8 x i16> [[TMP0]]
+//
+uint16x8_t test_vqmovuntq_s32(uint16x8_t a, int32x4_t b)
+{
+#ifdef POLYMORPHIC
+ return vqmovuntq(a, b);
+#else /* POLYMORPHIC */
+ return vqmovuntq_s32(a, b);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqmovnbq_m_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vqmovn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 0, i32 0, <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <16 x i8> [[TMP2]]
+//
+int8x16_t test_vqmovnbq_m_s16(int8x16_t a, int16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vqmovnbq_m(a, b, p);
+#else /* POLYMORPHIC */
+ return vqmovnbq_m_s16(a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqmovnbq_m_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vqmovn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 0, i32 0, <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vqmovnbq_m_s32(int16x8_t a, int32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vqmovnbq_m(a, b, p);
+#else /* POLYMORPHIC */
+ return vqmovnbq_m_s32(a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqmovnbq_m_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vqmovn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 1, i32 0, <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <16 x i8> [[TMP2]]
+//
+uint8x16_t test_vqmovnbq_m_u16(uint8x16_t a, uint16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vqmovnbq_m(a, b, p);
+#else /* POLYMORPHIC */
+ return vqmovnbq_m_u16(a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqmovnbq_m_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vqmovn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 1, i32 0, <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x i16> [[TMP2]]
+//
+uint16x8_t test_vqmovnbq_m_u32(uint16x8_t a, uint32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vqmovnbq_m(a, b, p);
+#else /* POLYMORPHIC */
+ return vqmovnbq_m_u32(a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqmovntq_m_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vqmovn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 0, i32 1, <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <16 x i8> [[TMP2]]
+//
+int8x16_t test_vqmovntq_m_s16(int8x16_t a, int16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vqmovntq_m(a, b, p);
+#else /* POLYMORPHIC */
+ return vqmovntq_m_s16(a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqmovntq_m_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vqmovn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 0, i32 1, <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vqmovntq_m_s32(int16x8_t a, int32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vqmovntq_m(a, b, p);
+#else /* POLYMORPHIC */
+ return vqmovntq_m_s32(a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqmovntq_m_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vqmovn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 1, i32 1, <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <16 x i8> [[TMP2]]
+//
+uint8x16_t test_vqmovntq_m_u16(uint8x16_t a, uint16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vqmovntq_m(a, b, p);
+#else /* POLYMORPHIC */
+ return vqmovntq_m_u16(a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqmovntq_m_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vqmovn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 1, i32 1, <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x i16> [[TMP2]]
+//
+uint16x8_t test_vqmovntq_m_u32(uint16x8_t a, uint32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vqmovntq_m(a, b, p);
+#else /* POLYMORPHIC */
+ return vqmovntq_m_u32(a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqmovunbq_m_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vqmovn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 0, i32 0, <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <16 x i8> [[TMP2]]
+//
+uint8x16_t test_vqmovunbq_m_s16(uint8x16_t a, int16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vqmovunbq_m(a, b, p);
+#else /* POLYMORPHIC */
+ return vqmovunbq_m_s16(a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqmovunbq_m_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vqmovn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 0, i32 0, <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x i16> [[TMP2]]
+//
+uint16x8_t test_vqmovunbq_m_s32(uint16x8_t a, int32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vqmovunbq_m(a, b, p);
+#else /* POLYMORPHIC */
+ return vqmovunbq_m_s32(a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqmovuntq_m_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vqmovn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 0, i32 1, <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <16 x i8> [[TMP2]]
+//
+uint8x16_t test_vqmovuntq_m_s16(uint8x16_t a, int16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vqmovuntq_m(a, b, p);
+#else /* POLYMORPHIC */
+ return vqmovuntq_m_s16(a, b, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vqmovuntq_m_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vqmovn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 0, i32 1, <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x i16> [[TMP2]]
+//
+uint16x8_t test_vqmovuntq_m_s32(uint16x8_t a, int32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vqmovuntq_m(a, b, p);
+#else /* POLYMORPHIC */
+ return vqmovuntq_m_s32(a, b, p);
+#endif /* POLYMORPHIC */
+}
+
diff --git a/llvm/include/llvm/IR/IntrinsicsARM.td b/llvm/include/llvm/IR/IntrinsicsARM.td
index 4e939fb4bc3a..743d93546af7 100644
--- a/llvm/include/llvm/IR/IntrinsicsARM.td
+++ b/llvm/include/llvm/IR/IntrinsicsARM.td
@@ -1211,4 +1211,13 @@ def int_arm_mve_vmovn_predicated: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyvector_ty, llvm_i32_ty /* top half */,
llvm_anyvector_ty /* predicate */], [IntrNoMem]>;
+def int_arm_mve_vqmovn: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty,
+ llvm_i32_ty /* unsigned output */, llvm_i32_ty /* unsigned input */,
+ llvm_i32_ty /* top half */], [IntrNoMem]>;
+def int_arm_mve_vqmovn_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty,
+ llvm_i32_ty /* unsigned output */, llvm_i32_ty /* unsigned input */,
+ llvm_i32_ty /* top half */, llvm_anyvector_ty /* pred */], [IntrNoMem]>;
+
} // end TargetPrefix
diff --git a/llvm/lib/Target/ARM/ARMInstrMVE.td b/llvm/lib/Target/ARM/ARMInstrMVE.td
index ddd9d29afcfc..995c9c949a00 100644
--- a/llvm/lib/Target/ARM/ARMInstrMVE.td
+++ b/llvm/lib/Target/ARM/ARMInstrMVE.td
@@ -4452,6 +4452,35 @@ defm : MVE_VMOVN_p<MVE_VMOVNi32th, 1, MVE_v8i16, MVE_v4i32>;
defm : MVE_VMOVN_p<MVE_VMOVNi16bh, 0, MVE_v16i8, MVE_v8i16>;
defm : MVE_VMOVN_p<MVE_VMOVNi16th, 1, MVE_v16i8, MVE_v8i16>;
+multiclass MVE_VQMOVN_p<Instruction Inst, bit outU, bit inU, bit top,
+ MVEVectorVTInfo VTI, MVEVectorVTInfo InVTI> {
+ def : Pat<(VTI.Vec (int_arm_mve_vqmovn (VTI.Vec MQPR:$Qd_src),
+ (InVTI.Vec MQPR:$Qm),
+ (i32 outU), (i32 inU), (i32 top))),
+ (VTI.Vec (Inst (VTI.Vec MQPR:$Qd_src),
+ (InVTI.Vec MQPR:$Qm)))>;
+
+ def : Pat<(VTI.Vec (int_arm_mve_vqmovn_predicated (VTI.Vec MQPR:$Qd_src),
+ (InVTI.Vec MQPR:$Qm),
+ (i32 outU), (i32 inU), (i32 top),
+ (InVTI.Pred VCCR:$pred))),
+ (VTI.Vec (Inst (VTI.Vec MQPR:$Qd_src),
+ (InVTI.Vec MQPR:$Qm),
+ ARMVCCThen, (InVTI.Pred VCCR:$pred)))>;
+}
+
+defm : MVE_VQMOVN_p<MVE_VQMOVNs32bh, 0, 0, 0, MVE_v8i16, MVE_v4i32>;
+defm : MVE_VQMOVN_p<MVE_VQMOVNs32th, 0, 0, 1, MVE_v8i16, MVE_v4i32>;
+defm : MVE_VQMOVN_p<MVE_VQMOVNs16bh, 0, 0, 0, MVE_v16i8, MVE_v8i16>;
+defm : MVE_VQMOVN_p<MVE_VQMOVNs16th, 0, 0, 1, MVE_v16i8, MVE_v8i16>;
+defm : MVE_VQMOVN_p<MVE_VQMOVNu32bh, 1, 1, 0, MVE_v8i16, MVE_v4i32>;
+defm : MVE_VQMOVN_p<MVE_VQMOVNu32th, 1, 1, 1, MVE_v8i16, MVE_v4i32>;
+defm : MVE_VQMOVN_p<MVE_VQMOVNu16bh, 1, 1, 0, MVE_v16i8, MVE_v8i16>;
+defm : MVE_VQMOVN_p<MVE_VQMOVNu16th, 1, 1, 1, MVE_v16i8, MVE_v8i16>;
+defm : MVE_VQMOVN_p<MVE_VQMOVUNs32bh, 1, 0, 0, MVE_v8i16, MVE_v4i32>;
+defm : MVE_VQMOVN_p<MVE_VQMOVUNs32th, 1, 0, 1, MVE_v8i16, MVE_v4i32>;
+defm : MVE_VQMOVN_p<MVE_VQMOVUNs16bh, 1, 0, 0, MVE_v16i8, MVE_v8i16>;
+defm : MVE_VQMOVN_p<MVE_VQMOVUNs16th, 1, 0, 1, MVE_v16i8, MVE_v8i16>;
class MVE_VCVT_ff<string iname, string suffix, bit op, bit T,
list<dag> pattern=[]>
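A quick usage sketch for the predicated forms (not part of the commit;
the function name is invented): each _m intrinsic takes a trailing
mve_pred16_t, and lanes where the predicate is false keep their values
from the 'inactive' first argument. Through the MVE_VQMOVN_p patterns
above, the predicated intrinsic selects the same instruction inside a
VPT block, as the new llc test below checks:

  #include <arm_mve.h>

  /* Compiles to:
       vmsr p0, r0
       vpst
       vqmovnbt.s16 q0, q1
     i.e. the T-suffixed instruction executed under a VPT block. */
  int8x16_t narrow_masked(int8x16_t inactive, int16x8_t a, mve_pred16_t p)
  { return vqmovnbq_m_s16(inactive, a, p); }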
diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vqmovn.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vqmovn.ll
new file mode 100644
index 000000000000..c8ff549321fe
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vqmovn.ll
@@ -0,0 +1,299 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqmovnbq_s16(<16 x i8> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vqmovnbq_s16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqmovnb.s16 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %0 = tail call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> %a, <8 x i16> %b, i32 0, i32 0, i32 0)
+ ret <16 x i8> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqmovnbq_s32(<8 x i16> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vqmovnbq_s32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqmovnb.s32 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %0 = tail call <8 x i16> @llvm.arm.mve.vqmovn.v8i16.v4i32(<8 x i16> %a, <4 x i32> %b, i32 0, i32 0, i32 0)
+ ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqmovnbq_u16(<16 x i8> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vqmovnbq_u16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqmovnb.u16 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %0 = tail call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> %a, <8 x i16> %b, i32 1, i32 1, i32 0)
+ ret <16 x i8> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqmovnbq_u32(<8 x i16> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vqmovnbq_u32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqmovnb.u32 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %0 = tail call <8 x i16> @llvm.arm.mve.vqmovn.v8i16.v4i32(<8 x i16> %a, <4 x i32> %b, i32 1, i32 1, i32 0)
+ ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqmovntq_s16(<16 x i8> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vqmovntq_s16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqmovnt.s16 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %0 = tail call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> %a, <8 x i16> %b, i32 0, i32 0, i32 1)
+ ret <16 x i8> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqmovntq_s32(<8 x i16> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vqmovntq_s32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqmovnt.s32 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %0 = tail call <8 x i16> @llvm.arm.mve.vqmovn.v8i16.v4i32(<8 x i16> %a, <4 x i32> %b, i32 0, i32 0, i32 1)
+ ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqmovntq_u16(<16 x i8> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vqmovntq_u16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqmovnt.u16 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %0 = tail call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> %a, <8 x i16> %b, i32 1, i32 1, i32 1)
+ ret <16 x i8> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqmovntq_u32(<8 x i16> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vqmovntq_u32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqmovnt.u32 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %0 = tail call <8 x i16> @llvm.arm.mve.vqmovn.v8i16.v4i32(<8 x i16> %a, <4 x i32> %b, i32 1, i32 1, i32 1)
+ ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqmovunbq_s16(<16 x i8> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vqmovunbq_s16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqmovunb.s16 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %0 = tail call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> %a, <8 x i16> %b, i32 1, i32 0, i32 0)
+ ret <16 x i8> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqmovunbq_s32(<8 x i16> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vqmovunbq_s32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqmovunb.s32 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %0 = tail call <8 x i16> @llvm.arm.mve.vqmovn.v8i16.v4i32(<8 x i16> %a, <4 x i32> %b, i32 1, i32 0, i32 0)
+ ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqmovuntq_s16(<16 x i8> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vqmovuntq_s16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqmovunt.s16 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %0 = tail call <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8> %a, <8 x i16> %b, i32 1, i32 0, i32 1)
+ ret <16 x i8> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqmovuntq_s32(<8 x i16> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vqmovuntq_s32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vqmovunt.s32 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %0 = tail call <8 x i16> @llvm.arm.mve.vqmovn.v8i16.v4i32(<8 x i16> %a, <4 x i32> %b, i32 1, i32 0, i32 1)
+ ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqmovnbq_m_s16(<16 x i8> %a, <8 x i16> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqmovnbq_m_s16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmsr p0, r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vqmovnbt.s16 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %0 = zext i16 %p to i32
+ %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+ %2 = tail call <16 x i8> @llvm.arm.mve.vqmovn.predicated.v16i8.v8i16.v8i1(<16 x i8> %a, <8 x i16> %b, i32 0, i32 0, i32 0, <8 x i1> %1)
+ ret <16 x i8> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqmovnbq_m_s32(<8 x i16> %a, <4 x i32> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqmovnbq_m_s32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmsr p0, r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vqmovnbt.s32 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %0 = zext i16 %p to i32
+ %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+ %2 = tail call <8 x i16> @llvm.arm.mve.vqmovn.predicated.v8i16.v4i32.v4i1(<8 x i16> %a, <4 x i32> %b, i32 0, i32 0, i32 0, <4 x i1> %1)
+ ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqmovnbq_m_u16(<16 x i8> %a, <8 x i16> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqmovnbq_m_u16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmsr p0, r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vqmovnbt.u16 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %0 = zext i16 %p to i32
+ %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+ %2 = tail call <16 x i8> @llvm.arm.mve.vqmovn.predicated.v16i8.v8i16.v8i1(<16 x i8> %a, <8 x i16> %b, i32 1, i32 1, i32 0, <8 x i1> %1)
+ ret <16 x i8> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqmovnbq_m_u32(<8 x i16> %a, <4 x i32> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqmovnbq_m_u32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmsr p0, r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vqmovnbt.u32 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %0 = zext i16 %p to i32
+ %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+ %2 = tail call <8 x i16> @llvm.arm.mve.vqmovn.predicated.v8i16.v4i32.v4i1(<8 x i16> %a, <4 x i32> %b, i32 1, i32 1, i32 0, <4 x i1> %1)
+ ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqmovntq_m_s16(<16 x i8> %a, <8 x i16> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqmovntq_m_s16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmsr p0, r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vqmovntt.s16 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %0 = zext i16 %p to i32
+ %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+ %2 = tail call <16 x i8> @llvm.arm.mve.vqmovn.predicated.v16i8.v8i16.v8i1(<16 x i8> %a, <8 x i16> %b, i32 0, i32 0, i32 1, <8 x i1> %1)
+ ret <16 x i8> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqmovntq_m_s32(<8 x i16> %a, <4 x i32> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqmovntq_m_s32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmsr p0, r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vqmovntt.s32 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %0 = zext i16 %p to i32
+ %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+ %2 = tail call <8 x i16> @llvm.arm.mve.vqmovn.predicated.v8i16.v4i32.v4i1(<8 x i16> %a, <4 x i32> %b, i32 0, i32 0, i32 1, <4 x i1> %1)
+ ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqmovntq_m_u16(<16 x i8> %a, <8 x i16> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqmovntq_m_u16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmsr p0, r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vqmovntt.u16 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %0 = zext i16 %p to i32
+ %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+ %2 = tail call <16 x i8> @llvm.arm.mve.vqmovn.predicated.v16i8.v8i16.v8i1(<16 x i8> %a, <8 x i16> %b, i32 1, i32 1, i32 1, <8 x i1> %1)
+ ret <16 x i8> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqmovntq_m_u32(<8 x i16> %a, <4 x i32> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqmovntq_m_u32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmsr p0, r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vqmovntt.u32 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %0 = zext i16 %p to i32
+ %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+ %2 = tail call <8 x i16> @llvm.arm.mve.vqmovn.predicated.v8i16.v4i32.v4i1(<8 x i16> %a, <4 x i32> %b, i32 1, i32 1, i32 1, <4 x i1> %1)
+ ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqmovunbq_m_s16(<16 x i8> %a, <8 x i16> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqmovunbq_m_s16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmsr p0, r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vqmovunbt.s16 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %0 = zext i16 %p to i32
+ %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+ %2 = tail call <16 x i8> @llvm.arm.mve.vqmovn.predicated.v16i8.v8i16.v8i1(<16 x i8> %a, <8 x i16> %b, i32 1, i32 0, i32 0, <8 x i1> %1)
+ ret <16 x i8> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqmovunbq_m_s32(<8 x i16> %a, <4 x i32> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqmovunbq_m_s32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmsr p0, r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vqmovunbt.s32 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %0 = zext i16 %p to i32
+ %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+ %2 = tail call <8 x i16> @llvm.arm.mve.vqmovn.predicated.v8i16.v4i32.v4i1(<8 x i16> %a, <4 x i32> %b, i32 1, i32 0, i32 0, <4 x i1> %1)
+ ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vqmovuntq_m_s16(<16 x i8> %a, <8 x i16> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqmovuntq_m_s16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmsr p0, r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vqmovuntt.s16 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %0 = zext i16 %p to i32
+ %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+ %2 = tail call <16 x i8> @llvm.arm.mve.vqmovn.predicated.v16i8.v8i16.v8i1(<16 x i8> %a, <8 x i16> %b, i32 1, i32 0, i32 1, <8 x i1> %1)
+ ret <16 x i8> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vqmovuntq_m_s32(<8 x i16> %a, <4 x i32> %b, i16 zeroext %p) {
+; CHECK-LABEL: test_vqmovuntq_m_s32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmsr p0, r0
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vqmovuntt.s32 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %0 = zext i16 %p to i32
+ %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+ %2 = tail call <8 x i16> @llvm.arm.mve.vqmovn.predicated.v8i16.v4i32.v4i1(<8 x i16> %a, <4 x i32> %b, i32 1, i32 0, i32 1, <4 x i1> %1)
+ ret <8 x i16> %2
+}
+
+declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32)
+declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32)
+
+declare <16 x i8> @llvm.arm.mve.vqmovn.v16i8.v8i16(<16 x i8>, <8 x i16>, i32, i32, i32)
+declare <8 x i16> @llvm.arm.mve.vqmovn.v8i16.v4i32(<8 x i16>, <4 x i32>, i32, i32, i32)
+
+declare <16 x i8> @llvm.arm.mve.vqmovn.predicated.v16i8.v8i16.v8i1(<16 x i8>, <8 x i16>, i32, i32, i32, <8 x i1>)
+declare <8 x i16> @llvm.arm.mve.vqmovn.predicated.v8i16.v4i32.v4i1(<8 x i16>, <4 x i32>, i32, i32, i32, <4 x i1>)