[clang] cf7e98e - [ARM,MVE] Add intrinsics for vdupq.

Simon Tatham via cfe-commits <cfe-commits at lists.llvm.org>
Mon Feb 3 03:20:19 PST 2020


Author: Simon Tatham
Date: 2020-02-03T11:20:06Z
New Revision: cf7e98e6f7805f4e2693a6dbbd12c10fe06fde70

URL: https://github.com/llvm/llvm-project/commit/cf7e98e6f7805f4e2693a6dbbd12c10fe06fde70
DIFF: https://github.com/llvm/llvm-project/commit/cf7e98e6f7805f4e2693a6dbbd12c10fe06fde70.diff

LOG: [ARM,MVE] Add intrinsics for vdupq.

Summary:
The unpredicated case of this intrinsic is trivial: the clang codegen
just emits a vector splat of the input, and LLVM isel is already
prepared to handle that. For the predicated version, I've generated a
`select` between the same vector splat and the `inactive` input
parameter, and added new TableGen isel rules to match that pattern
into a predicated `MVE_VDUP` instruction.
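
For a rough idea of how the new intrinsics are meant to be used from C, here
is a minimal sketch (not taken from the patch; the helper function name and
the use of vaddq_s32 to combine the results are illustrative only, and the
code assumes the usual MVE target options from the test RUN lines below):

#include <arm_mve.h>

/* Illustrative only: exercise the unpredicated, merging (_m) and
 * "don't care" (_x) forms of vdupq for the s32 type. */
int32x4_t splat_demo(int32x4_t inactive, int32_t x, mve_pred16_t p)
{
    int32x4_t a = vdupq_n_s32(x);                /* plain vector splat */
    int32x4_t b = vdupq_m_n_s32(inactive, x, p); /* splat merged with 'inactive' under predicate p */
    int32x4_t c = vdupq_x_n_s32(x, p);           /* predicated splat, inactive lanes unspecified */
    return vaddq_s32(vaddq_s32(a, b), c);
}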

Reviewers: dmgreen, MarkMurrayARM, miyuki, ostannard

Reviewed By: dmgreen

Subscribers: kristof.beyls, hiraditya, cfe-commits, llvm-commits

Tags: #clang, #llvm

Differential Revision: https://reviews.llvm.org/D73356

Added: 
    clang/test/CodeGen/arm-mve-intrinsics/dup.c
    llvm/test/CodeGen/Thumb2/mve-intrinsics/dup.ll

Modified: 
    clang/include/clang/Basic/arm_mve.td
    llvm/lib/Target/ARM/ARMInstrMVE.td

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/arm_mve.td b/clang/include/clang/Basic/arm_mve.td
index ee0ce25bf516..e9ad26a4e88e 100644
--- a/clang/include/clang/Basic/arm_mve.td
+++ b/clang/include/clang/Basic/arm_mve.td
@@ -138,6 +138,15 @@ let params = !listconcat(T.Int16, T.Int32) in {
     (select $pred, (or $v, (splat (Scalar $imm))), $v)>;
 }
 
+let params = T.Usual in {
+  let pnt = PNT_None in
+    def vdupq_n: Intrinsic<Vector, (args unpromoted<Scalar>:$s), (splat $s)>;
+
+  defm vdupq: IntrinsicMX<
+      Vector, (args unpromoted<Scalar>:$s, Predicate:$pred),
+      (select $pred, (splat $s), $inactive), 1, "_n", PNT_NType, PNT_None>;
+}
+
 // The bitcasting below is not overcomplicating the IR because while
 // Vector and UVector may be different vector types at the C level i.e.
 // vectors of same size signed/unsigned ints. Once they're lowered

diff --git a/clang/test/CodeGen/arm-mve-intrinsics/dup.c b/clang/test/CodeGen/arm-mve-intrinsics/dup.c
new file mode 100644
index 000000000000..3bcec9d2549e
--- /dev/null
+++ b/clang/test/CodeGen/arm-mve-intrinsics/dup.c
@@ -0,0 +1,351 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg -sroa -early-cse | FileCheck %s
+// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -DPOLYMORPHIC -S -emit-llvm -o - %s | opt -S -mem2reg -sroa -early-cse | FileCheck %s
+
+#include <arm_mve.h>
+
+// CHECK-LABEL: @test_vdupq_n_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast float [[A_COERCE:%.*]] to i32
+// CHECK-NEXT:    [[TMP_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[TMP0]] to i16
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast i16 [[TMP_0_EXTRACT_TRUNC]] to half
+// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> undef, half [[TMP1]], i32 0
+// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> undef, <8 x i32> zeroinitializer
+// CHECK-NEXT:    ret <8 x half> [[DOTSPLAT]]
+//
+float16x8_t test_vdupq_n_f16(float16_t a)
+{
+    return vdupq_n_f16(a);
+}
+
+// CHECK-LABEL: @test_vdupq_n_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> undef, float [[A:%.*]], i32 0
+// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> undef, <4 x i32> zeroinitializer
+// CHECK-NEXT:    ret <4 x float> [[DOTSPLAT]]
+//
+float32x4_t test_vdupq_n_f32(float32_t a)
+{
+    return vdupq_n_f32(a);
+}
+
+// CHECK-LABEL: @test_vdupq_n_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> undef, i8 [[A:%.*]], i32 0
+// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> undef, <16 x i32> zeroinitializer
+// CHECK-NEXT:    ret <16 x i8> [[DOTSPLAT]]
+//
+int8x16_t test_vdupq_n_s8(int8_t a)
+{
+    return vdupq_n_s8(a);
+}
+
+// CHECK-LABEL: @test_vdupq_n_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> undef, i16 [[A:%.*]], i32 0
+// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> undef, <8 x i32> zeroinitializer
+// CHECK-NEXT:    ret <8 x i16> [[DOTSPLAT]]
+//
+int16x8_t test_vdupq_n_s16(int16_t a)
+{
+    return vdupq_n_s16(a);
+}
+
+// CHECK-LABEL: @test_vdupq_n_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> undef, i32 [[A:%.*]], i32 0
+// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK-NEXT:    ret <4 x i32> [[DOTSPLAT]]
+//
+int32x4_t test_vdupq_n_s32(int32_t a)
+{
+    return vdupq_n_s32(a);
+}
+
+// CHECK-LABEL: @test_vdupq_n_u8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> undef, i8 [[A:%.*]], i32 0
+// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> undef, <16 x i32> zeroinitializer
+// CHECK-NEXT:    ret <16 x i8> [[DOTSPLAT]]
+//
+uint8x16_t test_vdupq_n_u8(uint8_t a)
+{
+    return vdupq_n_u8(a);
+}
+
+// CHECK-LABEL: @test_vdupq_n_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> undef, i16 [[A:%.*]], i32 0
+// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> undef, <8 x i32> zeroinitializer
+// CHECK-NEXT:    ret <8 x i16> [[DOTSPLAT]]
+//
+uint16x8_t test_vdupq_n_u16(uint16_t a)
+{
+    return vdupq_n_u16(a);
+}
+
+// CHECK-LABEL: @test_vdupq_n_u32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> undef, i32 [[A:%.*]], i32 0
+// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK-NEXT:    ret <4 x i32> [[DOTSPLAT]]
+//
+uint32x4_t test_vdupq_n_u32(uint32_t a)
+{
+    return vdupq_n_u32(a);
+}
+
+// CHECK-LABEL: @test_vdupq_m_n_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast float [[A_COERCE:%.*]] to i32
+// CHECK-NEXT:    [[TMP_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[TMP0]] to i16
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast i16 [[TMP_0_EXTRACT_TRUNC]] to half
+// CHECK-NEXT:    [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP3:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP2]])
+// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> undef, half [[TMP1]], i32 0
+// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> undef, <8 x i32> zeroinitializer
+// CHECK-NEXT:    [[TMP4:%.*]] = select <8 x i1> [[TMP3]], <8 x half> [[DOTSPLAT]], <8 x half> [[INACTIVE:%.*]]
+// CHECK-NEXT:    ret <8 x half> [[TMP4]]
+//
+float16x8_t test_vdupq_m_n_f16(float16x8_t inactive, float16_t a, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+    return vdupq_m(inactive, a, p);
+#else /* POLYMORPHIC */
+    return vdupq_m_n_f16(inactive, a, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vdupq_m_n_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> undef, float [[A:%.*]], i32 0
+// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> undef, <4 x i32> zeroinitializer
+// CHECK-NEXT:    [[TMP2:%.*]] = select <4 x i1> [[TMP1]], <4 x float> [[DOTSPLAT]], <4 x float> [[INACTIVE:%.*]]
+// CHECK-NEXT:    ret <4 x float> [[TMP2]]
+//
+float32x4_t test_vdupq_m_n_f32(float32x4_t inactive, float32_t a, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+    return vdupq_m(inactive, a, p);
+#else /* POLYMORPHIC */
+    return vdupq_m_n_f32(inactive, a, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vdupq_m_n_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> undef, i8 [[A:%.*]], i32 0
+// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> undef, <16 x i32> zeroinitializer
+// CHECK-NEXT:    [[TMP2:%.*]] = select <16 x i1> [[TMP1]], <16 x i8> [[DOTSPLAT]], <16 x i8> [[INACTIVE:%.*]]
+// CHECK-NEXT:    ret <16 x i8> [[TMP2]]
+//
+int8x16_t test_vdupq_m_n_s8(int8x16_t inactive, int8_t a, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+    return vdupq_m(inactive, a, p);
+#else /* POLYMORPHIC */
+    return vdupq_m_n_s8(inactive, a, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vdupq_m_n_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> undef, i16 [[A:%.*]], i32 0
+// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> undef, <8 x i32> zeroinitializer
+// CHECK-NEXT:    [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x i16> [[DOTSPLAT]], <8 x i16> [[INACTIVE:%.*]]
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vdupq_m_n_s16(int16x8_t inactive, int16_t a, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+    return vdupq_m(inactive, a, p);
+#else /* POLYMORPHIC */
+    return vdupq_m_n_s16(inactive, a, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vdupq_m_n_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> undef, i32 [[A:%.*]], i32 0
+// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK-NEXT:    [[TMP2:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[DOTSPLAT]], <4 x i32> [[INACTIVE:%.*]]
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vdupq_m_n_s32(int32x4_t inactive, int32_t a, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+    return vdupq_m(inactive, a, p);
+#else /* POLYMORPHIC */
+    return vdupq_m_n_s32(inactive, a, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vdupq_m_n_u8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> undef, i8 [[A:%.*]], i32 0
+// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> undef, <16 x i32> zeroinitializer
+// CHECK-NEXT:    [[TMP2:%.*]] = select <16 x i1> [[TMP1]], <16 x i8> [[DOTSPLAT]], <16 x i8> [[INACTIVE:%.*]]
+// CHECK-NEXT:    ret <16 x i8> [[TMP2]]
+//
+uint8x16_t test_vdupq_m_n_u8(uint8x16_t inactive, uint8_t a, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+    return vdupq_m(inactive, a, p);
+#else /* POLYMORPHIC */
+    return vdupq_m_n_u8(inactive, a, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vdupq_m_n_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> undef, i16 [[A:%.*]], i32 0
+// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> undef, <8 x i32> zeroinitializer
+// CHECK-NEXT:    [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x i16> [[DOTSPLAT]], <8 x i16> [[INACTIVE:%.*]]
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+uint16x8_t test_vdupq_m_n_u16(uint16x8_t inactive, uint16_t a, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+    return vdupq_m(inactive, a, p);
+#else /* POLYMORPHIC */
+    return vdupq_m_n_u16(inactive, a, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vdupq_m_n_u32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> undef, i32 [[A:%.*]], i32 0
+// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK-NEXT:    [[TMP2:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[DOTSPLAT]], <4 x i32> [[INACTIVE:%.*]]
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+uint32x4_t test_vdupq_m_n_u32(uint32x4_t inactive, uint32_t a, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+    return vdupq_m(inactive, a, p);
+#else /* POLYMORPHIC */
+    return vdupq_m_n_u32(inactive, a, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vdupq_x_n_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast float [[A_COERCE:%.*]] to i32
+// CHECK-NEXT:    [[TMP_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[TMP0]] to i16
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast i16 [[TMP_0_EXTRACT_TRUNC]] to half
+// CHECK-NEXT:    [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP3:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP2]])
+// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> undef, half [[TMP1]], i32 0
+// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> undef, <8 x i32> zeroinitializer
+// CHECK-NEXT:    ret <8 x half> [[DOTSPLAT]]
+//
+float16x8_t test_vdupq_x_n_f16(float16_t a, mve_pred16_t p)
+{
+    return vdupq_x_n_f16(a, p);
+}
+
+// CHECK-LABEL: @test_vdupq_x_n_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> undef, float [[A:%.*]], i32 0
+// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> undef, <4 x i32> zeroinitializer
+// CHECK-NEXT:    ret <4 x float> [[DOTSPLAT]]
+//
+float32x4_t test_vdupq_x_n_f32(float32_t a, mve_pred16_t p)
+{
+    return vdupq_x_n_f32(a, p);
+}
+
+// CHECK-LABEL: @test_vdupq_x_n_s8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> undef, i8 [[A:%.*]], i32 0
+// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> undef, <16 x i32> zeroinitializer
+// CHECK-NEXT:    ret <16 x i8> [[DOTSPLAT]]
+//
+int8x16_t test_vdupq_x_n_s8(int8_t a, mve_pred16_t p)
+{
+    return vdupq_x_n_s8(a, p);
+}
+
+// CHECK-LABEL: @test_vdupq_x_n_s16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> undef, i16 [[A:%.*]], i32 0
+// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> undef, <8 x i32> zeroinitializer
+// CHECK-NEXT:    ret <8 x i16> [[DOTSPLAT]]
+//
+int16x8_t test_vdupq_x_n_s16(int16_t a, mve_pred16_t p)
+{
+    return vdupq_x_n_s16(a, p);
+}
+
+// CHECK-LABEL: @test_vdupq_x_n_s32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> undef, i32 [[A:%.*]], i32 0
+// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK-NEXT:    ret <4 x i32> [[DOTSPLAT]]
+//
+int32x4_t test_vdupq_x_n_s32(int32_t a, mve_pred16_t p)
+{
+    return vdupq_x_n_s32(a, p);
+}
+
+// CHECK-LABEL: @test_vdupq_x_n_u8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> undef, i8 [[A:%.*]], i32 0
+// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> undef, <16 x i32> zeroinitializer
+// CHECK-NEXT:    ret <16 x i8> [[DOTSPLAT]]
+//
+uint8x16_t test_vdupq_x_n_u8(uint8_t a, mve_pred16_t p)
+{
+    return vdupq_x_n_u8(a, p);
+}
+
+// CHECK-LABEL: @test_vdupq_x_n_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> undef, i16 [[A:%.*]], i32 0
+// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> undef, <8 x i32> zeroinitializer
+// CHECK-NEXT:    ret <8 x i16> [[DOTSPLAT]]
+//
+uint16x8_t test_vdupq_x_n_u16(uint16_t a, mve_pred16_t p)
+{
+    return vdupq_x_n_u16(a, p);
+}
+
+// CHECK-LABEL: @test_vdupq_x_n_u32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> undef, i32 [[A:%.*]], i32 0
+// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK-NEXT:    ret <4 x i32> [[DOTSPLAT]]
+//
+uint32x4_t test_vdupq_x_n_u32(uint32_t a, mve_pred16_t p)
+{
+    return vdupq_x_n_u32(a, p);
+}
+

diff --git a/llvm/lib/Target/ARM/ARMInstrMVE.td b/llvm/lib/Target/ARM/ARMInstrMVE.td
index c27ea47e9bbe..69f496a7f793 100644
--- a/llvm/lib/Target/ARM/ARMInstrMVE.td
+++ b/llvm/lib/Target/ARM/ARMInstrMVE.td
@@ -2001,6 +2001,33 @@ let Predicates = [HasMVEInt] in {
             (MVE_VDUP32 (MVE_VMOV_from_lane_32 MQPR:$src, imm:$lane))>;
   def : Pat<(v8f16 (ARMvduplane (v8f16 MQPR:$src), imm:$lane)),
             (MVE_VDUP16 (MVE_VMOV_from_lane_u16 MQPR:$src, imm:$lane))>;
+
+  // Match a vselect with an ARMvdup as a predicated MVE_VDUP
+  def : Pat<(v16i8 (vselect (v16i1 VCCR:$pred),
+                            (v16i8 (ARMvdup (i32 rGPR:$elem))),
+                            (v16i8 MQPR:$inactive))),
+            (MVE_VDUP8  rGPR:$elem, ARMVCCThen, (v16i1 VCCR:$pred),
+                        (v16i8 MQPR:$inactive))>;
+  def : Pat<(v8i16 (vselect (v8i1 VCCR:$pred),
+                            (v8i16 (ARMvdup (i32 rGPR:$elem))),
+                            (v8i16 MQPR:$inactive))),
+            (MVE_VDUP16 rGPR:$elem, ARMVCCThen, (v8i1 VCCR:$pred),
+                            (v8i16 MQPR:$inactive))>;
+  def : Pat<(v4i32 (vselect (v4i1 VCCR:$pred),
+                            (v4i32 (ARMvdup (i32 rGPR:$elem))),
+                            (v4i32 MQPR:$inactive))),
+            (MVE_VDUP32 rGPR:$elem, ARMVCCThen, (v4i1 VCCR:$pred),
+                            (v4i32 MQPR:$inactive))>;
+  def : Pat<(v4f32 (vselect (v4i1 VCCR:$pred),
+                            (v4f32 (ARMvdup (f32 SPR:$elem))),
+                            (v4f32 MQPR:$inactive))),
+            (MVE_VDUP32 (i32 (COPY_TO_REGCLASS (f32 SPR:$elem), rGPR)),
+                      ARMVCCThen, (v4i1 VCCR:$pred), (v4f32 MQPR:$inactive))>;
+  def : Pat<(v8f16 (vselect (v8i1 VCCR:$pred),
+                            (v8f16 (ARMvdup (f16 HPR:$elem))),
+                            (v8f16 MQPR:$inactive))),
+            (MVE_VDUP16 (i32 (COPY_TO_REGCLASS (f16 HPR:$elem), rGPR)),
+                      ARMVCCThen, (v8i1 VCCR:$pred), (v8f16 MQPR:$inactive))>;
 }
 
 

diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/dup.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/dup.ll
new file mode 100644
index 000000000000..94e65c4f249f
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/dup.ll
@@ -0,0 +1,232 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s
+
+define arm_aapcs_vfpcc <8 x half> @test_vdupq_n_f16(float %a.coerce) {
+; CHECK-LABEL: test_vdupq_n_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vdup.16 q0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = bitcast float %a.coerce to i32
+  %tmp.0.extract.trunc = trunc i32 %0 to i16
+  %1 = bitcast i16 %tmp.0.extract.trunc to half
+  %.splatinsert = insertelement <8 x half> undef, half %1, i32 0
+  %.splat = shufflevector <8 x half> %.splatinsert, <8 x half> undef, <8 x i32> zeroinitializer
+  ret <8 x half> %.splat
+}
+
+define arm_aapcs_vfpcc <4 x float> @test_vdupq_n_f32(float %a) {
+; CHECK-LABEL: test_vdupq_n_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vdup.32 q0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %.splatinsert = insertelement <4 x float> undef, float %a, i32 0
+  %.splat = shufflevector <4 x float> %.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
+  ret <4 x float> %.splat
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vdupq_n_s8(i8 signext %a) {
+; CHECK-LABEL: test_vdupq_n_s8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vdup.8 q0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %.splatinsert = insertelement <16 x i8> undef, i8 %a, i32 0
+  %.splat = shufflevector <16 x i8> %.splatinsert, <16 x i8> undef, <16 x i32> zeroinitializer
+  ret <16 x i8> %.splat
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vdupq_n_s16(i16 signext %a) {
+; CHECK-LABEL: test_vdupq_n_s16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vdup.16 q0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %.splatinsert = insertelement <8 x i16> undef, i16 %a, i32 0
+  %.splat = shufflevector <8 x i16> %.splatinsert, <8 x i16> undef, <8 x i32> zeroinitializer
+  ret <8 x i16> %.splat
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vdupq_n_s32(i32 %a) {
+; CHECK-LABEL: test_vdupq_n_s32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vdup.32 q0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %.splatinsert = insertelement <4 x i32> undef, i32 %a, i32 0
+  %.splat = shufflevector <4 x i32> %.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
+  ret <4 x i32> %.splat
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vdupq_n_u8(i8 zeroext %a) {
+; CHECK-LABEL: test_vdupq_n_u8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vdup.8 q0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %.splatinsert = insertelement <16 x i8> undef, i8 %a, i32 0
+  %.splat = shufflevector <16 x i8> %.splatinsert, <16 x i8> undef, <16 x i32> zeroinitializer
+  ret <16 x i8> %.splat
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vdupq_n_u16(i16 zeroext %a) {
+; CHECK-LABEL: test_vdupq_n_u16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vdup.16 q0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %.splatinsert = insertelement <8 x i16> undef, i16 %a, i32 0
+  %.splat = shufflevector <8 x i16> %.splatinsert, <8 x i16> undef, <8 x i32> zeroinitializer
+  ret <8 x i16> %.splat
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vdupq_n_u32(i32 %a) {
+; CHECK-LABEL: test_vdupq_n_u32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vdup.32 q0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %.splatinsert = insertelement <4 x i32> undef, i32 %a, i32 0
+  %.splat = shufflevector <4 x i32> %.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
+  ret <4 x i32> %.splat
+}
+
+define arm_aapcs_vfpcc <8 x half> @test_vdupq_m_n_f16(<8 x half> %inactive, float %a.coerce, i16 zeroext %p) {
+; CHECK-LABEL: test_vdupq_m_n_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r1, s4
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vdupt.16 q0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = bitcast float %a.coerce to i32
+  %tmp.0.extract.trunc = trunc i32 %0 to i16
+  %1 = bitcast i16 %tmp.0.extract.trunc to half
+  %2 = zext i16 %p to i32
+  %3 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %2)
+  %.splatinsert = insertelement <8 x half> undef, half %1, i32 0
+  %.splat = shufflevector <8 x half> %.splatinsert, <8 x half> undef, <8 x i32> zeroinitializer
+  %4 = select <8 x i1> %3, <8 x half> %.splat, <8 x half> %inactive
+  ret <8 x half> %4
+}
+
+define arm_aapcs_vfpcc <4 x float> @test_vdupq_m_n_f32(<4 x float> %inactive, float %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vdupq_m_n_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vdupt.32 q0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %.splatinsert = insertelement <4 x float> undef, float %a, i32 0
+  %.splat = shufflevector <4 x float> %.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
+  %2 = select <4 x i1> %1, <4 x float> %.splat, <4 x float> %inactive
+  ret <4 x float> %2
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vdupq_m_n_s8(<16 x i8> %inactive, i8 signext %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vdupq_m_n_s8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r1
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vdupt.8 q0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
+  %.splatinsert = insertelement <16 x i8> undef, i8 %a, i32 0
+  %.splat = shufflevector <16 x i8> %.splatinsert, <16 x i8> undef, <16 x i32> zeroinitializer
+  %2 = select <16 x i1> %1, <16 x i8> %.splat, <16 x i8> %inactive
+  ret <16 x i8> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vdupq_m_n_s16(<8 x i16> %inactive, i16 signext %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vdupq_m_n_s16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r1
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vdupt.16 q0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %.splatinsert = insertelement <8 x i16> undef, i16 %a, i32 0
+  %.splat = shufflevector <8 x i16> %.splatinsert, <8 x i16> undef, <8 x i32> zeroinitializer
+  %2 = select <8 x i1> %1, <8 x i16> %.splat, <8 x i16> %inactive
+  ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vdupq_m_n_s32(<4 x i32> %inactive, i32 %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vdupq_m_n_s32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r1
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vdupt.32 q0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %.splatinsert = insertelement <4 x i32> undef, i32 %a, i32 0
+  %.splat = shufflevector <4 x i32> %.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
+  %2 = select <4 x i1> %1, <4 x i32> %.splat, <4 x i32> %inactive
+  ret <4 x i32> %2
+}
+
+define arm_aapcs_vfpcc <16 x i8> @test_vdupq_m_n_u8(<16 x i8> %inactive, i8 zeroext %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vdupq_m_n_u8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r1
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vdupt.8 q0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
+  %.splatinsert = insertelement <16 x i8> undef, i8 %a, i32 0
+  %.splat = shufflevector <16 x i8> %.splatinsert, <16 x i8> undef, <16 x i32> zeroinitializer
+  %2 = select <16 x i1> %1, <16 x i8> %.splat, <16 x i8> %inactive
+  ret <16 x i8> %2
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vdupq_m_n_u16(<8 x i16> %inactive, i16 zeroext %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vdupq_m_n_u16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r1
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vdupt.16 q0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %.splatinsert = insertelement <8 x i16> undef, i16 %a, i32 0
+  %.splat = shufflevector <8 x i16> %.splatinsert, <8 x i16> undef, <8 x i32> zeroinitializer
+  %2 = select <8 x i1> %1, <8 x i16> %.splat, <8 x i16> %inactive
+  ret <8 x i16> %2
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vdupq_m_n_u32(<4 x i32> %inactive, i32 %a, i16 zeroext %p) {
+; CHECK-LABEL: test_vdupq_m_n_u32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmsr p0, r1
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vdupt.32 q0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext i16 %p to i32
+  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %.splatinsert = insertelement <4 x i32> undef, i32 %a, i32 0
+  %.splat = shufflevector <4 x i32> %.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
+  %2 = select <4 x i1> %1, <4 x i32> %.splat, <4 x i32> %inactive
+  ret <4 x i32> %2
+}
+
+declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32)
+declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32)
+declare <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32)


        

