[llvm] [RISCV] Decompose locally repeating shuffles (without exact VLEN) (PR #125735)

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 4 19:10:31 PST 2025


https://github.com/preames updated https://github.com/llvm/llvm-project/pull/125735

>From 3527d8449cdec4486b5774826ab6f2c0dab39eea Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Mon, 3 Feb 2025 09:50:39 -0800
Subject: [PATCH 1/3] [RISCV] Decompose locally repeating shuffles (without
 exact VLEN)

High LMUL shuffles are expensive on typical SIMD implementations.  Without
exact vector length knowledge, we struggle to map elements within the
vector to registers within the vector register group.  However, there are
some patterns where we can perform a vector length agnostic (VLA) shuffle
by exploiting the structure of the pattern itself, even without the
ability to map individual elements to registers.  An existing in-tree
example is vector reverse.

This patch introduces another such case.  Specifically, if we have a
shuffle where a local rearrangement of elements happens within each
128b (really zvlNb) chunk, and the same pattern is applied to every
chunk, we can decompose a high LMUL shuffle into a linear number of m1
shuffles.  We take advantage of the fact that the tail of the operation
is undefined, and we repeat the pattern for all elements in the source
register group - not just the ones the fixed vector type covers.
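
To make the "locally repeating" condition concrete, here is a minimal
standalone sketch (editorial illustration, not part of the patch) of the
predicate added below.  It uses plain arrays instead of ArrayRef, and
exercises the masks from the new tests; Span = 2 assumes VLEN=128, where
one m1 register holds two i64 elements.

  #include <cstdio>

  // Every defined mask element must source from its own Span-sized chunk
  // and repeat the rearrangement chosen by the first chunk.
  static bool isLocalRepeatingShuffle(const int *Mask, int Len, int Span) {
    for (int I = 0; I < Len; ++I) {
      int M = Mask[I];
      if (M == -1) // undef element
        continue;
      int ChunkLo = I - (I % Span);
      int ChunkHi = ChunkLo + Span;
      if (M < ChunkLo || M >= ChunkHi || M - ChunkLo != Mask[I % Span])
        return false;
    }
    return true;
  }

  int main() {
    int Swap[8] = {1, 0, 3, 2, 5, 4, 7, 6}; // shuffle_v8i64_as_i128
    int Rot[8] = {2, 1, 0, 3, 6, 5, 4, 7};  // shuffle_v8i64_as_i256
    printf("%d\n", isLocalRepeatingShuffle(Swap, 8, 2)); // 1: decomposable
    printf("%d\n", isLocalRepeatingShuffle(Rot, 8, 2));  // 0: crosses chunks
    printf("%d\n", isLocalRepeatingShuffle(Rot, 8, 4));  // 1: local w/ zvl256b
    return 0;
  }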

This is an optimization for typical SIMD vrgather designs, but could be
a pessimization on hardware for which vrgather's execution cost is not
independent of the runtime VL.
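
To see why reusing one index register is sound, consider this scalar
model (again editorial; the array/function names are hypothetical
stand-ins for the vector registers): when the mask is locally repeating,
applying the whole-vector gather and applying the first span's pattern
per m1-sized chunk produce identical results, which is what lets the
lowering reuse the first register of the index group for every
sub-gather.

  #include <cassert>
  #include <cstddef>
  #include <vector>

  // Whole-vector gather: models one big LMUL>1 vrgather.
  std::vector<int> wholeShuffle(const std::vector<int> &Src,
                                const std::vector<int> &Mask) {
    std::vector<int> Res(Mask.size());
    for (size_t I = 0; I < Mask.size(); ++I)
      Res[I] = Src[Mask[I]];
    return Res;
  }

  // Decomposed form: reuse the first span of the mask for every chunk,
  // as the patch reuses the first index register for each m1 sub-gather.
  std::vector<int> decomposed(const std::vector<int> &Src,
                              const std::vector<int> &Mask, size_t Span) {
    std::vector<int> Res(Mask.size());
    for (size_t Reg = 0; Reg < Mask.size() / Span; ++Reg)
      for (size_t J = 0; J < Span; ++J)
        Res[Reg * Span + J] = Src[Reg * Span + Mask[J]];
    return Res;
  }

  int main() {
    std::vector<int> Src = {10, 11, 12, 13, 14, 15, 16, 17};
    std::vector<int> Mask = {1, 0, 3, 2, 5, 4, 7, 6}; // local, Span = 2
    assert(wholeShuffle(Src, Mask) == decomposed(Src, Mask, 2));
    return 0;
  }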
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |  55 +++++++-
 .../rvv/fixed-vectors-shuffle-reverse.ll      | 131 ++++++++++--------
 .../RISCV/rvv/fixed-vectors-shuffle-rotate.ll | 124 +++++++++++++++--
 3 files changed, 235 insertions(+), 75 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 7c3b58389da28ee..d34de540332942a 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5324,6 +5324,21 @@ static SDValue lowerDisjointIndicesShuffle(ShuffleVectorSDNode *SVN,
   return DAG.getVectorShuffle(VT, DL, Select, DAG.getUNDEF(VT), NewMask);
 }
 
+/// Is this mask local (i.e. elements only move within their local span), and
+/// repeating (that is, the same rearrangement is being done within each span)?
+static bool isLocalRepeatingShuffle(ArrayRef<int> Mask, int Span) {
+  // TODO: Could improve the case where undef elements exist in the first span.
+  for (auto [I, M] : enumerate(Mask)) {
+    if (M == -1)
+      continue;
+    int ChunkLo = I - (I % Span);
+    int ChunkHi = ChunkLo + Span;
+    if (M < ChunkLo || M >= ChunkHi || M - ChunkLo != Mask[I % Span])
+      return false;
+  }
+  return true;
+}
+
 /// Try to widen element type to get a new mask value for a better permutation
 /// sequence.  This doesn't try to inspect the widened mask for profitability;
 /// we speculate the widened form is equal or better.  This has the effect of
@@ -5685,10 +5700,42 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
                                  : DAG.getUNDEF(XLenVT));
     }
     SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
-    LHSIndices = convertToScalableVector(IndexContainerVT, LHSIndices, DAG,
-                                         Subtarget);
-    SDValue Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
-                                 DAG.getUNDEF(ContainerVT), TrueMask, VL);
+    LHSIndices =
+        convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
+
+    SDValue Gather;
+    // If we have a locally repeating mask, then we can reuse the first register
+    // in the index register group for all registers within the source register
+    // group.  TODO:: This generalizes to m2, and m4.  Also, this is currently
+    // picking up cases with a fully undef tail which could be more directly
+    // handled with fewer redundant vrgathers
+    const MVT M1VT = getLMUL1VT(ContainerVT);
+    auto VLMAX = RISCVTargetLowering::computeVLMAXBounds(M1VT, Subtarget).first;
+    if (ContainerVT.bitsGT(M1VT) && isLocalRepeatingShuffle(Mask, VLMAX)) {
+      EVT SubIndexVT = M1VT.changeVectorElementType(IndexVT.getScalarType());
+      SDValue SubIndex =
+          DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubIndexVT, LHSIndices,
+                      DAG.getVectorIdxConstant(0, DL));
+      auto [InnerTrueMask, InnerVL] =
+          getDefaultScalableVLOps(M1VT, DL, DAG, Subtarget);
+      int N = ContainerVT.getVectorMinNumElements() / M1VT.getVectorMinNumElements();
+      assert(isPowerOf2_32(N) && N <= 8);
+      Gather = DAG.getUNDEF(ContainerVT);
+      for (int i = 0; i < N; i++) {
+        SDValue SubIdx =
+            DAG.getVectorIdxConstant(M1VT.getVectorMinNumElements() * i, DL);
+        SDValue SubV1 =
+            DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, M1VT, V1, SubIdx);
+        SDValue SubVec =
+            DAG.getNode(GatherVVOpc, DL, M1VT, SubV1, SubIndex,
+                        DAG.getUNDEF(M1VT), InnerTrueMask, InnerVL);
+        Gather = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT, Gather,
+                             SubVec, SubIdx);
+      }
+    } else {
+      Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
+                           DAG.getUNDEF(ContainerVT), TrueMask, VL);
+    }
     return convertFromScalableVector(VT, Gather, DAG, Subtarget);
   }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
index 5fd7e47507f71e4..71a15077be6eb06 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
@@ -874,27 +874,30 @@ define <16 x i8> @reverse_v16i8_2(<8 x i8> %a, <8 x i8> %b) {
 define <32 x i8> @reverse_v32i8_2(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK-LABEL: reverse_v32i8_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    vid.v v12
-; CHECK-NEXT:    addi a1, a0, -1
-; CHECK-NEXT:    vrsub.vx v12, v12, a1
-; CHECK-NEXT:    lui a1, 16
-; CHECK-NEXT:    addi a1, a1, -1
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vid.v v10
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    addi a2, a0, -1
+; CHECK-NEXT:    vrsub.vx v10, v10, a2
+; CHECK-NEXT:    lui a2, 16
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vrgatherei16.vv v15, v8, v12
-; CHECK-NEXT:    vrgatherei16.vv v14, v9, v12
+; CHECK-NEXT:    vrgatherei16.vv v15, v8, v10
+; CHECK-NEXT:    vrgatherei16.vv v14, v12, v10
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    vid.v v10
+; CHECK-NEXT:    addi a2, a2, -1
+; CHECK-NEXT:    vrsub.vi v10, v10, 15
+; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vrgather.vv v17, v13, v10
+; CHECK-NEXT:    vrgather.vv v16, v9, v10
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vmv.s.x v0, a1
-; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vmv.s.x v0, a2
 ; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    addi a0, a0, -32
-; CHECK-NEXT:    vrsub.vi v12, v8, 15
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v14, a0
-; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
 ; CHECK-NEXT:    ret
   %res = shufflevector <16 x i8> %a, <16 x i8> %b,  <32 x i32> <i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
   ret <32 x i8> %res
@@ -943,23 +946,25 @@ define <8 x i16> @reverse_v8i16_2(<4 x i16> %a, <4 x i16> %b) {
 define <16 x i16> @reverse_v16i16_2(<8 x i16> %a, <8 x i16> %b) {
 ; CHECK-LABEL: reverse_v16i16_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vid.v v10
 ; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    vrsub.vi v10, v10, 7
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vrgather.vv v13, v12, v10
+; CHECK-NEXT:    vrgather.vv v12, v9, v10
 ; CHECK-NEXT:    vid.v v9
 ; CHECK-NEXT:    srli a1, a0, 1
 ; CHECK-NEXT:    addi a1, a1, -1
 ; CHECK-NEXT:    vrsub.vx v9, v9, a1
-; CHECK-NEXT:    vrgather.vv v13, v8, v9
-; CHECK-NEXT:    vrgather.vv v12, v11, v9
-; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
-; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    li a1, 255
 ; CHECK-NEXT:    addi a0, a0, -16
-; CHECK-NEXT:    vrsub.vi v14, v8, 7
+; CHECK-NEXT:    vrgather.vv v15, v8, v9
+; CHECK-NEXT:    vrgather.vv v14, v10, v9
 ; CHECK-NEXT:    vmv.s.x v0, a1
-; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    vrgather.vv v8, v10, v14, v0.t
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v14, a0
+; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
 ; CHECK-NEXT:    ret
   %res = shufflevector <8 x i16> %a, <8 x i16> %b,  <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
   ret <16 x i16> %res
@@ -1024,24 +1029,27 @@ define <4 x i32> @reverse_v4i32_2(<2 x i32> %a, < 2 x i32> %b) {
 define <8 x i32> @reverse_v8i32_2(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: reverse_v8i32_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vid.v v10
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    vid.v v9
-; CHECK-NEXT:    srli a1, a0, 2
-; CHECK-NEXT:    addi a1, a1, -1
-; CHECK-NEXT:    vrsub.vx v9, v9, a1
-; CHECK-NEXT:    vrgather.vv v13, v8, v9
-; CHECK-NEXT:    vrgather.vv v12, v11, v9
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vid.v v12
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    vmv.v.i v0, 15
+; CHECK-NEXT:    vrsub.vi v10, v10, 3
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v15, v11, v10
+; CHECK-NEXT:    vrgatherei16.vv v14, v9, v10
+; CHECK-NEXT:    srli a1, a0, 2
 ; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    vrsub.vi v14, v8, 3
+; CHECK-NEXT:    addi a1, a1, -1
 ; CHECK-NEXT:    addi a0, a0, -8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vrsub.vx v10, v12, a1
+; CHECK-NEXT:    vrgather.vv v13, v8, v10
+; CHECK-NEXT:    vrgather.vv v12, v9, v10
+; CHECK-NEXT:    vmv.v.i v0, 15
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    vrgatherei16.vv v8, v10, v14, v0.t
+; CHECK-NEXT:    vmerge.vvm v8, v8, v14, v0
 ; CHECK-NEXT:    ret
   %res = shufflevector <4 x i32> %a, <4 x i32> %b, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
   ret <8 x i32> %res
@@ -1197,23 +1205,25 @@ define <8 x half> @reverse_v8f16_2(<4 x half> %a, <4 x half> %b) {
 define <16 x half> @reverse_v16f16_2(<8 x half> %a, <8 x half> %b) {
 ; CHECK-LABEL: reverse_v16f16_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vid.v v10
 ; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    vrsub.vi v10, v10, 7
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vrgather.vv v13, v12, v10
+; CHECK-NEXT:    vrgather.vv v12, v9, v10
 ; CHECK-NEXT:    vid.v v9
 ; CHECK-NEXT:    srli a1, a0, 1
 ; CHECK-NEXT:    addi a1, a1, -1
 ; CHECK-NEXT:    vrsub.vx v9, v9, a1
-; CHECK-NEXT:    vrgather.vv v13, v8, v9
-; CHECK-NEXT:    vrgather.vv v12, v11, v9
-; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
-; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    li a1, 255
 ; CHECK-NEXT:    addi a0, a0, -16
-; CHECK-NEXT:    vrsub.vi v14, v8, 7
+; CHECK-NEXT:    vrgather.vv v15, v8, v9
+; CHECK-NEXT:    vrgather.vv v14, v10, v9
 ; CHECK-NEXT:    vmv.s.x v0, a1
-; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    vrgather.vv v8, v10, v14, v0.t
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v14, a0
+; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
 ; CHECK-NEXT:    ret
   %res = shufflevector <8 x half> %a, <8 x half> %b,  <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
   ret <16 x half> %res
@@ -1269,24 +1279,27 @@ define <4 x float> @reverse_v4f32_2(<2 x float> %a, <2 x float> %b) {
 define <8 x float> @reverse_v8f32_2(<4 x float> %a, <4 x float> %b) {
 ; CHECK-LABEL: reverse_v8f32_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vid.v v10
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    vid.v v9
-; CHECK-NEXT:    srli a1, a0, 2
-; CHECK-NEXT:    addi a1, a1, -1
-; CHECK-NEXT:    vrsub.vx v9, v9, a1
-; CHECK-NEXT:    vrgather.vv v13, v8, v9
-; CHECK-NEXT:    vrgather.vv v12, v11, v9
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vid.v v12
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    vmv.v.i v0, 15
+; CHECK-NEXT:    vrsub.vi v10, v10, 3
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v15, v11, v10
+; CHECK-NEXT:    vrgatherei16.vv v14, v9, v10
+; CHECK-NEXT:    srli a1, a0, 2
 ; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    vrsub.vi v14, v8, 3
+; CHECK-NEXT:    addi a1, a1, -1
 ; CHECK-NEXT:    addi a0, a0, -8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vrsub.vx v10, v12, a1
+; CHECK-NEXT:    vrgather.vv v13, v8, v10
+; CHECK-NEXT:    vrgather.vv v12, v9, v10
+; CHECK-NEXT:    vmv.v.i v0, 15
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    vrgatherei16.vv v8, v10, v14, v0.t
+; CHECK-NEXT:    vmerge.vvm v8, v8, v14, v0
 ; CHECK-NEXT:    ret
   %res = shufflevector <4 x float> %a, <4 x float> %b, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
   ret <8 x float> %res
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-rotate.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-rotate.ll
index 464b4eca35aba0a..86d8a275a905508 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-rotate.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-rotate.ll
@@ -515,8 +515,10 @@ define <8 x i16> @shuffle_v8i16_as_i64_16(<8 x i16> %v) {
 ; ZVKB-ZVE32X-NEXT:    vsetivli zero, 8, e16, m2, ta, ma
 ; ZVKB-ZVE32X-NEXT:    vle8.v v10, (a0)
 ; ZVKB-ZVE32X-NEXT:    vsext.vf2 v12, v10
+; ZVKB-ZVE32X-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVKB-ZVE32X-NEXT:    vrgather.vv v11, v9, v12
 ; ZVKB-ZVE32X-NEXT:    vrgather.vv v10, v8, v12
-; ZVKB-ZVE32X-NEXT:    vmv.v.v v8, v10
+; ZVKB-ZVE32X-NEXT:    vmv2r.v v8, v10
 ; ZVKB-ZVE32X-NEXT:    ret
   %shuffle = shufflevector <8 x i16> %v, <8 x i16> poison, <8 x i32> <i32 1, i32 2, i32 3, i32 0, i32 5, i32 6, i32 7, i32 4>
   ret <8 x i16> %shuffle
@@ -562,9 +564,10 @@ define <8 x i16> @shuffle_v8i16_as_i64_32(<8 x i16> %v) {
 ; ZVKB-ZVE32X-NEXT:    vmv.s.x v10, a0
 ; ZVKB-ZVE32X-NEXT:    vsetivli zero, 4, e16, m1, ta, ma
 ; ZVKB-ZVE32X-NEXT:    vsext.vf2 v12, v10
-; ZVKB-ZVE32X-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVKB-ZVE32X-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; ZVKB-ZVE32X-NEXT:    vrgatherei16.vv v11, v9, v12
 ; ZVKB-ZVE32X-NEXT:    vrgatherei16.vv v10, v8, v12
-; ZVKB-ZVE32X-NEXT:    vmv.v.v v8, v10
+; ZVKB-ZVE32X-NEXT:    vmv2r.v v8, v10
 ; ZVKB-ZVE32X-NEXT:    ret
   %shuffle = shufflevector <8 x i16> %v, <8 x i16> poison, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 6, i32 7, i32 4, i32 5>
   ret <8 x i16> %shuffle
@@ -609,8 +612,10 @@ define <8 x i16> @shuffle_v8i16_as_i64_48(<8 x i16> %v) {
 ; ZVKB-ZVE32X-NEXT:    vsetivli zero, 8, e16, m2, ta, ma
 ; ZVKB-ZVE32X-NEXT:    vle8.v v10, (a0)
 ; ZVKB-ZVE32X-NEXT:    vsext.vf2 v12, v10
+; ZVKB-ZVE32X-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVKB-ZVE32X-NEXT:    vrgather.vv v11, v9, v12
 ; ZVKB-ZVE32X-NEXT:    vrgather.vv v10, v8, v12
-; ZVKB-ZVE32X-NEXT:    vmv.v.v v8, v10
+; ZVKB-ZVE32X-NEXT:    vmv2r.v v8, v10
 ; ZVKB-ZVE32X-NEXT:    ret
   %shuffle = shufflevector <8 x i16> %v, <8 x i16> poison, <8 x i32> <i32 3, i32 0, i32 1, i32 2, i32 7, i32 4, i32 5, i32 6>
   ret <8 x i16> %shuffle
@@ -655,9 +660,12 @@ define <8 x i32> @shuffle_v8i32_as_i64(<8 x i32> %v) {
 ; ZVKB-ZVE32X-NEXT:    vsetivli zero, 8, e16, m2, ta, ma
 ; ZVKB-ZVE32X-NEXT:    vle8.v v12, (a0)
 ; ZVKB-ZVE32X-NEXT:    vsext.vf2 v16, v12
-; ZVKB-ZVE32X-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVKB-ZVE32X-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; ZVKB-ZVE32X-NEXT:    vrgatherei16.vv v13, v9, v16
 ; ZVKB-ZVE32X-NEXT:    vrgatherei16.vv v12, v8, v16
-; ZVKB-ZVE32X-NEXT:    vmv.v.v v8, v12
+; ZVKB-ZVE32X-NEXT:    vrgatherei16.vv v14, v10, v16
+; ZVKB-ZVE32X-NEXT:    vrgatherei16.vv v15, v11, v16
+; ZVKB-ZVE32X-NEXT:    vmv4r.v v8, v12
 ; ZVKB-ZVE32X-NEXT:    ret
   %shuffle = shufflevector <8 x i32> %v, <8 x i32> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
   ret <8 x i32> %shuffle
@@ -726,8 +734,10 @@ define <8 x half> @shuffle_v8f16_as_i64_16(<8 x half> %v) {
 ; ZVKB-ZVE32X-NEXT:    vsetivli zero, 8, e16, m2, ta, ma
 ; ZVKB-ZVE32X-NEXT:    vle8.v v10, (a0)
 ; ZVKB-ZVE32X-NEXT:    vsext.vf2 v12, v10
+; ZVKB-ZVE32X-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVKB-ZVE32X-NEXT:    vrgather.vv v11, v9, v12
 ; ZVKB-ZVE32X-NEXT:    vrgather.vv v10, v8, v12
-; ZVKB-ZVE32X-NEXT:    vmv.v.v v8, v10
+; ZVKB-ZVE32X-NEXT:    vmv2r.v v8, v10
 ; ZVKB-ZVE32X-NEXT:    ret
   %shuffle = shufflevector <8 x half> %v, <8 x half> poison, <8 x i32> <i32 1, i32 2, i32 3, i32 0, i32 5, i32 6, i32 7, i32 4>
   ret <8 x half> %shuffle
@@ -773,9 +783,10 @@ define <8 x half> @shuffle_v8f16_as_i64_32(<8 x half> %v) {
 ; ZVKB-ZVE32X-NEXT:    vmv.s.x v10, a0
 ; ZVKB-ZVE32X-NEXT:    vsetivli zero, 4, e16, m1, ta, ma
 ; ZVKB-ZVE32X-NEXT:    vsext.vf2 v12, v10
-; ZVKB-ZVE32X-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVKB-ZVE32X-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; ZVKB-ZVE32X-NEXT:    vrgatherei16.vv v11, v9, v12
 ; ZVKB-ZVE32X-NEXT:    vrgatherei16.vv v10, v8, v12
-; ZVKB-ZVE32X-NEXT:    vmv.v.v v8, v10
+; ZVKB-ZVE32X-NEXT:    vmv2r.v v8, v10
 ; ZVKB-ZVE32X-NEXT:    ret
   %shuffle = shufflevector <8 x half> %v, <8 x half> poison, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 6, i32 7, i32 4, i32 5>
   ret <8 x half> %shuffle
@@ -820,8 +831,10 @@ define <8 x half> @shuffle_v8f16_as_i64_48(<8 x half> %v) {
 ; ZVKB-ZVE32X-NEXT:    vsetivli zero, 8, e16, m2, ta, ma
 ; ZVKB-ZVE32X-NEXT:    vle8.v v10, (a0)
 ; ZVKB-ZVE32X-NEXT:    vsext.vf2 v12, v10
+; ZVKB-ZVE32X-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVKB-ZVE32X-NEXT:    vrgather.vv v11, v9, v12
 ; ZVKB-ZVE32X-NEXT:    vrgather.vv v10, v8, v12
-; ZVKB-ZVE32X-NEXT:    vmv.v.v v8, v10
+; ZVKB-ZVE32X-NEXT:    vmv2r.v v8, v10
 ; ZVKB-ZVE32X-NEXT:    ret
   %shuffle = shufflevector <8 x half> %v, <8 x half> poison, <8 x i32> <i32 3, i32 0, i32 1, i32 2, i32 7, i32 4, i32 5, i32 6>
   ret <8 x half> %shuffle
@@ -866,9 +879,12 @@ define <8 x float> @shuffle_v8f32_as_i64(<8 x float> %v) {
 ; ZVKB-ZVE32X-NEXT:    vsetivli zero, 8, e16, m2, ta, ma
 ; ZVKB-ZVE32X-NEXT:    vle8.v v12, (a0)
 ; ZVKB-ZVE32X-NEXT:    vsext.vf2 v16, v12
-; ZVKB-ZVE32X-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVKB-ZVE32X-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; ZVKB-ZVE32X-NEXT:    vrgatherei16.vv v13, v9, v16
 ; ZVKB-ZVE32X-NEXT:    vrgatherei16.vv v12, v8, v16
-; ZVKB-ZVE32X-NEXT:    vmv.v.v v8, v12
+; ZVKB-ZVE32X-NEXT:    vrgatherei16.vv v14, v10, v16
+; ZVKB-ZVE32X-NEXT:    vrgatherei16.vv v15, v11, v16
+; ZVKB-ZVE32X-NEXT:    vmv4r.v v8, v12
 ; ZVKB-ZVE32X-NEXT:    ret
   %shuffle = shufflevector <8 x float> %v, <8 x float> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
   ret <8 x float> %shuffle
@@ -920,3 +936,87 @@ define <8 x float> @shuffle_v8f32_as_i64_exact(<8 x float> %v) vscale_range(2,2)
   %shuffle = shufflevector <8 x float> %v, <8 x float> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
   ret <8 x float> %shuffle
 }
+
+define <8 x i64> @shuffle_v8i64_as_i128(<8 x i64> %v) {
+; CHECK-LABEL: shuffle_v8i64_as_i128:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI29_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI29_0)
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vle16.v v16, (a0)
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v13, v9, v16
+; CHECK-NEXT:    vrgatherei16.vv v12, v8, v16
+; CHECK-NEXT:    vrgatherei16.vv v14, v10, v16
+; CHECK-NEXT:    vrgatherei16.vv v15, v11, v16
+; CHECK-NEXT:    vmv4r.v v8, v12
+; CHECK-NEXT:    ret
+;
+; ZVKB-V-LABEL: shuffle_v8i64_as_i128:
+; ZVKB-V:       # %bb.0:
+; ZVKB-V-NEXT:    lui a0, %hi(.LCPI29_0)
+; ZVKB-V-NEXT:    addi a0, a0, %lo(.LCPI29_0)
+; ZVKB-V-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVKB-V-NEXT:    vle16.v v16, (a0)
+; ZVKB-V-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; ZVKB-V-NEXT:    vrgatherei16.vv v13, v9, v16
+; ZVKB-V-NEXT:    vrgatherei16.vv v12, v8, v16
+; ZVKB-V-NEXT:    vrgatherei16.vv v14, v10, v16
+; ZVKB-V-NEXT:    vrgatherei16.vv v15, v11, v16
+; ZVKB-V-NEXT:    vmv4r.v v8, v12
+; ZVKB-V-NEXT:    ret
+  %shuffle = shufflevector <8 x i64> %v, <8 x i64> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+  ret <8 x i64> %shuffle
+}
+
+define <8 x i64> @shuffle_v8i64_as_i256(<8 x i64> %v) {
+; CHECK-LABEL: shuffle_v8i64_as_i256:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI30_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI30_0)
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT:    vle16.v v16, (a0)
+; CHECK-NEXT:    vrgatherei16.vv v12, v8, v16
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+;
+; ZVKB-V-LABEL: shuffle_v8i64_as_i256:
+; ZVKB-V:       # %bb.0:
+; ZVKB-V-NEXT:    lui a0, %hi(.LCPI30_0)
+; ZVKB-V-NEXT:    addi a0, a0, %lo(.LCPI30_0)
+; ZVKB-V-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; ZVKB-V-NEXT:    vle16.v v16, (a0)
+; ZVKB-V-NEXT:    vrgatherei16.vv v12, v8, v16
+; ZVKB-V-NEXT:    vmv.v.v v8, v12
+; ZVKB-V-NEXT:    ret
+  %shuffle = shufflevector <8 x i64> %v, <8 x i64> poison, <8 x i32> <i32 2, i32 1, i32 0, i32 3, i32 6, i32 5, i32 4, i32 7>
+  ret <8 x i64> %shuffle
+}
+
+define <8 x i64> @shuffle_v8i64_as_i256_zvl256b(<8 x i64> %v) vscale_range(4,0) {
+; CHECK-LABEL: shuffle_v8i64_as_i256_zvl256b:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI31_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI31_0)
+; CHECK-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; CHECK-NEXT:    vle16.v v12, (a0)
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v11, v9, v12
+; CHECK-NEXT:    vrgatherei16.vv v10, v8, v12
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+;
+; ZVKB-V-LABEL: shuffle_v8i64_as_i256_zvl256b:
+; ZVKB-V:       # %bb.0:
+; ZVKB-V-NEXT:    lui a0, %hi(.LCPI31_0)
+; ZVKB-V-NEXT:    addi a0, a0, %lo(.LCPI31_0)
+; ZVKB-V-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVKB-V-NEXT:    vle16.v v12, (a0)
+; ZVKB-V-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; ZVKB-V-NEXT:    vrgatherei16.vv v11, v9, v12
+; ZVKB-V-NEXT:    vrgatherei16.vv v10, v8, v12
+; ZVKB-V-NEXT:    vmv2r.v v8, v10
+; ZVKB-V-NEXT:    ret
+  %shuffle = shufflevector <8 x i64> %v, <8 x i64> poison, <8 x i32> <i32 2, i32 1, i32 0, i32 3, i32 6, i32 5, i32 4, i32 7>
+  ret <8 x i64> %shuffle
+}

>From 3c6e0d2c6500b2c65b8e2bedb572ccd721cc427c Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Tue, 4 Feb 2025 11:29:40 -0800
Subject: [PATCH 2/3] Address review comment

---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index d34de540332942a..5653f13c110d9c8 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5706,7 +5706,7 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
     SDValue Gather;
     // If we have a locally repeating mask, then we can reuse the first register
     // in the index register group for all registers within the source register
-    // group.  TODO:: This generalizes to m2, and m4.  Also, this is currently
+    // group.  TODO: This generalizes to m2, and m4.  Also, this is currently
     // picking up cases with a fully undef tail which could be more directly
     // handled with fewer redundant vrgathers
     const MVT M1VT = getLMUL1VT(ContainerVT);

>From ed04dea16fded817b57f2fc28f7bfcc0c003e88a Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Tue, 4 Feb 2025 19:05:44 -0800
Subject: [PATCH 3/3] clang-format

---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 5653f13c110d9c8..1b507c27db1e6a4 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5718,7 +5718,8 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
                       DAG.getVectorIdxConstant(0, DL));
       auto [InnerTrueMask, InnerVL] =
           getDefaultScalableVLOps(M1VT, DL, DAG, Subtarget);
-      int N = ContainerVT.getVectorMinNumElements() / M1VT.getVectorMinNumElements();
+      int N = ContainerVT.getVectorMinNumElements() /
+              M1VT.getVectorMinNumElements();
       assert(isPowerOf2_32(N) && N <= 8);
       Gather = DAG.getUNDEF(ContainerVT);
       for (int i = 0; i < N; i++) {


