[llvm] [IA] Recognize repeated masks which come from shuffle vectors (PR #150285)

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 24 21:01:24 PDT 2025


https://github.com/preames updated https://github.com/llvm/llvm-project/pull/150285

>From a8d240f71b62dc753842e0ae74c0b98f8245e368 Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Wed, 23 Jul 2025 11:11:52 -0700
Subject: [PATCH 1/4] [IA] Recognize repeated masks which come from shuffle
 vectors

This extends the fixed-vector lowering to support the case where the
wide mask is formed via the shufflevector idiom, i.e. a narrow mask
interleaved with itself out to the full factor width.
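
As a rough sketch of the idiom being recognized (it mirrors the
vpload_factor2_interleaved_mask_shuffle test added below; value names
here are illustrative only), the wide mask repeats each lane of a
narrow mask Factor times:

  %interleaved.mask = shufflevector <4 x i1> %m, <4 x i1> poison, <8 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3>
  %wide.vec = call <8 x i32> @llvm.vp.load.v8i32.p0(ptr %ptr, <8 x i1> %interleaved.mask, i32 8)
  ; getMask() can now peel the shuffle off and hand <4 x i1> %m back to the
  ; target, so RISC-V emits a masked vlseg2e32 rather than falling back to a
  ; plain masked load plus deinterleaving shuffles.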
---
 llvm/lib/CodeGen/InterleavedAccessPass.cpp    |  13 +++
 .../rvv/fixed-vectors-interleaved-access.ll   | 105 ++++++++++++------
 2 files changed, 82 insertions(+), 36 deletions(-)

diff --git a/llvm/lib/CodeGen/InterleavedAccessPass.cpp b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
index 1b691881d67dd..420af717f120d 100644
--- a/llvm/lib/CodeGen/InterleavedAccessPass.cpp
+++ b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
@@ -564,6 +564,19 @@ static Value *getMask(Value *WideMask, unsigned Factor,
     }
   }
 
+  if (auto *SVI = dyn_cast<ShuffleVectorInst>(WideMask)) {
+    unsigned LeafMaskLen = LeafValueEC.getFixedValue();
+    if (SVI->isInterleave(Factor) &&
+        llvm::all_of(SVI->getShuffleMask(),
+                     [&](int Idx) { return Idx < (int)LeafMaskLen; })) {
+      auto *LeafMaskTy =
+          FixedVectorType::get(Type::getInt1Ty(SVI->getContext()), LeafMaskLen);
+      IRBuilder<> Builder(SVI);
+      return Builder.CreateExtractVector(LeafMaskTy, SVI->getOperand(0),
+                                         uint64_t(0));
+    }
+  }
+
   return nullptr;
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
index 7274e1bb59b92..fbed742296cf6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
@@ -205,6 +205,21 @@ define {<4 x i32>, <4 x i32>} @vpload_factor2_interleaved_mask_intrinsic(ptr %pt
   ret {<4 x i32>, <4 x i32>} %res1
 }
 
+define {<4 x i32>, <4 x i32>} @vpload_factor2_interleaved_mask_shuffle(ptr %ptr, <4 x i1> %m) {
+; CHECK-LABEL: vpload_factor2_interleaved_mask_shuffle:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vlseg2e32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %interleaved.mask = shufflevector <4 x i1> %m, <4 x i1> poison, <8 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3>
+  %interleaved.vec = tail call <8 x i32> @llvm.vp.load.v8i32.p0(ptr %ptr, <8 x i1> %interleaved.mask, i32 8)
+  %v0 = shufflevector <8 x i32> %interleaved.vec, <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %v1 = shufflevector <8 x i32> %interleaved.vec, <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %res0 = insertvalue {<4 x i32>, <4 x i32>} undef, <4 x i32> %v0, 0
+  %res1 = insertvalue {<4 x i32>, <4 x i32>} %res0, <4 x i32> %v1, 1
+  ret {<4 x i32>, <4 x i32>} %res1
+}
+
 define {<4 x i32>, <4 x i32>, <4 x i32>} @vpload_factor3(ptr %ptr) {
 ; CHECK-LABEL: vpload_factor3:
 ; CHECK:       # %bb.0:
@@ -437,8 +452,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV32-NEXT:    li a2, 32
 ; RV32-NEXT:    lui a3, 12
 ; RV32-NEXT:    lui a6, 12291
-; RV32-NEXT:    lui a7, %hi(.LCPI21_0)
-; RV32-NEXT:    addi a7, a7, %lo(.LCPI21_0)
+; RV32-NEXT:    lui a7, %hi(.LCPI22_0)
+; RV32-NEXT:    addi a7, a7, %lo(.LCPI22_0)
 ; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
 ; RV32-NEXT:    vle32.v v24, (a5)
 ; RV32-NEXT:    vmv.s.x v0, a3
@@ -523,12 +538,12 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV32-NEXT:    addi a1, a1, 16
 ; RV32-NEXT:    vs4r.v v8, (a1) # vscale x 32-byte Folded Spill
 ; RV32-NEXT:    lui a7, 49164
-; RV32-NEXT:    lui a1, %hi(.LCPI21_1)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI21_1)
+; RV32-NEXT:    lui a1, %hi(.LCPI22_1)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI22_1)
 ; RV32-NEXT:    lui t2, 3
 ; RV32-NEXT:    lui t1, 196656
-; RV32-NEXT:    lui a4, %hi(.LCPI21_3)
-; RV32-NEXT:    addi a4, a4, %lo(.LCPI21_3)
+; RV32-NEXT:    lui a4, %hi(.LCPI22_3)
+; RV32-NEXT:    addi a4, a4, %lo(.LCPI22_3)
 ; RV32-NEXT:    lui t0, 786624
 ; RV32-NEXT:    li a5, 48
 ; RV32-NEXT:    lui a6, 768
@@ -707,8 +722,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV32-NEXT:    vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32-NEXT:    vrgatherei16.vv v24, v8, v2
-; RV32-NEXT:    lui a1, %hi(.LCPI21_2)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI21_2)
+; RV32-NEXT:    lui a1, %hi(.LCPI22_2)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI22_2)
 ; RV32-NEXT:    lui a3, 3073
 ; RV32-NEXT:    addi a3, a3, -1024
 ; RV32-NEXT:    vmv.s.x v0, a3
@@ -772,16 +787,16 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV32-NEXT:    vrgatherei16.vv v28, v8, v3
 ; RV32-NEXT:    vsetivli zero, 10, e32, m4, tu, ma
 ; RV32-NEXT:    vmv.v.v v28, v24
-; RV32-NEXT:    lui a1, %hi(.LCPI21_4)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI21_4)
-; RV32-NEXT:    lui a2, %hi(.LCPI21_5)
-; RV32-NEXT:    addi a2, a2, %lo(.LCPI21_5)
+; RV32-NEXT:    lui a1, %hi(.LCPI22_4)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI22_4)
+; RV32-NEXT:    lui a2, %hi(.LCPI22_5)
+; RV32-NEXT:    addi a2, a2, %lo(.LCPI22_5)
 ; RV32-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; RV32-NEXT:    vle16.v v24, (a2)
 ; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; RV32-NEXT:    vle16.v v8, (a1)
-; RV32-NEXT:    lui a1, %hi(.LCPI21_7)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI21_7)
+; RV32-NEXT:    lui a1, %hi(.LCPI22_7)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI22_7)
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32-NEXT:    vle16.v v10, (a1)
 ; RV32-NEXT:    csrr a1, vlenb
@@ -809,14 +824,14 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV32-NEXT:    vl8r.v v0, (a1) # vscale x 64-byte Folded Reload
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32-NEXT:    vrgatherei16.vv v16, v0, v10
-; RV32-NEXT:    lui a1, %hi(.LCPI21_6)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI21_6)
-; RV32-NEXT:    lui a2, %hi(.LCPI21_8)
-; RV32-NEXT:    addi a2, a2, %lo(.LCPI21_8)
+; RV32-NEXT:    lui a1, %hi(.LCPI22_6)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI22_6)
+; RV32-NEXT:    lui a2, %hi(.LCPI22_8)
+; RV32-NEXT:    addi a2, a2, %lo(.LCPI22_8)
 ; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; RV32-NEXT:    vle16.v v4, (a1)
-; RV32-NEXT:    lui a1, %hi(.LCPI21_9)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI21_9)
+; RV32-NEXT:    lui a1, %hi(.LCPI22_9)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI22_9)
 ; RV32-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; RV32-NEXT:    vle16.v v6, (a1)
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
@@ -903,8 +918,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV64-NEXT:    li a4, 128
 ; RV64-NEXT:    lui a1, 1
 ; RV64-NEXT:    vle64.v v8, (a3)
-; RV64-NEXT:    lui a3, %hi(.LCPI21_0)
-; RV64-NEXT:    addi a3, a3, %lo(.LCPI21_0)
+; RV64-NEXT:    lui a3, %hi(.LCPI22_0)
+; RV64-NEXT:    addi a3, a3, %lo(.LCPI22_0)
 ; RV64-NEXT:    vmv.s.x v0, a4
 ; RV64-NEXT:    csrr a4, vlenb
 ; RV64-NEXT:    li a5, 61
@@ -1092,8 +1107,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV64-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT:    vslideup.vi v12, v16, 1, v0.t
-; RV64-NEXT:    lui a2, %hi(.LCPI21_1)
-; RV64-NEXT:    addi a2, a2, %lo(.LCPI21_1)
+; RV64-NEXT:    lui a2, %hi(.LCPI22_1)
+; RV64-NEXT:    addi a2, a2, %lo(.LCPI22_1)
 ; RV64-NEXT:    li a3, 192
 ; RV64-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; RV64-NEXT:    vle16.v v6, (a2)
@@ -1127,8 +1142,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV64-NEXT:    vrgatherei16.vv v24, v16, v6
 ; RV64-NEXT:    addi a2, sp, 16
 ; RV64-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
-; RV64-NEXT:    lui a2, %hi(.LCPI21_2)
-; RV64-NEXT:    addi a2, a2, %lo(.LCPI21_2)
+; RV64-NEXT:    lui a2, %hi(.LCPI22_2)
+; RV64-NEXT:    addi a2, a2, %lo(.LCPI22_2)
 ; RV64-NEXT:    li a3, 1040
 ; RV64-NEXT:    vmv.s.x v0, a3
 ; RV64-NEXT:    addi a1, a1, -2016
@@ -1212,12 +1227,12 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
 ; RV64-NEXT:    vs4r.v v8, (a1) # vscale x 32-byte Folded Spill
-; RV64-NEXT:    lui a1, %hi(.LCPI21_3)
-; RV64-NEXT:    addi a1, a1, %lo(.LCPI21_3)
+; RV64-NEXT:    lui a1, %hi(.LCPI22_3)
+; RV64-NEXT:    addi a1, a1, %lo(.LCPI22_3)
 ; RV64-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; RV64-NEXT:    vle16.v v20, (a1)
-; RV64-NEXT:    lui a1, %hi(.LCPI21_4)
-; RV64-NEXT:    addi a1, a1, %lo(.LCPI21_4)
+; RV64-NEXT:    lui a1, %hi(.LCPI22_4)
+; RV64-NEXT:    addi a1, a1, %lo(.LCPI22_4)
 ; RV64-NEXT:    vle16.v v8, (a1)
 ; RV64-NEXT:    csrr a1, vlenb
 ; RV64-NEXT:    li a2, 77
@@ -1268,8 +1283,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV64-NEXT:    vl2r.v v8, (a1) # vscale x 16-byte Folded Reload
 ; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64-NEXT:    vrgatherei16.vv v0, v16, v8
-; RV64-NEXT:    lui a1, %hi(.LCPI21_5)
-; RV64-NEXT:    addi a1, a1, %lo(.LCPI21_5)
+; RV64-NEXT:    lui a1, %hi(.LCPI22_5)
+; RV64-NEXT:    addi a1, a1, %lo(.LCPI22_5)
 ; RV64-NEXT:    vle16.v v20, (a1)
 ; RV64-NEXT:    csrr a1, vlenb
 ; RV64-NEXT:    li a2, 61
@@ -1586,6 +1601,24 @@ define void @vpstore_factor7(ptr %ptr, <2 x i16> %v0, <2 x i16> %v1, <2 x i16> %
   ret void
 }
 
+define void @vpstore_factor7_masked(ptr %ptr, <2 x i16> %v0, <2 x i16> %v1, <2 x i16> %v2, <2 x i16> %v3, <2 x i16> %v4, <2 x i16> %v5, <2 x i16> %v6, <2 x i1> %m) {
+; CHECK-LABEL: vpstore_factor7_masked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vsseg7e16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %interleaved.mask = shufflevector <2 x i1> %m, <2 x i1> poison, <14 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %s0 = shufflevector <2 x i16> %v0, <2 x i16> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %s1 = shufflevector <2 x i16> %v2, <2 x i16> %v3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %s2 = shufflevector <2 x i16> %v4, <2 x i16> %v5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %s3 = shufflevector <4 x i16> %s0, <4 x i16> %s1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %s4 = shufflevector <2 x i16> %v6, <2 x i16> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+  %s5 = shufflevector <4 x i16> %s2, <4 x i16> %s4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 undef, i32 undef>
+  %interleaved.vec = shufflevector <8 x i16> %s3, <8 x i16> %s5, <14 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13>
+  tail call void @llvm.vp.store.v14i16.p0(<14 x i16> %interleaved.vec, ptr %ptr, <14 x i1> %interleaved.mask, i32 14)
+  ret void
+}
+
 define void @vpstore_factor8(ptr %ptr, <2 x i16> %v0, <2 x i16> %v1, <2 x i16> %v2, <2 x i16> %v3, <2 x i16> %v4, <2 x i16> %v5, <2 x i16> %v6, <2 x i16> %v7) {
 ; CHECK-LABEL: vpstore_factor8:
 ; CHECK:       # %bb.0:
@@ -1867,8 +1900,8 @@ define {<4 x i32>, <4 x i32>, <4 x i32>} @invalid_vp_mask(ptr %ptr) {
 ; RV32-NEXT:    vle32.v v12, (a0), v0.t
 ; RV32-NEXT:    li a0, 36
 ; RV32-NEXT:    vmv.s.x v20, a1
-; RV32-NEXT:    lui a1, %hi(.LCPI56_0)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI56_0)
+; RV32-NEXT:    lui a1, %hi(.LCPI58_0)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI58_0)
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT:    vle16.v v21, (a1)
 ; RV32-NEXT:    vcompress.vm v8, v12, v11
@@ -1943,8 +1976,8 @@ define {<4 x i32>, <4 x i32>, <4 x i32>} @invalid_vp_evl(ptr %ptr) {
 ; RV32-NEXT:    vmv.s.x v10, a0
 ; RV32-NEXT:    li a0, 146
 ; RV32-NEXT:    vmv.s.x v11, a0
-; RV32-NEXT:    lui a0, %hi(.LCPI57_0)
-; RV32-NEXT:    addi a0, a0, %lo(.LCPI57_0)
+; RV32-NEXT:    lui a0, %hi(.LCPI59_0)
+; RV32-NEXT:    addi a0, a0, %lo(.LCPI59_0)
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT:    vle16.v v20, (a0)
 ; RV32-NEXT:    li a0, 36

>From f22b72d71fd7318bd13929a0f4eccc067849f426 Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Thu, 24 Jul 2025 10:37:17 -0700
Subject: [PATCH 2/4] address review comment
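
The check now goes through ShuffleVectorInst::isInterleaveMask() with an
explicit "all start indexes are zero" test, and the returned leaf mask
type is sized from LeafValueEC. A minimal sketch of a mask shape that is
still deliberately rejected (it is the shape exercised by the new
vpload_factor2_interleaved_mask_shuffle2 test; value names are
illustrative):

  ; Lanes 4..7 of the wide mask index into the second (poison) operand, so
  ; the "contained by the first source" condition fails, getMask() keeps
  ; returning nullptr, and the access is not converted to a segment load.
  %interleaved.mask = shufflevector <2 x i1> %m, <2 x i1> poison, <8 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3>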

---
 llvm/lib/CodeGen/InterleavedAccessPass.cpp    | 15 ++-
 .../rvv/fixed-vectors-interleaved-access.ll   | 99 ++++++++++++-------
 2 files changed, 74 insertions(+), 40 deletions(-)

diff --git a/llvm/lib/CodeGen/InterleavedAccessPass.cpp b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
index 3d11a0a326f69..31e44c933ae74 100644
--- a/llvm/lib/CodeGen/InterleavedAccessPass.cpp
+++ b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
@@ -588,12 +588,19 @@ static Value *getMask(Value *WideMask, unsigned Factor,
   }
 
   if (auto *SVI = dyn_cast<ShuffleVectorInst>(WideMask)) {
-    unsigned LeafMaskLen = LeafValueEC.getFixedValue();
-    if (SVI->isInterleave(Factor) &&
+    // Check that the shuffle mask is: a) an interleave, b) repeats the same
+    // set of elements in every field, and c) is contained by the first
+    // source.  (c) could be relaxed if desired.
+    unsigned NumSrcElts =
+        cast<FixedVectorType>(SVI->getOperand(1)->getType())->getNumElements();
+    SmallVector<unsigned> StartIndexes;
+    if (ShuffleVectorInst::isInterleaveMask(SVI->getShuffleMask(), Factor,
+                                            NumSrcElts * 2, StartIndexes) &&
+        llvm::all_of(StartIndexes, [](unsigned Start) { return Start == 0; }) &&
         llvm::all_of(SVI->getShuffleMask(),
-                     [&](int Idx) { return Idx < (int)LeafMaskLen; })) {
+                     [&](int Idx) { return Idx < (int)NumSrcElts; })) {
       auto *LeafMaskTy =
-          FixedVectorType::get(Type::getInt1Ty(SVI->getContext()), LeafMaskLen);
+          VectorType::get(Type::getInt1Ty(SVI->getContext()), LeafValueEC);
       IRBuilder<> Builder(SVI);
       return Builder.CreateExtractVector(LeafMaskTy, SVI->getOperand(0),
                                          uint64_t(0));
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
index 9f9a60e3f4661..9694912816ad5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
@@ -220,6 +220,33 @@ define {<4 x i32>, <4 x i32>} @vpload_factor2_interleaved_mask_shuffle(ptr %ptr,
   ret {<4 x i32>, <4 x i32>} %res1
 }
 
+define {<4 x i32>, <4 x i32>} @vpload_factor2_interleaved_mask_shuffle2(ptr %ptr, <2 x i1> %m) {
+; CHECK-LABEL: vpload_factor2_interleaved_mask_shuffle2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    li a1, -1
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT:    vwaddu.vv v9, v8, v8
+; CHECK-NEXT:    vwmaccu.vx v9, a1, v8
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
+; CHECK-NEXT:    vmsne.vi v0, v9, 0
+; CHECK-NEXT:    vle32.v v10, (a0), v0.t
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vnsrl.wx v9, v10, a0
+; CHECK-NEXT:    ret
+  %interleaved.mask = shufflevector <2 x i1> %m, <2 x i1> poison, <8 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3>
+  %interleaved.vec = tail call <8 x i32> @llvm.vp.load.v8i32.p0(ptr %ptr, <8 x i1> %interleaved.mask, i32 4)
+  %v0 = shufflevector <8 x i32> %interleaved.vec, <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %v1 = shufflevector <8 x i32> %interleaved.vec, <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %res0 = insertvalue {<4 x i32>, <4 x i32>} undef, <4 x i32> %v0, 0
+  %res1 = insertvalue {<4 x i32>, <4 x i32>} %res0, <4 x i32> %v1, 1
+  ret {<4 x i32>, <4 x i32>} %res1
+}
+
 define {<4 x i32>, <4 x i32>, <4 x i32>} @vpload_factor3(ptr %ptr) {
 ; CHECK-LABEL: vpload_factor3:
 ; CHECK:       # %bb.0:
@@ -452,8 +479,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV32-NEXT:    li a2, 32
 ; RV32-NEXT:    lui a3, 12
 ; RV32-NEXT:    lui a6, 12291
-; RV32-NEXT:    lui a7, %hi(.LCPI22_0)
-; RV32-NEXT:    addi a7, a7, %lo(.LCPI22_0)
+; RV32-NEXT:    lui a7, %hi(.LCPI23_0)
+; RV32-NEXT:    addi a7, a7, %lo(.LCPI23_0)
 ; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
 ; RV32-NEXT:    vle32.v v24, (a5)
 ; RV32-NEXT:    vmv.s.x v0, a3
@@ -538,12 +565,12 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV32-NEXT:    addi a1, a1, 16
 ; RV32-NEXT:    vs4r.v v8, (a1) # vscale x 32-byte Folded Spill
 ; RV32-NEXT:    lui a7, 49164
-; RV32-NEXT:    lui a1, %hi(.LCPI22_1)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI22_1)
+; RV32-NEXT:    lui a1, %hi(.LCPI23_1)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI23_1)
 ; RV32-NEXT:    lui t2, 3
 ; RV32-NEXT:    lui t1, 196656
-; RV32-NEXT:    lui a4, %hi(.LCPI22_3)
-; RV32-NEXT:    addi a4, a4, %lo(.LCPI22_3)
+; RV32-NEXT:    lui a4, %hi(.LCPI23_3)
+; RV32-NEXT:    addi a4, a4, %lo(.LCPI23_3)
 ; RV32-NEXT:    lui t0, 786624
 ; RV32-NEXT:    li a5, 48
 ; RV32-NEXT:    lui a6, 768
@@ -722,8 +749,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV32-NEXT:    vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32-NEXT:    vrgatherei16.vv v24, v8, v2
-; RV32-NEXT:    lui a1, %hi(.LCPI22_2)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI22_2)
+; RV32-NEXT:    lui a1, %hi(.LCPI23_2)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI23_2)
 ; RV32-NEXT:    lui a3, 3073
 ; RV32-NEXT:    addi a3, a3, -1024
 ; RV32-NEXT:    vmv.s.x v0, a3
@@ -787,16 +814,16 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV32-NEXT:    vrgatherei16.vv v28, v8, v3
 ; RV32-NEXT:    vsetivli zero, 10, e32, m4, tu, ma
 ; RV32-NEXT:    vmv.v.v v28, v24
-; RV32-NEXT:    lui a1, %hi(.LCPI22_4)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI22_4)
-; RV32-NEXT:    lui a2, %hi(.LCPI22_5)
-; RV32-NEXT:    addi a2, a2, %lo(.LCPI22_5)
+; RV32-NEXT:    lui a1, %hi(.LCPI23_4)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI23_4)
+; RV32-NEXT:    lui a2, %hi(.LCPI23_5)
+; RV32-NEXT:    addi a2, a2, %lo(.LCPI23_5)
 ; RV32-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; RV32-NEXT:    vle16.v v24, (a2)
 ; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; RV32-NEXT:    vle16.v v8, (a1)
-; RV32-NEXT:    lui a1, %hi(.LCPI22_7)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI22_7)
+; RV32-NEXT:    lui a1, %hi(.LCPI23_7)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI23_7)
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32-NEXT:    vle16.v v10, (a1)
 ; RV32-NEXT:    csrr a1, vlenb
@@ -824,14 +851,14 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV32-NEXT:    vl8r.v v0, (a1) # vscale x 64-byte Folded Reload
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32-NEXT:    vrgatherei16.vv v16, v0, v10
-; RV32-NEXT:    lui a1, %hi(.LCPI22_6)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI22_6)
-; RV32-NEXT:    lui a2, %hi(.LCPI22_8)
-; RV32-NEXT:    addi a2, a2, %lo(.LCPI22_8)
+; RV32-NEXT:    lui a1, %hi(.LCPI23_6)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI23_6)
+; RV32-NEXT:    lui a2, %hi(.LCPI23_8)
+; RV32-NEXT:    addi a2, a2, %lo(.LCPI23_8)
 ; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; RV32-NEXT:    vle16.v v4, (a1)
-; RV32-NEXT:    lui a1, %hi(.LCPI22_9)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI22_9)
+; RV32-NEXT:    lui a1, %hi(.LCPI23_9)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI23_9)
 ; RV32-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; RV32-NEXT:    vle16.v v6, (a1)
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
@@ -918,8 +945,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV64-NEXT:    li a4, 128
 ; RV64-NEXT:    lui a1, 1
 ; RV64-NEXT:    vle64.v v8, (a3)
-; RV64-NEXT:    lui a3, %hi(.LCPI22_0)
-; RV64-NEXT:    addi a3, a3, %lo(.LCPI22_0)
+; RV64-NEXT:    lui a3, %hi(.LCPI23_0)
+; RV64-NEXT:    addi a3, a3, %lo(.LCPI23_0)
 ; RV64-NEXT:    vmv.s.x v0, a4
 ; RV64-NEXT:    csrr a4, vlenb
 ; RV64-NEXT:    li a5, 61
@@ -1107,8 +1134,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV64-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT:    vslideup.vi v12, v16, 1, v0.t
-; RV64-NEXT:    lui a2, %hi(.LCPI22_1)
-; RV64-NEXT:    addi a2, a2, %lo(.LCPI22_1)
+; RV64-NEXT:    lui a2, %hi(.LCPI23_1)
+; RV64-NEXT:    addi a2, a2, %lo(.LCPI23_1)
 ; RV64-NEXT:    li a3, 192
 ; RV64-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; RV64-NEXT:    vle16.v v6, (a2)
@@ -1142,8 +1169,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV64-NEXT:    vrgatherei16.vv v24, v16, v6
 ; RV64-NEXT:    addi a2, sp, 16
 ; RV64-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
-; RV64-NEXT:    lui a2, %hi(.LCPI22_2)
-; RV64-NEXT:    addi a2, a2, %lo(.LCPI22_2)
+; RV64-NEXT:    lui a2, %hi(.LCPI23_2)
+; RV64-NEXT:    addi a2, a2, %lo(.LCPI23_2)
 ; RV64-NEXT:    li a3, 1040
 ; RV64-NEXT:    vmv.s.x v0, a3
 ; RV64-NEXT:    addi a1, a1, -2016
@@ -1227,12 +1254,12 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
 ; RV64-NEXT:    vs4r.v v8, (a1) # vscale x 32-byte Folded Spill
-; RV64-NEXT:    lui a1, %hi(.LCPI22_3)
-; RV64-NEXT:    addi a1, a1, %lo(.LCPI22_3)
+; RV64-NEXT:    lui a1, %hi(.LCPI23_3)
+; RV64-NEXT:    addi a1, a1, %lo(.LCPI23_3)
 ; RV64-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; RV64-NEXT:    vle16.v v20, (a1)
-; RV64-NEXT:    lui a1, %hi(.LCPI22_4)
-; RV64-NEXT:    addi a1, a1, %lo(.LCPI22_4)
+; RV64-NEXT:    lui a1, %hi(.LCPI23_4)
+; RV64-NEXT:    addi a1, a1, %lo(.LCPI23_4)
 ; RV64-NEXT:    vle16.v v8, (a1)
 ; RV64-NEXT:    csrr a1, vlenb
 ; RV64-NEXT:    li a2, 77
@@ -1283,8 +1310,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV64-NEXT:    vl2r.v v8, (a1) # vscale x 16-byte Folded Reload
 ; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64-NEXT:    vrgatherei16.vv v0, v16, v8
-; RV64-NEXT:    lui a1, %hi(.LCPI22_5)
-; RV64-NEXT:    addi a1, a1, %lo(.LCPI22_5)
+; RV64-NEXT:    lui a1, %hi(.LCPI23_5)
+; RV64-NEXT:    addi a1, a1, %lo(.LCPI23_5)
 ; RV64-NEXT:    vle16.v v20, (a1)
 ; RV64-NEXT:    csrr a1, vlenb
 ; RV64-NEXT:    li a2, 61
@@ -1900,8 +1927,8 @@ define {<4 x i32>, <4 x i32>, <4 x i32>} @invalid_vp_mask(ptr %ptr) {
 ; RV32-NEXT:    vle32.v v12, (a0), v0.t
 ; RV32-NEXT:    li a0, 36
 ; RV32-NEXT:    vmv.s.x v20, a1
-; RV32-NEXT:    lui a1, %hi(.LCPI58_0)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI58_0)
+; RV32-NEXT:    lui a1, %hi(.LCPI59_0)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI59_0)
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT:    vle16.v v21, (a1)
 ; RV32-NEXT:    vcompress.vm v8, v12, v11
@@ -1976,8 +2003,8 @@ define {<4 x i32>, <4 x i32>, <4 x i32>} @invalid_vp_evl(ptr %ptr) {
 ; RV32-NEXT:    vmv.s.x v10, a0
 ; RV32-NEXT:    li a0, 146
 ; RV32-NEXT:    vmv.s.x v11, a0
-; RV32-NEXT:    lui a0, %hi(.LCPI59_0)
-; RV32-NEXT:    addi a0, a0, %lo(.LCPI59_0)
+; RV32-NEXT:    lui a0, %hi(.LCPI60_0)
+; RV32-NEXT:    addi a0, a0, %lo(.LCPI60_0)
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT:    vle16.v v20, (a0)
 ; RV32-NEXT:    li a0, 36

>From a7cde74ffd6dc37e2326ac4a3d667512f2145058 Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Thu, 24 Jul 2025 20:33:03 -0700
Subject: [PATCH 3/4] Update llvm/lib/CodeGen/InterleavedAccessPass.cpp

Co-authored-by: Luke Lau <luke_lau at icloud.com>
---
 llvm/lib/CodeGen/InterleavedAccessPass.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/CodeGen/InterleavedAccessPass.cpp b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
index 31e44c933ae74..5bb78a84de0c1 100644
--- a/llvm/lib/CodeGen/InterleavedAccessPass.cpp
+++ b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
@@ -598,7 +598,7 @@ static Value *getMask(Value *WideMask, unsigned Factor,
                                             NumSrcElts * 2, StartIndexes) &&
         llvm::all_of(StartIndexes, [](unsigned Start) { return Start == 0; }) &&
         llvm::all_of(SVI->getShuffleMask(),
-                     [&](int Idx) { return Idx < (int)NumSrcElts; })) {
+                     [&NumSrcElts](int Idx) { return Idx < (int)NumSrcElts; })) {
       auto *LeafMaskTy =
           VectorType::get(Type::getInt1Ty(SVI->getContext()), LeafValueEC);
       IRBuilder<> Builder(SVI);

>From 623f85a1a89048efd90bbb010f7db44a473ee7a3 Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Thu, 24 Jul 2025 20:58:07 -0700
Subject: [PATCH 4/4] Clang-format

---
 llvm/lib/CodeGen/InterleavedAccessPass.cpp | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/CodeGen/InterleavedAccessPass.cpp b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
index 5bb78a84de0c1..c2839d4c60680 100644
--- a/llvm/lib/CodeGen/InterleavedAccessPass.cpp
+++ b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
@@ -597,8 +597,9 @@ static Value *getMask(Value *WideMask, unsigned Factor,
     if (ShuffleVectorInst::isInterleaveMask(SVI->getShuffleMask(), Factor,
                                             NumSrcElts * 2, StartIndexes) &&
         llvm::all_of(StartIndexes, [](unsigned Start) { return Start == 0; }) &&
-        llvm::all_of(SVI->getShuffleMask(),
-                     [&NumSrcElts](int Idx) { return Idx < (int)NumSrcElts; })) {
+        llvm::all_of(SVI->getShuffleMask(), [&NumSrcElts](int Idx) {
+          return Idx < (int)NumSrcElts;
+        })) {
       auto *LeafMaskTy =
           VectorType::get(Type::getInt1Ty(SVI->getContext()), LeafValueEC);
       IRBuilder<> Builder(SVI);


