[llvm] [IA][RISCV] Detecting gap mask from a mask assembled by interleaveN intrinsics (PR #153510)

Min-Yih Hsu via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 14 14:19:19 PDT 2025


https://github.com/mshockwave updated https://github.com/llvm/llvm-project/pull/153510

From 53fe83e9c041065105ac47666a56625d8296ac3e Mon Sep 17 00:00:00 2001
From: Min-Yih Hsu <min.hsu at sifive.com>
Date: Wed, 13 Aug 2025 16:36:35 -0700
Subject: [PATCH 1/3] [IA][RISCV] Detecting gap mask from masks assembled by
 interleaveN intrinsics
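
When the wide mask of an interleaved memory access is assembled by a
llvm.vector.interleaveN intrinsic, arguments that are all-zeros constants
correspond to fields that are never accessed. Instead of requiring all
arguments to be identical, we can clear those fields' bits in the gap
mask. An illustrative factor-4 pattern (mirroring the new test below)
where only the first two fields are active:

  %wide.mask = call <8 x i1> @llvm.vector.interleave4(<2 x i1> %m, <2 x i1> %m, <2 x i1> splat (i1 false), <2 x i1> splat (i1 false))

getMask() then returns %m together with a gap mask that keeps fields 0
and 1 and clears fields 2 and 3, which lets the RISC-V backend lower the
access to a masked strided segment load (vlsseg2e32.v) that skips the
trailing two fields.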

---
 llvm/lib/CodeGen/InterleavedAccessPass.cpp    | 21 ++++-
 .../rvv/fixed-vectors-interleaved-access.ll   | 93 +++++++++++--------
 2 files changed, 75 insertions(+), 39 deletions(-)

diff --git a/llvm/lib/CodeGen/InterleavedAccessPass.cpp b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
index a41a44df3f847..16d4fab2acbf3 100644
--- a/llvm/lib/CodeGen/InterleavedAccessPass.cpp
+++ b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
@@ -596,7 +596,26 @@ static std::pair<Value *, APInt> getMask(Value *WideMask, unsigned Factor,
 
   if (auto *IMI = dyn_cast<IntrinsicInst>(WideMask)) {
     if (unsigned F = getInterleaveIntrinsicFactor(IMI->getIntrinsicID());
-        F && F == Factor && llvm::all_equal(IMI->args())) {
+        F && F == Factor) {
+      Value *RefArg = nullptr;
+      // Check if all the intrinsic arguments are the same, except those that
+      // are zeros, which we mark them as gaps in the gap mask.
+      for (auto [Idx, Arg] : enumerate(IMI->args())) {
+        if (auto *C = dyn_cast<Constant>(Arg); C && C->isZeroValue()) {
+          GapMask.clearBit(Idx);
+          continue;
+        }
+
+        if (!RefArg)
+          RefArg = Arg;
+
+        if (RefArg != Arg)
+          return {nullptr, GapMask};
+      }
+
+      // On rare occasions, all the intrinsic arguments might be zeros,
+      // in which case we still want to return an all-zeros constant instead of
+      // nullptr, so we're not using RefArg here.
       return {IMI->getArgOperand(0), GapMask};
     }
   }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
index 7d7ef3e4e2a4b..b0c744cc29f28 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
@@ -205,6 +205,23 @@ define {<4 x i32>, <4 x i32>} @vpload_factor2_interleaved_mask_intrinsic(ptr %pt
   ret {<4 x i32>, <4 x i32>} %res1
 }
 
+define {<2 x i32>, <2 x i32>} @vpload_factor4_interleaved_mask_intrinsic_skip_fields(ptr %ptr, <2 x i1> %m) {
+  ; mask = %m, skip the last two fields.
+; CHECK-LABEL: vpload_factor4_interleaved_mask_intrinsic_skip_fields:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 16
+; CHECK-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
+; CHECK-NEXT:    vlsseg2e32.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+  %interleaved.mask = call <8 x i1> @llvm.vector.interleave4(<2 x i1> %m, <2 x i1> %m, <2 x i1> splat (i1 false), <2 x i1> splat (i1 false))
+  %interleaved.vec = tail call <8 x i32> @llvm.vp.load.v8i32.p0(ptr %ptr, <8 x i1> %interleaved.mask, i32 8)
+  %v0 = shufflevector <8 x i32> %interleaved.vec, <8 x i32> poison, <2 x i32> <i32 0, i32 4>
+  %v1 = shufflevector <8 x i32> %interleaved.vec, <8 x i32> poison, <2 x i32> <i32 1, i32 5>
+  %res0 = insertvalue {<2 x i32>, <2 x i32>} undef, <2 x i32> %v0, 0
+  %res1 = insertvalue {<2 x i32>, <2 x i32>} %res0, <2 x i32> %v1, 1
+  ret {<2 x i32>, <2 x i32>} %res1
+}
+
 define {<4 x i32>, <4 x i32>} @vpload_factor2_interleaved_mask_shuffle(ptr %ptr, <4 x i1> %m) {
 ; CHECK-LABEL: vpload_factor2_interleaved_mask_shuffle:
 ; CHECK:       # %bb.0:
@@ -514,8 +531,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV32-NEXT:    li a2, 32
 ; RV32-NEXT:    lui a3, 12
 ; RV32-NEXT:    lui a6, 12291
-; RV32-NEXT:    lui a7, %hi(.LCPI25_0)
-; RV32-NEXT:    addi a7, a7, %lo(.LCPI25_0)
+; RV32-NEXT:    lui a7, %hi(.LCPI26_0)
+; RV32-NEXT:    addi a7, a7, %lo(.LCPI26_0)
 ; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
 ; RV32-NEXT:    vle32.v v24, (a5)
 ; RV32-NEXT:    vmv.s.x v0, a3
@@ -600,12 +617,12 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV32-NEXT:    addi a1, a1, 16
 ; RV32-NEXT:    vs4r.v v8, (a1) # vscale x 32-byte Folded Spill
 ; RV32-NEXT:    lui a7, 49164
-; RV32-NEXT:    lui a1, %hi(.LCPI25_1)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI25_1)
+; RV32-NEXT:    lui a1, %hi(.LCPI26_1)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI26_1)
 ; RV32-NEXT:    lui t2, 3
 ; RV32-NEXT:    lui t1, 196656
-; RV32-NEXT:    lui a4, %hi(.LCPI25_3)
-; RV32-NEXT:    addi a4, a4, %lo(.LCPI25_3)
+; RV32-NEXT:    lui a4, %hi(.LCPI26_3)
+; RV32-NEXT:    addi a4, a4, %lo(.LCPI26_3)
 ; RV32-NEXT:    lui t0, 786624
 ; RV32-NEXT:    li a5, 48
 ; RV32-NEXT:    lui a6, 768
@@ -784,8 +801,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV32-NEXT:    vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32-NEXT:    vrgatherei16.vv v24, v8, v2
-; RV32-NEXT:    lui a1, %hi(.LCPI25_2)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI25_2)
+; RV32-NEXT:    lui a1, %hi(.LCPI26_2)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI26_2)
 ; RV32-NEXT:    lui a3, 3073
 ; RV32-NEXT:    addi a3, a3, -1024
 ; RV32-NEXT:    vmv.s.x v0, a3
@@ -849,16 +866,16 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV32-NEXT:    vrgatherei16.vv v28, v8, v3
 ; RV32-NEXT:    vsetivli zero, 10, e32, m4, tu, ma
 ; RV32-NEXT:    vmv.v.v v28, v24
-; RV32-NEXT:    lui a1, %hi(.LCPI25_4)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI25_4)
-; RV32-NEXT:    lui a2, %hi(.LCPI25_5)
-; RV32-NEXT:    addi a2, a2, %lo(.LCPI25_5)
+; RV32-NEXT:    lui a1, %hi(.LCPI26_4)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI26_4)
+; RV32-NEXT:    lui a2, %hi(.LCPI26_5)
+; RV32-NEXT:    addi a2, a2, %lo(.LCPI26_5)
 ; RV32-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; RV32-NEXT:    vle16.v v24, (a2)
 ; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; RV32-NEXT:    vle16.v v8, (a1)
-; RV32-NEXT:    lui a1, %hi(.LCPI25_7)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI25_7)
+; RV32-NEXT:    lui a1, %hi(.LCPI26_7)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI26_7)
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32-NEXT:    vle16.v v10, (a1)
 ; RV32-NEXT:    csrr a1, vlenb
@@ -886,14 +903,14 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV32-NEXT:    vl8r.v v0, (a1) # vscale x 64-byte Folded Reload
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32-NEXT:    vrgatherei16.vv v16, v0, v10
-; RV32-NEXT:    lui a1, %hi(.LCPI25_6)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI25_6)
-; RV32-NEXT:    lui a2, %hi(.LCPI25_8)
-; RV32-NEXT:    addi a2, a2, %lo(.LCPI25_8)
+; RV32-NEXT:    lui a1, %hi(.LCPI26_6)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI26_6)
+; RV32-NEXT:    lui a2, %hi(.LCPI26_8)
+; RV32-NEXT:    addi a2, a2, %lo(.LCPI26_8)
 ; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; RV32-NEXT:    vle16.v v4, (a1)
-; RV32-NEXT:    lui a1, %hi(.LCPI25_9)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI25_9)
+; RV32-NEXT:    lui a1, %hi(.LCPI26_9)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI26_9)
 ; RV32-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; RV32-NEXT:    vle16.v v6, (a1)
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
@@ -980,8 +997,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV64-NEXT:    li a4, 128
 ; RV64-NEXT:    lui a1, 1
 ; RV64-NEXT:    vle64.v v8, (a3)
-; RV64-NEXT:    lui a3, %hi(.LCPI25_0)
-; RV64-NEXT:    addi a3, a3, %lo(.LCPI25_0)
+; RV64-NEXT:    lui a3, %hi(.LCPI26_0)
+; RV64-NEXT:    addi a3, a3, %lo(.LCPI26_0)
 ; RV64-NEXT:    vmv.s.x v0, a4
 ; RV64-NEXT:    csrr a4, vlenb
 ; RV64-NEXT:    li a5, 61
@@ -1169,8 +1186,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV64-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT:    vslideup.vi v12, v16, 1, v0.t
-; RV64-NEXT:    lui a2, %hi(.LCPI25_1)
-; RV64-NEXT:    addi a2, a2, %lo(.LCPI25_1)
+; RV64-NEXT:    lui a2, %hi(.LCPI26_1)
+; RV64-NEXT:    addi a2, a2, %lo(.LCPI26_1)
 ; RV64-NEXT:    li a3, 192
 ; RV64-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; RV64-NEXT:    vle16.v v6, (a2)
@@ -1204,8 +1221,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV64-NEXT:    vrgatherei16.vv v24, v16, v6
 ; RV64-NEXT:    addi a2, sp, 16
 ; RV64-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
-; RV64-NEXT:    lui a2, %hi(.LCPI25_2)
-; RV64-NEXT:    addi a2, a2, %lo(.LCPI25_2)
+; RV64-NEXT:    lui a2, %hi(.LCPI26_2)
+; RV64-NEXT:    addi a2, a2, %lo(.LCPI26_2)
 ; RV64-NEXT:    li a3, 1040
 ; RV64-NEXT:    vmv.s.x v0, a3
 ; RV64-NEXT:    addi a1, a1, -2016
@@ -1289,12 +1306,12 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a1, a1, 16
 ; RV64-NEXT:    vs4r.v v8, (a1) # vscale x 32-byte Folded Spill
-; RV64-NEXT:    lui a1, %hi(.LCPI25_3)
-; RV64-NEXT:    addi a1, a1, %lo(.LCPI25_3)
+; RV64-NEXT:    lui a1, %hi(.LCPI26_3)
+; RV64-NEXT:    addi a1, a1, %lo(.LCPI26_3)
 ; RV64-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; RV64-NEXT:    vle16.v v20, (a1)
-; RV64-NEXT:    lui a1, %hi(.LCPI25_4)
-; RV64-NEXT:    addi a1, a1, %lo(.LCPI25_4)
+; RV64-NEXT:    lui a1, %hi(.LCPI26_4)
+; RV64-NEXT:    addi a1, a1, %lo(.LCPI26_4)
 ; RV64-NEXT:    vle16.v v8, (a1)
 ; RV64-NEXT:    csrr a1, vlenb
 ; RV64-NEXT:    li a2, 77
@@ -1345,8 +1362,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
 ; RV64-NEXT:    vl2r.v v8, (a1) # vscale x 16-byte Folded Reload
 ; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64-NEXT:    vrgatherei16.vv v0, v16, v8
-; RV64-NEXT:    lui a1, %hi(.LCPI25_5)
-; RV64-NEXT:    addi a1, a1, %lo(.LCPI25_5)
+; RV64-NEXT:    lui a1, %hi(.LCPI26_5)
+; RV64-NEXT:    addi a1, a1, %lo(.LCPI26_5)
 ; RV64-NEXT:    vle16.v v20, (a1)
 ; RV64-NEXT:    csrr a1, vlenb
 ; RV64-NEXT:    li a2, 61
@@ -1963,8 +1980,8 @@ define {<4 x i32>, <4 x i32>, <4 x i32>} @invalid_vp_mask(ptr %ptr) {
 ; RV32-NEXT:    vle32.v v12, (a0), v0.t
 ; RV32-NEXT:    li a0, 36
 ; RV32-NEXT:    vmv.s.x v20, a1
-; RV32-NEXT:    lui a1, %hi(.LCPI61_0)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI61_0)
+; RV32-NEXT:    lui a1, %hi(.LCPI62_0)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI62_0)
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT:    vle16.v v21, (a1)
 ; RV32-NEXT:    vcompress.vm v8, v12, v11
@@ -2039,8 +2056,8 @@ define {<4 x i32>, <4 x i32>, <4 x i32>} @invalid_vp_evl(ptr %ptr) {
 ; RV32-NEXT:    vmv.s.x v10, a0
 ; RV32-NEXT:    li a0, 146
 ; RV32-NEXT:    vmv.s.x v11, a0
-; RV32-NEXT:    lui a0, %hi(.LCPI62_0)
-; RV32-NEXT:    addi a0, a0, %lo(.LCPI62_0)
+; RV32-NEXT:    lui a0, %hi(.LCPI63_0)
+; RV32-NEXT:    addi a0, a0, %lo(.LCPI63_0)
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT:    vle16.v v20, (a0)
 ; RV32-NEXT:    li a0, 36
@@ -2198,8 +2215,8 @@ define {<4 x i32>, <4 x i32>, <4 x i32>} @maskedload_factor3_invalid_skip_field(
 ; RV32-NEXT:    vle32.v v12, (a0), v0.t
 ; RV32-NEXT:    li a0, 36
 ; RV32-NEXT:    vmv.s.x v20, a1
-; RV32-NEXT:    lui a1, %hi(.LCPI68_0)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI68_0)
+; RV32-NEXT:    lui a1, %hi(.LCPI69_0)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI69_0)
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT:    vle16.v v21, (a1)
 ; RV32-NEXT:    vcompress.vm v8, v12, v11

From 17e9bec2da486db6616fdd80693bc2203c624c3a Mon Sep 17 00:00:00 2001
From: Min-Yih Hsu <min.hsu at sifive.com>
Date: Thu, 14 Aug 2025 11:12:52 -0700
Subject: [PATCH 2/3] fixup! Address review comments
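
Use an else-if chain when comparing against the reference argument, and
return RefArg instead of always returning argument 0: when the leading
field is a gap, argument 0 is a zero constant rather than the shared
per-field mask. For an illustrative mask like

  %wide.mask = call <4 x i1> @llvm.vector.interleave2(<2 x i1> zeroinitializer, <2 x i1> %m)

the per-field mask is %m, so returning argument 0 would incorrectly hand
back the zero constant. When every argument is zero, fall back to an
explicit all-zeros constant of the argument type.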

---
 llvm/lib/CodeGen/InterleavedAccessPass.cpp            | 11 ++++++-----
 .../RISCV/rvv/fixed-vectors-interleaved-access.ll     |  2 +-
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/llvm/lib/CodeGen/InterleavedAccessPass.cpp b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
index 16d4fab2acbf3..c5039ceac590b 100644
--- a/llvm/lib/CodeGen/InterleavedAccessPass.cpp
+++ b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
@@ -599,7 +599,7 @@ static std::pair<Value *, APInt> getMask(Value *WideMask, unsigned Factor,
         F && F == Factor) {
       Value *RefArg = nullptr;
       // Check if all the intrinsic arguments are the same, except those that
-      // are zeros, which we mark them as gaps in the gap mask.
+      // are zeros, which we mark as gaps in the gap mask.
       for (auto [Idx, Arg] : enumerate(IMI->args())) {
         if (auto *C = dyn_cast<Constant>(Arg); C && C->isZeroValue()) {
           GapMask.clearBit(Idx);
@@ -608,15 +608,16 @@ static std::pair<Value *, APInt> getMask(Value *WideMask, unsigned Factor,
 
         if (!RefArg)
           RefArg = Arg;
-
-        if (RefArg != Arg)
+        else if (RefArg != Arg)
           return {nullptr, GapMask};
       }
 
       // On rare occasions, all the intrinsic arguments might be zeros,
       // in which case we still want to return an all-zeros constant instead of
-      // nullptr, so we're not using RefArg here.
-      return {IMI->getArgOperand(0), GapMask};
+      // nullptr.
+      return {RefArg ? RefArg
+                     : Constant::getNullValue(IMI->getArgOperand(0)->getType()),
+              GapMask};
     }
   }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
index b0c744cc29f28..5781660f024e4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
@@ -205,8 +205,8 @@ define {<4 x i32>, <4 x i32>} @vpload_factor2_interleaved_mask_intrinsic(ptr %pt
   ret {<4 x i32>, <4 x i32>} %res1
 }
 
+; mask = %m, skip the last two fields.
 define {<2 x i32>, <2 x i32>} @vpload_factor4_interleaved_mask_intrinsic_skip_fields(ptr %ptr, <2 x i1> %m) {
-  ; mask = %m, skip the last two fields.
 ; CHECK-LABEL: vpload_factor4_interleaved_mask_intrinsic_skip_fields:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a1, 16

From 7c00c1ceec4976ee6300ddb484de1625da1434b5 Mon Sep 17 00:00:00 2001
From: Min-Yih Hsu <min.hsu at sifive.com>
Date: Thu, 14 Aug 2025 11:49:16 -0700
Subject: [PATCH 3/3] fixup! Simplify the ternary expression
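
RefArg is only null when every intrinsic argument was a zero constant,
in which case argument 0 is itself the all-zeros mask we want to return,
so there is no need to build one with Constant::getNullValue.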

---
 llvm/lib/CodeGen/InterleavedAccessPass.cpp | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/llvm/lib/CodeGen/InterleavedAccessPass.cpp b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
index c5039ceac590b..24024b0905fc9 100644
--- a/llvm/lib/CodeGen/InterleavedAccessPass.cpp
+++ b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
@@ -615,9 +615,7 @@ static std::pair<Value *, APInt> getMask(Value *WideMask, unsigned Factor,
       // On rare occasions, all the intrinsic arguments might be zeros,
       // in which case we still want to return an all-zeros constant instead of
       // nullptr.
-      return {RefArg ? RefArg
-                     : Constant::getNullValue(IMI->getArgOperand(0)->getType()),
-              GapMask};
+      return {RefArg ? RefArg : IMI->getArgOperand(0), GapMask};
     }
   }
 


