[llvm] c5d6feb - [VPlan] Limit interleave group narrowing to consecutive wide loads.

Florian Hahn via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 26 04:54:00 PST 2026


Author: Florian Hahn
Date: 2026-02-26T12:52:31Z
New Revision: c5d6feb3152bf39d820935df0d0490f90364d44c

URL: https://github.com/llvm/llvm-project/commit/c5d6feb3152bf39d820935df0d0490f90364d44c
DIFF: https://github.com/llvm/llvm-project/commit/c5d6feb3152bf39d820935df0d0490f90364d44c.diff

LOG: [VPlan] Limit interleave group narrowing to consecutive wide loads.

Tighten check in canNarrowLoad to require consecutive wide loads; we
cannot properly narrow gathers at the moment.

Fixes https://github.com/llvm/llvm-project/issues/183345.

Added: 
    

Modified: 
    llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
    llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 3c9d79a62db14..437243ca28b40 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -5159,7 +5159,7 @@ static bool canNarrowLoad(VPWidenRecipe *WideMember0, unsigned OpIdx,
   if (!Member0OpR)
     return Member0Op == OpV;
   if (auto *W = dyn_cast<VPWidenLoadRecipe>(Member0OpR))
-    return !W->getMask() && Member0Op == OpV;
+    return !W->getMask() && W->isConsecutive() && Member0Op == OpV;
   if (auto *IR = dyn_cast<VPInterleaveRecipe>(Member0OpR))
     return IR->getInterleaveGroup()->isFull() && IR->getVPValue(Idx) == OpV;
   return false;

diff  --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll
index fa60f8eec28f1..3a9268cfe6013 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll
@@ -410,3 +410,58 @@ loop.latch:
 exit:
   ret void
 }
+
+; Test case for https://github.com/llvm/llvm-project/issues/183345.
+define void @interleave_group_with_gather(ptr %indices, ptr %src, i64 %n) {
+; CHECK-LABEL: define void @interleave_group_with_gather(
+; CHECK-SAME: ptr [[INDICES:%.*]], ptr [[SRC:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    br label %[[LOOP:.*]]
+; CHECK:       [[LOOP]]:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    [[OUT_GEP:%.*]] = getelementptr { double, double }, ptr null, i64 [[IV]]
+; CHECK-NEXT:    [[IDX_GEP:%.*]] = getelementptr i32, ptr [[INDICES]], i64 [[IV]]
+; CHECK-NEXT:    [[IDX:%.*]] = load i32, ptr [[IDX_GEP]], align 4
+; CHECK-NEXT:    [[IDX_EXT:%.*]] = sext i32 [[IDX]] to i64
+; CHECK-NEXT:    [[SRC_GEP:%.*]] = getelementptr double, ptr [[SRC]], i64 [[IDX_EXT]]
+; CHECK-NEXT:    [[SRC_VAL:%.*]] = load double, ptr [[SRC_GEP]], align 8
+; CHECK-NEXT:    [[OUT_M1_0_GEP:%.*]] = getelementptr i8, ptr [[OUT_GEP]], i64 -16
+; CHECK-NEXT:    [[OUT_M1_0:%.*]] = load double, ptr [[OUT_M1_0_GEP]], align 8
+; CHECK-NEXT:    [[ADD_0:%.*]] = fadd double 1.000000e+01, [[SRC_VAL]]
+; CHECK-NEXT:    store double [[ADD_0]], ptr [[OUT_M1_0_GEP]], align 8
+; CHECK-NEXT:    [[OUT_M1_1_GEP:%.*]] = getelementptr i8, ptr [[OUT_GEP]], i64 -8
+; CHECK-NEXT:    [[OUT_M1_1:%.*]] = load double, ptr [[OUT_M1_1_GEP]], align 8
+; CHECK-NEXT:    [[ADD_1:%.*]] = fadd double 1.000000e+01, [[SRC_VAL]]
+; CHECK-NEXT:    store double [[ADD_1]], ptr [[OUT_M1_1_GEP]], align 8
+; CHECK-NEXT:    [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT:    [[EXIT_COND:%.*]] = icmp eq i64 [[IV]], [[N]]
+; CHECK-NEXT:    br i1 [[EXIT_COND]], label %[[EXIT:.*]], label %[[LOOP]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+  %out.gep = getelementptr { double, double }, ptr null, i64 %iv
+  %idx.gep = getelementptr i32, ptr %indices, i64 %iv
+  %idx = load i32, ptr %idx.gep, align 4
+  %idx.ext = sext i32 %idx to i64
+  %src.gep = getelementptr double, ptr %src, i64 %idx.ext
+  %src.val = load double, ptr %src.gep, align 8
+  %out.m1.0.gep = getelementptr i8, ptr %out.gep, i64 -16
+  %out.m1.0 = load double, ptr %out.m1.0.gep, align 8
+  %add.0 = fadd double 10.0, %src.val
+  store double %add.0, ptr %out.m1.0.gep, align 8
+  %out.m1.1.gep = getelementptr i8, ptr %out.gep, i64 -8
+  %out.m1.1 = load double, ptr %out.m1.1.gep, align 8
+  %add.1 = fadd double 10.0, %src.val
+  store double %add.1, ptr %out.m1.1.gep, align 8
+  %iv.next = add i64 %iv, 1
+  %exit.cond = icmp eq i64 %iv, %n
+  br i1 %exit.cond, label %exit, label %loop
+
+exit:
+  ret void
+}


        


More information about the llvm-commits mailing list