[llvm] 0e28c9b - [LAA] Skip undef/poison strides in collectStridedAccess.

Florian Hahn via llvm-commits <llvm-commits at lists.llvm.org>
Sun Oct 26 22:01:40 PDT 2025


Author: Florian Hahn
Date: 2025-10-27T05:01:17Z
New Revision: 0e28c9bc9d64625db8e4a1707720c9eecff069a4

URL: https://github.com/llvm/llvm-project/commit/0e28c9bc9d64625db8e4a1707720c9eecff069a4
DIFF: https://github.com/llvm/llvm-project/commit/0e28c9bc9d64625db8e4a1707720c9eecff069a4.diff

LOG: [LAA] Skip undef/poison strides in collectStridedAccess.

The map populated by collectStridedAccess is used to replace strides with
their versioned values. This does not work for undef/poison, which don't
have use-lists. Don't try to version them; versioning them would not be
useful in practice anyway.

Fixes https://github.com/llvm/llvm-project/issues/162922.

Added: 
    

Modified: 
    llvm/lib/Analysis/LoopAccessAnalysis.cpp
    llvm/test/Transforms/LoopVectorize/version-mem-access.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index 7adb25d0e5e0d..e27a9b1c44014 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -2982,6 +2982,10 @@ void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
   if (!StrideExpr)
     return;
 
+  if (auto *Unknown = dyn_cast<SCEVUnknown>(StrideExpr))
+    if (isa<UndefValue>(Unknown->getValue()))
+      return;
+
   LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
                        "versioning:");
   LLVM_DEBUG(dbgs() << "  Ptr: " << *Ptr << " Stride: " << *StrideExpr << "\n");

diff --git a/llvm/test/Transforms/LoopVectorize/version-mem-access.ll b/llvm/test/Transforms/LoopVectorize/version-mem-access.ll
index eb5e892a7b7b2..91b30998bf611 100644
--- a/llvm/test/Transforms/LoopVectorize/version-mem-access.ll
+++ b/llvm/test/Transforms/LoopVectorize/version-mem-access.ll
@@ -122,3 +122,96 @@ loop:
 exit:
   ret void
 }
+
+; Make sure we do not crash when the stride is poison.
+; Test for https://github.com/llvm/llvm-project/issues/162922.
+define void @stride_poison(ptr %dst) mustprogress {
+; CHECK-LABEL: define void @stride_poison(
+; CHECK-SAME: ptr [[DST:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 poison, i64 1)
+; CHECK-NEXT:    [[TMP0:%.*]] = udiv i64 99, [[UMAX]]
+; CHECK-NEXT:    [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 2
+; CHECK-NEXT:    br label %[[VECTOR_PH:.*]]
+; CHECK:       [[VECTOR_PH]]:
+; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], 2
+; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]]
+; CHECK-NEXT:    [[TMP2:%.*]] = mul i64 [[N_VEC]], poison
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], poison
+; CHECK-NEXT:    [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], poison
+; CHECK-NEXT:    [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], poison
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP3]]
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP4]]
+; CHECK-NEXT:    store i8 0, ptr [[TMP5]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[TMP6]], align 1
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT:    [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK:       [[MIDDLE_BLOCK]]:
+; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH:.*]]
+; CHECK:       [[SCALAR_PH]]:
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+  %gep.dst = getelementptr i8, ptr %dst, i64 %iv
+  store i8 0, ptr %gep.dst, align 1
+  %iv.next = add nuw nsw i64 %iv, poison
+  %ec = icmp samesign ult i64 %iv, 100
+  br i1 %ec, label %loop, label %exit
+
+exit:
+  ret void
+}
+
+; Make sure we do not crash when the stride is undef.
+define void @stride_undef(ptr %dst) mustprogress {
+; CHECK-LABEL: define void @stride_undef(
+; CHECK-SAME: ptr [[DST:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 undef, i64 1)
+; CHECK-NEXT:    [[TMP0:%.*]] = udiv i64 99, [[UMAX]]
+; CHECK-NEXT:    [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 2
+; CHECK-NEXT:    br label %[[VECTOR_PH:.*]]
+; CHECK:       [[VECTOR_PH]]:
+; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], 2
+; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]]
+; CHECK-NEXT:    [[TMP2:%.*]] = mul i64 [[N_VEC]], undef
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], undef
+; CHECK-NEXT:    [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 0
+; CHECK-NEXT:    [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], undef
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP3]]
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP4]]
+; CHECK-NEXT:    store i8 0, ptr [[TMP5]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[TMP6]], align 1
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT:    [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK:       [[MIDDLE_BLOCK]]:
+; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH:.*]]
+; CHECK:       [[SCALAR_PH]]:
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+  %gep.dst = getelementptr i8, ptr %dst, i64 %iv
+  store i8 0, ptr %gep.dst, align 1
+  %iv.next = add nuw nsw i64 %iv, undef
+  %ec = icmp samesign ult i64 %iv, 100
+  br i1 %ec, label %loop, label %exit
+
+exit:
+  ret void
+}

More information about the llvm-commits mailing list