[llvm-branch-commits] [llvm] [WIP|POC][LAA] Another approach to detect WAW hazards (PR #187804)
Andrei Elovikov via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Fri Mar 20 14:48:06 PDT 2026
https://github.com/eas created https://github.com/llvm/llvm-project/pull/187804
None
>From 92a66933a118085e41ef8633d9ab653e7b9da88e Mon Sep 17 00:00:00 2001
From: Andrei Elovikov <andrei.elovikov at sifive.com>
Date: Fri, 20 Mar 2026 13:25:57 -0700
Subject: [PATCH 1/2] Add test
---
.../multiple_stores_to_same_addr.ll | 374 ++++++++++++++++++
1 file changed, 374 insertions(+)
create mode 100644 llvm/test/Analysis/LoopAccessAnalysis/multiple_stores_to_same_addr.ll
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/multiple_stores_to_same_addr.ll b/llvm/test/Analysis/LoopAccessAnalysis/multiple_stores_to_same_addr.ll
new file mode 100644
index 0000000000000..0a71d4a3c63c0
--- /dev/null
+++ b/llvm/test/Analysis/LoopAccessAnalysis/multiple_stores_to_same_addr.ll
@@ -0,0 +1,374 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -passes='print<access-info>' -disable-output < %s -enable-mem-access-versioning=false 2>&1 | FileCheck %s
+
+; A conflict could be statically known to happen; still fine for vectorization
+; because of the ordered replicated store/scatter semantics.
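+; E.g., assuming the two stores below are vectorized as two ordered scatters
+; executed one after the other, the later lane/scatter wins on overlap, which
+; preserves the scalar store order.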
+define void @waw_no_mask(ptr %p, i64 %stride, i64 %n) {
+; CHECK-LABEL: 'waw_no_mask'
+; CHECK-NEXT: header:
+; CHECK-NEXT: Memory dependences are safe
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
+entry:
+ br label %header
+
+header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ]
+ %iv.next = add nsw i64 %iv, 1
+ %idx = and i64 %iv, u0xffff0
+
+ %gep = getelementptr inbounds i64, ptr %p, i64 %idx
+ store i64 %iv, ptr %gep
+ store i64 %iv.next, ptr %gep
+
+ %exitcond = icmp slt i64 %iv.next, %n
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+; A conflict could be statically known to happen; unsafe to vectorize.
+; FIXME: https://github.com/llvm/llvm-project/issues/187402
+define void @waw_mask(ptr %p, i64 %stride, i64 %n, i64 %n0, i64 %n1) {
+; CHECK-LABEL: 'waw_mask'
+; CHECK-NEXT: header:
+; CHECK-NEXT: Memory dependences are safe
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
+entry:
+ br label %header
+
+header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %latch ]
+ %iv.next = add nsw i64 %iv, 1
+ %idx = and i64 %iv, u0xffff0
+
+ %c0 = icmp sle i64 %iv, %n0
+ %c1 = icmp sle i64 %iv, %n1
+
+ %gep = getelementptr inbounds i64, ptr %p, i64 %idx
+ br i1 %c0, label %store0, label %merge
+
+store0:
+ store i64 %iv, ptr %gep
+ br label %merge
+
+merge:
+ br i1 %c1, label %store1, label %latch
+
+store1:
+ store i64 %iv.next, ptr %gep
+ br label %latch
+
+latch:
+ %exitcond = icmp slt i64 %iv.next, %n
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+; Same as @waw_no_mask but with a run-time strided access, so `%stride != 0` can be speculated.
+define void @waw_no_mask_unknown_stride(ptr %p, i64 %stride, i64 %n) {
+; CHECK-LABEL: 'waw_no_mask_unknown_stride'
+; CHECK-NEXT: header:
+; CHECK-NEXT: Memory dependences are safe
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
+entry:
+ br label %header
+
+header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ]
+ %iv.next = add nsw i64 %iv, 1
+ %idx = mul nsw nuw i64 %iv, %stride
+
+ %gep = getelementptr inbounds i64, ptr %p, i64 %idx
+ store i64 %iv, ptr %gep
+ store i64 %iv.next, ptr %gep
+
+ %exitcond = icmp slt i64 %iv.next, %n
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+; Same as @waw_mask but with a run-time strided access, so `%stride != 0` can be speculated.
+; FIXME: https://github.com/llvm/llvm-project/issues/187402
+define void @waw_mask_unknown_stride(ptr %p, i64 %stride, i64 %n0, i64 %n1) {
+; CHECK-LABEL: 'waw_mask_unknown_stride'
+; CHECK-NEXT: header:
+; CHECK-NEXT: Memory dependences are safe
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
+entry:
+ br label %header
+
+header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %latch ]
+ %iv.next = add nsw i64 %iv, 1
+ %idx = mul nsw nuw i64 %iv, %stride
+
+ %c0 = icmp sle i64 %iv, %n0
+ %c1 = icmp sle i64 %iv, %n1
+
+ %gep = getelementptr inbounds i64, ptr %p, i64 %idx
+ br i1 %c0, label %store0, label %merge
+
+store0:
+ store i64 %iv, ptr %gep
+ br label %merge
+
+merge:
+ br i1 %c1, label %store1, label %latch
+
+store1:
+ store i64 %iv.next, ptr %gep
+ br label %latch
+
+latch:
+ %exitcond = icmp slt i64 %iv.next, 128
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+; Safe to vectorize.
+define void @no_cross_iter_dependency(ptr %p, i8 %a, i64 %n, i64 %n0, i64 %n1) {
+; CHECK-LABEL: 'no_cross_iter_dependency'
+; CHECK-NEXT: header:
+; CHECK-NEXT: Memory dependences are safe
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
+entry:
+ %a.zext = zext i8 %a to i64
+ %stride = add i64 %a.zext, 1 ; known non-zero
+ br label %header
+
+header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %latch ]
+ %iv.next = add nsw i64 %iv, 1
+ %idx = mul nsw nuw i64 %iv, %stride
+
+ %c0 = icmp sle i64 %iv, %n0
+ %c1 = icmp sle i64 %iv, %n1
+
+ %gep = getelementptr inbounds i64, ptr %p, i64 %idx
+ br i1 %c0, label %store0, label %merge
+
+store0:
+ store i64 %iv, ptr %gep
+ br label %merge
+
+merge:
+ br i1 %c1, label %store1, label %latch
+
+store1:
+ store i64 %iv.next, ptr %gep
+ br label %latch
+
+latch:
+ %exitcond = icmp slt i64 %iv.next, 128
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+; Safe to vectorize.
+define void @const_stride(ptr %p, i64 %n, i64 %n0, i64 %n1) {
+; CHECK-LABEL: 'const_stride'
+; CHECK-NEXT: header:
+; CHECK-NEXT: Memory dependences are safe
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
+entry:
+ br label %header
+
+header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %latch ]
+ %iv.next = add nsw i64 %iv, 1
+ %idx = mul nsw nuw i64 %iv, 5
+
+ %c0 = icmp sle i64 %iv, %n0
+ %c1 = icmp sle i64 %iv, %n1
+
+ %gep = getelementptr inbounds i64, ptr %p, i64 %idx
+ br i1 %c0, label %store0, label %merge
+
+store0:
+ store i64 %iv, ptr %gep
+ br label %merge
+
+merge:
+ br i1 %c1, label %store1, label %latch
+
+store1:
+ store i64 %iv.next, ptr %gep
+ br label %latch
+
+latch:
+ %exitcond = icmp slt i64 %iv.next, 128
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+define void @indirect_single_store(ptr noalias %p, i64 %n) {
+; CHECK-LABEL: 'indirect_single_store'
+; CHECK-NEXT: header:
+; CHECK-NEXT: Memory dependences are safe
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
+entry:
+ br label %header
+
+header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ]
+ %iv.next = add nsw i64 %iv, 1
+
+ %gep.ld = getelementptr ptr, ptr %p, i64 %iv
+ %gep = load ptr, ptr %gep.ld
+
+ store i64 %iv, ptr %gep
+
+ %exitcond = icmp slt i64 %iv.next, 128
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+define void @indirect_no_mask(ptr noalias %p, i64 %n) {
+; CHECK-LABEL: 'indirect_no_mask'
+; CHECK-NEXT: header:
+; CHECK-NEXT: Memory dependences are safe
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
+entry:
+ br label %header
+
+header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ]
+ %iv.next = add nsw i64 %iv, 1
+
+ %gep.ld = getelementptr ptr, ptr %p, i64 %iv
+ %gep = load ptr, ptr %gep.ld
+
+ store i64 %iv, ptr %gep
+ store i64 %iv.next, ptr %gep
+
+ %exitcond = icmp slt i64 %iv.next, 128
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+define void @indirect_mask(ptr noalias %p, i64 %n, i64 %n0, i64 %n1) {
+; CHECK-LABEL: 'indirect_mask'
+; CHECK-NEXT: header:
+; CHECK-NEXT: Memory dependences are safe
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
+entry:
+ br label %header
+
+header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %latch ]
+ %iv.next = add nsw i64 %iv, 1
+
+ %gep.ld = getelementptr ptr, ptr %p, i64 %iv
+ %gep = load ptr, ptr %gep.ld
+
+ %c0 = icmp sle i64 %iv, %n0
+ %c1 = icmp sle i64 %iv, %n1
+
+ br i1 %c0, label %store0, label %merge
+
+store0:
+ store i64 %iv, ptr %gep
+ br label %merge
+
+merge:
+ br i1 %c1, label %store1, label %latch
+
+store1:
+ store i64 %iv.next, ptr %gep
+ br label %latch
+
+latch:
+ %exitcond = icmp slt i64 %iv.next, 128
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
>From e9a1bc8a18931de54b3cee08e8c84ff974e765de Mon Sep 17 00:00:00 2001
From: Andrei Elovikov <andrei.elovikov at sifive.com>
Date: Fri, 20 Mar 2026 14:43:38 -0700
Subject: [PATCH 2/2] [WIP|POC][LAA] Another approach to detect WAW hazards
---
llvm/lib/Analysis/LoopAccessAnalysis.cpp | 57 +++++++++++++++++++
.../multiple_stores_to_same_addr.ll | 4 --
.../RISCV/gather-scatter-cost.ll | 40 ++++---------
3 files changed, 69 insertions(+), 32 deletions(-)
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index 5f4f305506d40..eaf9ce488b013 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -2689,6 +2689,8 @@ bool LoopAccessInfo::analyzeLoop(AAResults *AA, const LoopInfo *LI,
// to the same address.
SmallPtrSet<Value *, 16> UniformStores;
+ SmallPtrSet<Value *, 16> PointersWithMultipleStores;
+
for (StoreInst *ST : Stores) {
Value *Ptr = ST->getPointerOperand();
@@ -2719,6 +2721,61 @@ bool LoopAccessInfo::analyzeLoop(AAResults *AA, const LoopInfo *LI,
MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
Accesses.addStore(NewLoc, AccessTy);
});
+ } else {
+ PointersWithMultipleStores.insert(Ptr);
+ }
+ }
+
+ for (auto *Ptr : PointersWithMultipleStores) {
+ // NOTE: Known invariant stores are handled separately in both this
+ // file and LoopVectorizationLegality to support the case when
+ // reduction wasn't completely transformed into SSA form.
+ if (isInvariant(Ptr))
+ continue;
+
+ // If there are multiple writes to the same pointer, we need to avoid the
+ // following scenario:
+ //
+ // code:
+ // if (RT_COND0) *p = x;
+ // if (RT_COND1) *p = y;
+ //
+ // execution:
+ // Iter0 | Iter1
+ // no store | *p = 2
+ // *p = 1 | no store
+ //
+ // The scalar loop would leave `*p == 2`, yet the two vectorized scatters
+ // would result in `*p == 1`, which is wrong. Conservatively assume that
+ // only known strided accesses guarantee no such cross-iteration
+ // dependency.
+
+ SmallVector<StoreInst *> StoresToPtr(make_filter_range(
+ Stores, [&](StoreInst *ST) { return ST->getOperand(1) == Ptr; }));
+ StoreInst *LastStore = StoresToPtr.back();
+ Type *AccessTy = getLoadStoreType(LastStore);
+ if (!all_of(StoresToPtr, [&](StoreInst *SI) {
+ return getLoadStoreType(SI) == AccessTy;
+ })) {
+ LLVM_DEBUG(dbgs() << "LAA: Multiple stores to the same pointer with "
+ "different access type\n");
+ return false;
+ }
+
+ if (getPtrStride(*PSE, AccessTy, Ptr, TheLoop, *DT, SymbolicStrides, false,
+ false)) {
+ // Known strided access, no cross-iteration dependencies.
+ // TODO: Is `ShouldCheckWrap == false` really ok here?
+ continue;
+ }
+
+ // If LastStore is unmasked, then each iteration/lane is guaranteed to
+ // write a value, and the ordered semantics of @llvm.scatter/replication
+ // ensure that the final value comes from the last lane.
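+ // For example (a hypothetical masked/unmasked pair):
+ //   if (RT_COND) *p = x; // masked, may be skipped on some iterations
+ //   *p = y;              // unmasked, executes on every iteration/lane
+ // Every lane writes `y`, so the last lane's value survives, matching the
+ // scalar loop regardless of RT_COND.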
+ if (!DT->dominates(LastStore, TheLoop->getLoopLatch()->getTerminator())) {
+ LLVM_DEBUG(dbgs() << "LAA: Last store " << *LastStore
+ << " is masked, potential WAW hazard\n");
+ return false;
}
}
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/multiple_stores_to_same_addr.ll b/llvm/test/Analysis/LoopAccessAnalysis/multiple_stores_to_same_addr.ll
index 0a71d4a3c63c0..a3bbc2714aba3 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/multiple_stores_to_same_addr.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/multiple_stores_to_same_addr.ll
@@ -40,7 +40,6 @@ exit:
define void @waw_mask(ptr %p, i64 %stride, i64 %n, i64 %n0, i64 %n1) {
; CHECK-LABEL: 'waw_mask'
; CHECK-NEXT: header:
-; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
@@ -121,7 +120,6 @@ exit:
define void @waw_mask_unknown_stride(ptr %p, i64 %stride, i64 %n0, i64 %n1) {
; CHECK-LABEL: 'waw_mask_unknown_stride'
; CHECK-NEXT: header:
-; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
@@ -168,7 +166,6 @@ exit:
define void @no_cross_iter_dependency(ptr %p, i8 %a, i64 %n, i64 %n0, i64 %n1) {
; CHECK-LABEL: 'no_cross_iter_dependency'
; CHECK-NEXT: header:
-; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
@@ -328,7 +325,6 @@ exit:
define void @indirect_mask(ptr noalias %p, i64 %n, i64 %n0, i64 %n1) {
; CHECK-LABEL: 'indirect_mask'
; CHECK-NEXT: header:
-; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll
index fabab210fb850..3ea068440ce22 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll
@@ -162,35 +162,19 @@ exit:
define void @store_to_addr_generated_from_invariant_addr(ptr noalias %p0, ptr noalias %p1, ptr noalias %p2, ptr %p3, i64 %N) {
; CHECK-LABEL: @store_to_addr_generated_from_invariant_addr(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N:%.*]], 1
-; CHECK-NEXT: br label [[VECTOR_PH:%.*]]
-; CHECK: vector.ph:
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <vscale x 2 x ptr> poison, ptr [[P0:%.*]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT1:%.*]] = shufflevector <vscale x 2 x ptr> [[BROADCAST_SPLATINSERT2]], <vscale x 2 x ptr> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[TMP1]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
-; CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP4]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr [[P1:%.*]], <vscale x 2 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.vp.scatter.nxv2p0.nxv2p0(<vscale x 2 x ptr> [[BROADCAST_SPLAT1]], <vscale x 2 x ptr> align 8 [[TMP5]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP3]])
+; CHECK: loop:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr i32, ptr [[P1:%.*]], i64 [[IV]]
+; CHECK-NEXT: store ptr [[P0:%.*]], ptr [[ARRAYIDX11]], align 8
; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr [[P2:%.*]], align 4
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[P3:%.*]], i64 [[TMP6]]
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 2 x ptr> poison, ptr [[TMP8]], i64 0
-; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <vscale x 2 x ptr> [[BROADCAST_SPLATINSERT3]], <vscale x 2 x ptr> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i32.nxv2p0(<vscale x 2 x i32> zeroinitializer, <vscale x 2 x ptr> align 4 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP3]])
-; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i32.nxv2p0(<vscale x 2 x i32> zeroinitializer, <vscale x 2 x ptr> align 4 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP3]])
-; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i8.nxv2p0(<vscale x 2 x i8> zeroinitializer, <vscale x 2 x ptr> align 1 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP3]])
-; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP4]]
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
-; CHECK: middle.block:
-; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK-NEXT: store i32 0, ptr [[TMP8]], align 4
+; CHECK-NEXT: store i32 0, ptr [[TMP8]], align 4
+; CHECK-NEXT: store i8 0, ptr [[TMP8]], align 1
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV]], [[N:%.*]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT:%.*]], label [[VECTOR_BODY]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -250,7 +234,7 @@ define i8 @mixed_gather_scatters(ptr %A, ptr %B, ptr %C) #0 {
; RVA23-NEXT: [[TMP14]] = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i8> [[TMP13]], <vscale x 2 x i8> [[VEC_PHI]], i32 [[TMP0]])
; RVA23-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP0]]
; RVA23-NEXT: [[TMP15:%.*]] = icmp eq i32 [[AVL_NEXT]], 0
-; RVA23-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; RVA23-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; RVA23: middle.block:
; RVA23-NEXT: [[TMP16:%.*]] = call i8 @llvm.vector.reduce.or.nxv2i8(<vscale x 2 x i8> [[TMP14]])
; RVA23-NEXT: br label [[EXIT:%.*]]
@@ -291,7 +275,7 @@ define i8 @mixed_gather_scatters(ptr %A, ptr %B, ptr %C) #0 {
; RVA23ZVL1024B-NEXT: [[TMP14]] = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> splat (i1 true), <vscale x 1 x i8> [[TMP13]], <vscale x 1 x i8> [[VEC_PHI]], i32 [[TMP0]])
; RVA23ZVL1024B-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP0]]
; RVA23ZVL1024B-NEXT: [[TMP15:%.*]] = icmp eq i32 [[AVL_NEXT]], 0
-; RVA23ZVL1024B-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; RVA23ZVL1024B-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; RVA23ZVL1024B: middle.block:
; RVA23ZVL1024B-NEXT: [[TMP16:%.*]] = call i8 @llvm.vector.reduce.or.nxv1i8(<vscale x 1 x i8> [[TMP14]])
; RVA23ZVL1024B-NEXT: br label [[EXIT:%.*]]