[llvm] [RISCV] Handle freeze in vp.merge widening codegen prepare (PR #189346)
Luke Lau via llvm-commits
llvm-commits at lists.llvm.org
Mon Mar 30 06:07:33 PDT 2026
https://github.com/lukel97 updated https://github.com/llvm/llvm-project/pull/189346
>From af2f8b123b7ec530d0eca15a4fe6e338aaa86451 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Mon, 30 Mar 2026 16:35:40 +0800
Subject: [PATCH 1/3] Precommit tests
---
.../CodeGen/RISCV/riscv-codegenprepare.ll | 45 +++++++++++++++
.../RISCV/any-of-vectorization.ll | 56 +++++++++++++++++++
.../PhaseOrdering/RISCV/lit.local.cfg | 2 +
3 files changed, 103 insertions(+)
create mode 100644 llvm/test/Transforms/PhaseOrdering/RISCV/any-of-vectorization.ll
create mode 100644 llvm/test/Transforms/PhaseOrdering/RISCV/lit.local.cfg
diff --git a/llvm/test/CodeGen/RISCV/riscv-codegenprepare.ll b/llvm/test/CodeGen/RISCV/riscv-codegenprepare.ll
index c25e337777631..cd07702663379 100644
--- a/llvm/test/CodeGen/RISCV/riscv-codegenprepare.ll
+++ b/llvm/test/CodeGen/RISCV/riscv-codegenprepare.ll
@@ -196,3 +196,48 @@ exit:
%res = call i1 @llvm.vector.reduce.or(<vscale x 4 x i1> %rec)
ret i1 %res
}
+
+define i1 @widen_anyof_rdx_freeze(ptr %p, i64 %n) {
+; CHECK-LABEL: @widen_anyof_rdx_freeze(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[ENTRY]] ], [ [[REC_FREEZE:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[AVL:%.*]] = sub i64 [[N:%.*]], [[IV]]
+; CHECK-NEXT: [[EVL:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[IV]]
+; CHECK-NEXT: [[X:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr [[GEP]], <vscale x 4 x i1> splat (i1 true), i32 [[EVL]])
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 4 x i32> [[X]], zeroinitializer
+; CHECK-NEXT: [[REC:%.*]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[CMP]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[PHI]], i32 [[EVL]])
+; CHECK-NEXT: [[REC_FREEZE]] = freeze <vscale x 4 x i1> [[REC]]
+; CHECK-NEXT: [[EVL_ZEXT:%.*]] = zext i32 [[EVL]] to i64
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[EVL_ZEXT]]
+; CHECK-NEXT: [[DONE:%.*]] = icmp sge i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]]
+; CHECK: exit:
+; CHECK-NEXT: [[RES:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[REC_FREEZE]])
+; CHECK-NEXT: ret i1 [[RES]]
+;
+entry:
+ br label %loop
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %phi = phi <vscale x 4 x i1> [ zeroinitializer, %entry ], [ %rec.freeze, %loop ]
+ %avl = sub i64 %n, %iv
+ %evl = call i32 @llvm.experimental.get.vector.length(i64 %avl, i32 4, i1 true)
+
+ %gep = getelementptr i32, ptr %p, i64 %iv
+ %x = call <vscale x 4 x i32> @llvm.vp.load(ptr %gep, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+ %cmp = icmp ne <vscale x 4 x i32> %x, zeroinitializer
+ %rec = call <vscale x 4 x i1> @llvm.vp.merge(<vscale x 4 x i1> %cmp, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> %phi, i32 %evl)
+ %rec.freeze = freeze <vscale x 4 x i1> %rec
+
+ %evl.zext = zext i32 %evl to i64
+ %iv.next = add i64 %iv, %evl.zext
+ %done = icmp sge i64 %iv.next, %n
+ br i1 %done, label %exit, label %loop
+exit:
+ %res = call i1 @llvm.vector.reduce.or(<vscale x 4 x i1> %rec.freeze)
+ ret i1 %res
+}
diff --git a/llvm/test/Transforms/PhaseOrdering/RISCV/any-of-vectorization.ll b/llvm/test/Transforms/PhaseOrdering/RISCV/any-of-vectorization.ll
new file mode 100644
index 0000000000000..31f9b89c7877a
--- /dev/null
+++ b/llvm/test/Transforms/PhaseOrdering/RISCV/any-of-vectorization.ll
@@ -0,0 +1,56 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6
+; RUN: opt -mtriple riscv64 -mattr=+v -p 'default<O2>,function(riscv-codegenprepare)' -S < %s | FileCheck %s
+
+; Make sure we widen the vp.merge from LoopVectorizer to i8 in RISCVCodeGenPrepare
+
+define i32 @f(ptr %p, i32 %n) {
+; CHECK-LABEL: define range(i32 0, 2) i32 @f(
+; CHECK-SAME: ptr readonly captures(none) [[P:%.*]], i32 [[N:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[SKIP:%.*]] = icmp eq i32 [[N]], 0
+; CHECK-NEXT: br i1 [[SKIP]], label %[[EXIT:.*]], label %[[VECTOR_SCEVCHECK:.*]]
+; CHECK: [[VECTOR_SCEVCHECK]]:
+; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[TMP3:%.*]] = phi i64 [ 0, %[[VECTOR_SCEVCHECK]] ], [ [[CURRENT_ITERATION_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_SCEVCHECK]] ], [ [[DOTFR:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP2:%.*]] = tail call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr [4 x i8], ptr [[P]], i64 [[TMP3]]
+; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = tail call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP4]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP2]])
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <vscale x 4 x i32> [[VP_OP_LOAD]], zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP5]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP2]])
+; CHECK-NEXT: [[DOTFR]] = freeze <vscale x 4 x i1> [[TMP6]]
+; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT: [[CURRENT_ITERATION_NEXT]] = add nuw i64 [[TMP3]], [[TMP7]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP8:%.*]] = tail call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[DOTFR]])
+; CHECK-NEXT: [[RDX_SELECT:%.*]] = zext i1 [[TMP8]] to i32
+; CHECK-NEXT: br label %[[EXIT]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[RDX_NEXT_LCSSA:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[RDX_NEXT_LCSSA]]
+;
+entry:
+ %skip = icmp eq i32 %n, 0
+ br i1 %skip, label %exit, label %loop
+
+loop:
+ %iv = phi i32 [0, %entry], [%iv.next, %loop]
+ %rdx = phi i32 [0, %entry], [%rdx.next, %loop]
+ %gep = getelementptr i32, ptr %p, i32 %iv
+ %x = load i32, ptr %gep
+ %cmp = icmp eq i32 %x, 0
+ %rdx.next = select i1 %cmp, i32 1, i32 %rdx
+ %iv.next = add nsw i32 %iv, 1
+ %ec = icmp eq i32 %iv.next, %n
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ %res = phi i32 [0, %entry], [%rdx.next, %loop]
+ ret i32 %res
+}
diff --git a/llvm/test/Transforms/PhaseOrdering/RISCV/lit.local.cfg b/llvm/test/Transforms/PhaseOrdering/RISCV/lit.local.cfg
new file mode 100644
index 0000000000000..17351748513d9
--- /dev/null
+++ b/llvm/test/Transforms/PhaseOrdering/RISCV/lit.local.cfg
@@ -0,0 +1,2 @@
+if not "RISCV" in config.root.targets:
+ config.unsupported = True
>From b0e41c3efe5c22a97592846f9603faa3f3145b32 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Mon, 30 Mar 2026 17:27:09 +0800
Subject: [PATCH 2/3] [RISCV] Handle freeze in vp.merge widening codegen
prepare
This fixes #189335. Previously we widened i1 vp.merges that were used in AnyOf reduction patterns from the loop vectorizer to i8. This improved codegen because RVV doesn't have support for tail undisturbed mask instructions, so i1 vp.merges are expanded into a vid.v + compare sequence. Widening to i8 allows a single vmerge.vim to be used instead.
The pattern used to look like this, where the loop vectorizer emitted a freeze after the reduction:
vector.body:
...
%3 = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> %2, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> %vec.phi, i32 %0)
...
br i1 %5, label %middle.block, label %vector.body
for.cond.cleanup.loopexit: ; preds = %middle.block
%6 = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> %3)
%7 = freeze i1 %6
br label %for.cond.cleanup
But a recent change in InstCombine now causes the freeze to be hoisted between the reduction and the vp.merge, breaking the existing pattern:
vector.body:
...
%3 = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> %2, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> %vec.phi, i32 %0)
%.fr = freeze <vscale x 4 x i1> %3
...
br i1 %5, label %middle.block, label %vector.body
for.cond.cleanup.loopexit:
%6 = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> %.fr)
br label %for.cond.cleanup
This patch teaches the transform to handle the freeze. We need to perform the change when visiting the freeze instruction; otherwise we would invalidate the make_early_inc_range iterator.
---
llvm/lib/Target/RISCV/RISCVCodeGenPrepare.cpp | 47 +++++++++++--------
.../CodeGen/RISCV/riscv-codegenprepare.ll | 7 +--
.../RISCV/any-of-vectorization.ll | 7 +--
3 files changed, 36 insertions(+), 25 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVCodeGenPrepare.cpp b/llvm/lib/Target/RISCV/RISCVCodeGenPrepare.cpp
index 1ee4c66a5bde5..6d11708a6bd50 100644
--- a/llvm/lib/Target/RISCV/RISCVCodeGenPrepare.cpp
+++ b/llvm/lib/Target/RISCV/RISCVCodeGenPrepare.cpp
@@ -48,7 +48,8 @@ class RISCVCodeGenPrepare : public InstVisitor<RISCVCodeGenPrepare, bool> {
bool visitAnd(BinaryOperator &BO);
bool visitIntrinsicInst(IntrinsicInst &I);
bool expandVPStrideLoad(IntrinsicInst &I);
- bool widenVPMerge(IntrinsicInst &I);
+ bool widenVPMerge(Instruction *I);
+ bool visitFreezeInst(FreezeInst &BO);
};
} // namespace
@@ -115,12 +116,13 @@ bool RISCVCodeGenPrepare::visitAnd(BinaryOperator &BO) {
// follows:
//
// loop:
-// %phi = phi <vscale x 4 x i1> [ zeroinitializer, %entry ], [ %rec, %loop ]
+// %phi = phi <vscale x 4 x i1> [zeroinitializer, %entry], [%freeze, %loop]
// %cmp = icmp ...
// %rec = call <vscale x 4 x i1> @llvm.vp.merge(%cmp, i1 true, %phi, %evl)
+// %freeze = freeze <vscale x 4 x i1> %rec [optional]
// ...
// middle:
-// %res = call i1 @llvm.vector.reduce.or(<vscale x 4 x i1> %rec)
+// %res = call i1 @llvm.vector.reduce.or(<vscale x 4 x i1> %freeze)
//
// However RVV doesn't have any tail undisturbed mask instructions and so we
// need a convoluted sequence of mask instructions to lower the i1 vp.merge: see
@@ -130,57 +132,64 @@ bool RISCVCodeGenPrepare::visitAnd(BinaryOperator &BO) {
// generate a single vmerge.vim:
//
// loop:
-// %phi = phi <vscale x 4 x i8> [ zeroinitializer, %entry ], [ %rec, %loop ]
+// %phi = phi <vscale x 4 x i8> [zeroinitializer, %entry], [%freeze, %loop]
// %cmp = icmp ...
// %rec = call <vscale x 4 x i8> @llvm.vp.merge(%cmp, i8 true, %phi, %evl)
-// %trunc = trunc <vscale x 4 x i8> %rec to <vscale x 4 x i1>
+// %freeze = freeze <vscale x 4 x i8> %rec
+// %trunc = trunc <vscale x 4 x i8> %freeze to <vscale x 4 x i1>
// ...
// middle:
-// %res = call i1 @llvm.vector.reduce.or(<vscale x 4 x i1> %rec)
+// %res = call i1 @llvm.vector.reduce.or(<vscale x 4 x i1> %trunc)
//
// The trunc will normally be sunk outside of the loop, but even if there are
// users inside the loop it is still profitable.
-bool RISCVCodeGenPrepare::widenVPMerge(IntrinsicInst &II) {
- if (!II.getType()->getScalarType()->isIntegerTy(1))
+bool RISCVCodeGenPrepare::widenVPMerge(Instruction *Root) {
+ if (!Root->getType()->getScalarType()->isIntegerTy(1))
return false;
Value *Mask, *True, *PhiV, *EVL;
using namespace PatternMatch;
- if (!match(&II,
- m_Intrinsic<Intrinsic::vp_merge>(m_Value(Mask), m_Value(True),
- m_Value(PhiV), m_Value(EVL))))
+ auto m_VPMerge = m_Intrinsic<Intrinsic::vp_merge>(
+ m_Value(Mask), m_Value(True), m_Value(PhiV), m_Value(EVL));
+ if (!match(Root, m_CombineOr(m_VPMerge, m_Freeze(m_VPMerge))))
return false;
auto *Phi = dyn_cast<PHINode>(PhiV);
if (!Phi || !Phi->hasOneUse() || Phi->getNumIncomingValues() != 2 ||
!match(Phi->getIncomingValue(0), m_Zero()) ||
- Phi->getIncomingValue(1) != &II)
+ Phi->getIncomingValue(1) != Root)
return false;
Type *WideTy =
- VectorType::get(IntegerType::getInt8Ty(II.getContext()),
- cast<VectorType>(II.getType())->getElementCount());
+ VectorType::get(IntegerType::getInt8Ty(Root->getContext()),
+ cast<VectorType>(Root->getType())->getElementCount());
IRBuilder<> Builder(Phi);
PHINode *WidePhi = Builder.CreatePHI(WideTy, 2);
WidePhi->addIncoming(ConstantAggregateZero::get(WideTy),
Phi->getIncomingBlock(0));
- Builder.SetInsertPoint(&II);
+ Builder.SetInsertPoint(Root);
Value *WideTrue = Builder.CreateZExt(True, WideTy);
Value *WideMerge = Builder.CreateIntrinsic(Intrinsic::vp_merge, {WideTy},
{Mask, WideTrue, WidePhi, EVL});
+ if (isa<FreezeInst>(Root))
+ WideMerge = Builder.CreateFreeze(WideMerge);
WidePhi->addIncoming(WideMerge, Phi->getIncomingBlock(1));
- Value *Trunc = Builder.CreateTrunc(WideMerge, II.getType());
+ Value *Trunc = Builder.CreateTrunc(WideMerge, Root->getType());
- II.replaceAllUsesWith(Trunc);
+ Root->replaceAllUsesWith(Trunc);
// Break the cycle and delete the old chain.
Phi->setIncomingValue(1, Phi->getIncomingValue(0));
- llvm::RecursivelyDeleteTriviallyDeadInstructions(&II);
+ llvm::RecursivelyDeleteTriviallyDeadInstructions(Root);
return true;
}
+bool RISCVCodeGenPrepare::visitFreezeInst(FreezeInst &I) {
+ return widenVPMerge(&I);
+}
+
// LLVM vector reduction intrinsics return a scalar result, but on RISC-V vector
// reduction instructions write the result in the first element of a vector
// register. So when a reduction in a loop uses a scalar phi, we end up with
@@ -216,7 +225,7 @@ bool RISCVCodeGenPrepare::visitIntrinsicInst(IntrinsicInst &I) {
if (expandVPStrideLoad(I))
return true;
- if (widenVPMerge(I))
+ if (widenVPMerge(&I))
return true;
if (I.getIntrinsicID() != Intrinsic::vector_reduce_fadd &&
diff --git a/llvm/test/CodeGen/RISCV/riscv-codegenprepare.ll b/llvm/test/CodeGen/RISCV/riscv-codegenprepare.ll
index cd07702663379..59f4a9f2f52eb 100644
--- a/llvm/test/CodeGen/RISCV/riscv-codegenprepare.ll
+++ b/llvm/test/CodeGen/RISCV/riscv-codegenprepare.ll
@@ -203,14 +203,15 @@ define i1 @widen_anyof_rdx_freeze(ptr %p, i64 %n) {
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[ENTRY]] ], [ [[REC_FREEZE:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = phi <vscale x 4 x i8> [ zeroinitializer, [[ENTRY]] ], [ [[TMP2:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[AVL:%.*]] = sub i64 [[N:%.*]], [[IV]]
; CHECK-NEXT: [[EVL:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[IV]]
; CHECK-NEXT: [[X:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr [[GEP]], <vscale x 4 x i1> splat (i1 true), i32 [[EVL]])
; CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 4 x i32> [[X]], zeroinitializer
-; CHECK-NEXT: [[REC:%.*]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[CMP]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[PHI]], i32 [[EVL]])
-; CHECK-NEXT: [[REC_FREEZE]] = freeze <vscale x 4 x i1> [[REC]]
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> [[CMP]], <vscale x 4 x i8> splat (i8 1), <vscale x 4 x i8> [[TMP0]], i32 [[EVL]])
+; CHECK-NEXT: [[TMP2]] = freeze <vscale x 4 x i8> [[TMP1]]
+; CHECK-NEXT: [[REC_FREEZE:%.*]] = trunc <vscale x 4 x i8> [[TMP2]] to <vscale x 4 x i1>
; CHECK-NEXT: [[EVL_ZEXT:%.*]] = zext i32 [[EVL]] to i64
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[EVL_ZEXT]]
; CHECK-NEXT: [[DONE:%.*]] = icmp sge i64 [[IV_NEXT]], [[N]]
diff --git a/llvm/test/Transforms/PhaseOrdering/RISCV/any-of-vectorization.ll b/llvm/test/Transforms/PhaseOrdering/RISCV/any-of-vectorization.ll
index 31f9b89c7877a..29b5153f98422 100644
--- a/llvm/test/Transforms/PhaseOrdering/RISCV/any-of-vectorization.ll
+++ b/llvm/test/Transforms/PhaseOrdering/RISCV/any-of-vectorization.ll
@@ -14,14 +14,15 @@ define i32 @f(ptr %p, i32 %n) {
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[TMP3:%.*]] = phi i64 [ 0, %[[VECTOR_SCEVCHECK]] ], [ [[CURRENT_ITERATION_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_SCEVCHECK]] ], [ [[DOTFR:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP1:%.*]] = phi <vscale x 4 x i8> [ zeroinitializer, %[[VECTOR_SCEVCHECK]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP2:%.*]] = tail call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr [4 x i8], ptr [[P]], i64 [[TMP3]]
; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = tail call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP4]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP2]])
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <vscale x 4 x i32> [[VP_OP_LOAD]], zeroinitializer
-; CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP5]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP2]])
-; CHECK-NEXT: [[DOTFR]] = freeze <vscale x 4 x i1> [[TMP6]]
+; CHECK-NEXT: [[TMP10:%.*]] = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> [[TMP5]], <vscale x 4 x i8> splat (i8 1), <vscale x 4 x i8> [[TMP1]], i32 [[TMP2]])
+; CHECK-NEXT: [[TMP6]] = freeze <vscale x 4 x i8> [[TMP10]]
+; CHECK-NEXT: [[DOTFR:%.*]] = trunc <vscale x 4 x i8> [[TMP6]] to <vscale x 4 x i1>
; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP2]] to i64
; CHECK-NEXT: [[CURRENT_ITERATION_NEXT]] = add nuw i64 [[TMP3]], [[TMP7]]
; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]]
>From 039b68b9a4dea64b118041d1726686f622825586 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Mon, 30 Mar 2026 21:05:58 +0800
Subject: [PATCH 3/3] Early exit if freeze operand isn't a vp_merge
---
llvm/lib/Target/RISCV/RISCVCodeGenPrepare.cpp | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/RISCV/RISCVCodeGenPrepare.cpp b/llvm/lib/Target/RISCV/RISCVCodeGenPrepare.cpp
index 6d11708a6bd50..e8269677fb344 100644
--- a/llvm/lib/Target/RISCV/RISCVCodeGenPrepare.cpp
+++ b/llvm/lib/Target/RISCV/RISCVCodeGenPrepare.cpp
@@ -187,7 +187,10 @@ bool RISCVCodeGenPrepare::widenVPMerge(Instruction *Root) {
}
bool RISCVCodeGenPrepare::visitFreezeInst(FreezeInst &I) {
- return widenVPMerge(&I);
+ if (auto *II = dyn_cast<IntrinsicInst>(I.getOperand(0)))
+ if (II->getIntrinsicID() == Intrinsic::vp_merge)
+ return widenVPMerge(&I);
+ return false;
}
// LLVM vector reduction intrinsics return a scalar result, but on RISC-V vector
More information about the llvm-commits
mailing list