[llvm] [LV] Enable scalable FindLast on RISCV. (PR #184931)
Elvis Wang via llvm-commits
llvm-commits at lists.llvm.org
Tue Mar 10 19:16:56 PDT 2026
https://github.com/ElvisWang123 updated https://github.com/llvm/llvm-project/pull/184931
>From 18aaba2dd5c7dc309215cfd25285c069470d1c34 Mon Sep 17 00:00:00 2001
From: Elvis Wang <elvis.wang at sifive.com>
Date: Sun, 1 Mar 2026 17:46:58 -0800
Subject: [PATCH 1/2] [LV] Enable scalable FindLast on RISCV.
This patch enables FindLast reductions to be vectorized with scalable
vectors on RISCV.
Note that this patch only allows FindLast to be vectorized with scalable
vectors. It doesn't work well with EVL tail-folding since the
header mask still has users inside the loop body, so the header mask
cannot be fully removed.
---
.../Target/RISCV/RISCVTargetTransformInfo.h | 1 +
...conditional-scalar-assignment-fold-tail.ll | 138 ++++++++++++++++++
.../RISCV/select-cmp-reduction.ll | 60 ++++----
3 files changed, 164 insertions(+), 35 deletions(-)
create mode 100644 llvm/test/Transforms/LoopVectorize/RISCV/conditional-scalar-assignment-fold-tail.ll
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
index ad0248138d5bf..42f48c644240d 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
@@ -393,6 +393,7 @@ class RISCVTTIImpl final : public BasicTTIImplBase<RISCVTTIImpl> {
case RecurKind::UMax:
case RecurKind::FMin:
case RecurKind::FMax:
+ case RecurKind::FindLast:
return true;
case RecurKind::AnyOf:
case RecurKind::FAdd:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/conditional-scalar-assignment-fold-tail.ll b/llvm/test/Transforms/LoopVectorize/RISCV/conditional-scalar-assignment-fold-tail.ll
new file mode 100644
index 0000000000000..cb907d0d39cc9
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/conditional-scalar-assignment-fold-tail.ll
@@ -0,0 +1,138 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6
+; RUN: opt -passes=loop-vectorize -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -mtriple riscv64 -mattr=+v -S < %s | FileCheck %s
+
+define i32 @simple_find_last_reduction(i64 %N, ptr %data, i32 %a) {
+; CHECK-LABEL: define i32 @simple_find_last_reduction(
+; CHECK-SAME: i64 [[N:%.*]], ptr [[DATA:%.*]], i32 [[A:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[SCALAR_PH:.*:]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[A]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[EXIT]] ], [ [[CURRENT_ITERATION_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ splat (i32 -1), %[[EXIT]] ], [ [[TMP10:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[EXIT]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[EXIT]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP1]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ult <vscale x 4 x i32> [[TMP2]], [[BROADCAST_SPLAT2]]
+; CHECK-NEXT: [[LD_ADDR:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i64 [[IV]]
+; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[LD_ADDR]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP1]])
+; CHECK-NEXT: [[TMP5:%.*]] = icmp slt <vscale x 4 x i32> [[BROADCAST_SPLAT]], [[VP_OP_LOAD]]
+; CHECK-NEXT: [[TMP6:%.*]] = select <vscale x 4 x i1> [[TMP3]], <vscale x 4 x i1> [[TMP5]], <vscale x 4 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP7:%.*]] = freeze <vscale x 4 x i1> [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP7]])
+; CHECK-NEXT: [[TMP9]] = select i1 [[TMP8]], <vscale x 4 x i1> [[TMP6]], <vscale x 4 x i1> [[TMP0]]
+; CHECK-NEXT: [[TMP10]] = select i1 [[TMP8]], <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT: [[CURRENT_ITERATION_NEXT]] = add i64 [[TMP11]], [[IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
+; CHECK-NEXT: br i1 [[TMP12]], label %[[EXIT1:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[EXIT1]]:
+; CHECK-NEXT: [[SELECT_DATA_LCSSA:%.*]] = call i32 @llvm.experimental.vector.extract.last.active.nxv4i32(<vscale x 4 x i32> [[TMP10]], <vscale x 4 x i1> [[TMP9]], i32 -1)
+; CHECK-NEXT: br label %[[EXIT2:.*]]
+; CHECK: [[EXIT2]]:
+; CHECK-NEXT: ret i32 [[SELECT_DATA_LCSSA]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %data.phi = phi i32 [ -1, %entry ], [ %select.data, %loop ]
+ %ld.addr = getelementptr inbounds i32, ptr %data, i64 %iv
+ %ld = load i32, ptr %ld.addr, align 4
+ %select.cmp = icmp slt i32 %a, %ld
+ %select.data = select i1 %select.cmp, i32 %ld, i32 %data.phi
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exit.cmp = icmp eq i64 %iv.next, %N
+ br i1 %exit.cmp, label %exit, label %loop
+
+exit:
+ ret i32 %select.data
+}
+
+; This test is derived from the following C program:
+; int non_speculatable_find_last_reduction(
+; int* a, int* b, int default_val, int N, int threshold)
+; {
+; int result = default_val;
+; for (int i = 0; i < N; ++i)
+; if (a[i] > threshold)
+; result = b[i];
+; return result;
+; }
+define i32 @non_speculatable_find_last_reduction(ptr noalias %a, ptr noalias %b, i32 %default_val, i64 %N, i32 %threshold) {
+; CHECK-LABEL: define i32 @non_speculatable_find_last_reduction(
+; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i32 [[DEFAULT_VAL:%.*]], i64 [[N:%.*]], i32 [[THRESHOLD:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[SCALAR_PH:.*:]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[THRESHOLD]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[DEFAULT_VAL]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[EXIT]] ], [ [[CURRENT_ITERATION_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT2]], %[[EXIT]] ], [ [[TMP12:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[EXIT]] ], [ [[TMP11:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[EXIT]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP1]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ult <vscale x 4 x i32> [[TMP2]], [[BROADCAST_SPLAT4]]
+; CHECK-NEXT: [[A_ADDR:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[A_ADDR]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP1]])
+; CHECK-NEXT: [[TMP5:%.*]] = icmp sgt <vscale x 4 x i32> [[VP_OP_LOAD]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP6:%.*]] = select <vscale x 4 x i1> [[TMP3]], <vscale x 4 x i1> [[TMP5]], <vscale x 4 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[B]], i64 [[IV]]
+; CHECK-NEXT: [[VP_OP_LOAD5:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP7]], <vscale x 4 x i1> [[TMP5]], i32 [[TMP1]])
+; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 4 x i1> [[TMP3]], <vscale x 4 x i1> [[TMP6]], <vscale x 4 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP9:%.*]] = freeze <vscale x 4 x i1> [[TMP8]]
+; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP9]])
+; CHECK-NEXT: [[TMP11]] = select i1 [[TMP10]], <vscale x 4 x i1> [[TMP8]], <vscale x 4 x i1> [[TMP0]]
+; CHECK-NEXT: [[TMP12]] = select i1 [[TMP10]], <vscale x 4 x i32> [[VP_OP_LOAD5]], <vscale x 4 x i32> [[VEC_PHI]]
+; CHECK-NEXT: [[TMP13:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT: [[CURRENT_ITERATION_NEXT]] = add i64 [[TMP13]], [[IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
+; CHECK-NEXT: br i1 [[TMP14]], label %[[IF_THEN:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: [[IF_THEN]]:
+; CHECK-NEXT: [[TMP15:%.*]] = extractelement <vscale x 4 x i32> [[BROADCAST_SPLAT2]], i32 0
+; CHECK-NEXT: [[SELECT_DATA_LCSSA:%.*]] = call i32 @llvm.experimental.vector.extract.last.active.nxv4i32(<vscale x 4 x i32> [[TMP12]], <vscale x 4 x i1> [[TMP11]], i32 [[TMP15]])
+; CHECK-NEXT: br label %[[LATCH:.*]]
+; CHECK: [[LATCH]]:
+; CHECK-NEXT: ret i32 [[SELECT_DATA_LCSSA]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %latch ]
+ %data.phi = phi i32 [ %default_val, %entry ], [ %select.data, %latch ]
+ %a.addr = getelementptr inbounds nuw i32, ptr %a, i64 %iv
+ %ld.a = load i32, ptr %a.addr, align 4
+ %if.cond = icmp sgt i32 %ld.a, %threshold
+ br i1 %if.cond, label %if.then, label %latch
+
+if.then:
+ %b.addr = getelementptr inbounds nuw i32, ptr %b, i64 %iv
+ %ld.b = load i32, ptr %b.addr, align 4
+ br label %latch
+
+latch:
+ %select.data = phi i32 [ %ld.b, %if.then ], [ %data.phi, %loop ]
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exit.cmp = icmp eq i64 %iv.next, %N
+ br i1 %exit.cmp, label %exit, label %loop
+
+exit:
+ ret i32 %select.data
+}
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll
index d7bab8fda3fe3..da0c2c26647aa 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll
@@ -243,47 +243,37 @@ exit: ; preds = %for.body
define float @select_const_f32_from_icmp(ptr nocapture readonly %v, i64 %n) {
; CHECK-LABEL: define float @select_const_f32_from_icmp(
; CHECK-SAME: ptr readonly captures(none) [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 8
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 8
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ splat (float 3.000000e+00), %[[VECTOR_PH]] ], [ [[TMP10:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP12:%.*]] = phi <8 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP13:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[CURRENT_ITERATION_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ splat (float 3.000000e+00), %[[VECTOR_PH]] ], [ [[TMP10:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP4:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP1]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ult <vscale x 4 x i32> [[TMP13]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[TMP0]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP2]], align 4
-; CHECK-NEXT: [[TMP14:%.*]] = icmp ne <8 x i32> [[WIDE_LOAD]], splat (i32 3)
-; CHECK-NEXT: [[TMP15:%.*]] = freeze <8 x i1> [[TMP14]]
-; CHECK-NEXT: [[TMP16:%.*]] = call i1 @llvm.vector.reduce.or.v8i1(<8 x i1> [[TMP15]])
-; CHECK-NEXT: [[TMP13]] = select i1 [[TMP16]], <8 x i1> [[TMP14]], <8 x i1> [[TMP12]]
-; CHECK-NEXT: [[TMP10]] = select i1 [[TMP16]], <8 x float> splat (float 7.000000e+00), <8 x float> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
-; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP8:%.*]] = call float @llvm.experimental.vector.extract.last.active.v8f32(<8 x float> [[TMP10]], <8 x i1> [[TMP13]], float 3.000000e+00)
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP2]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP1]])
+; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <vscale x 4 x i32> [[VP_OP_LOAD]], splat (i32 3)
+; CHECK-NEXT: [[TMP6:%.*]] = select <vscale x 4 x i1> [[TMP3]], <vscale x 4 x i1> [[TMP5]], <vscale x 4 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP7:%.*]] = freeze <vscale x 4 x i1> [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP7]])
+; CHECK-NEXT: [[TMP9]] = select i1 [[TMP8]], <vscale x 4 x i1> [[TMP6]], <vscale x 4 x i1> [[TMP4]]
+; CHECK-NEXT: [[TMP10]] = select i1 [[TMP8]], <vscale x 4 x float> splat (float 7.000000e+00), <vscale x 4 x float> [[VEC_PHI]]
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT: [[CURRENT_ITERATION_NEXT]] = add i64 [[TMP11]], [[TMP0]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
+; CHECK-NEXT: br i1 [[TMP12]], label %[[SCALAR_PH:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP8]], %[[MIDDLE_BLOCK]] ], [ 3.000000e+00, %[[ENTRY]] ]
+; CHECK-NEXT: [[DOTLCSSA:%.*]] = call float @llvm.experimental.vector.extract.last.active.nxv4f32(<vscale x 4 x float> [[TMP10]], <vscale x 4 x i1> [[TMP9]], float 3.000000e+00)
; CHECK-NEXT: br label %[[FOR_BODY1:.*]]
; CHECK: [[FOR_BODY1]]:
-; CHECK-NEXT: [[TMP9:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[TMP6:%.*]], %[[FOR_BODY1]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = phi fast float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[TMP5:%.*]], %[[FOR_BODY1]] ]
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[TMP9]]
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP11]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[TMP3]], 3
-; CHECK-NEXT: [[TMP5]] = select fast i1 [[TMP4]], float [[TMP1]], float 7.000000e+00
-; CHECK-NEXT: [[TMP6]] = add nuw nsw i64 [[TMP9]], 1
-; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[TMP6]], [[N]]
-; CHECK-NEXT: br i1 [[TMP7]], label %[[EXIT]], label %[[FOR_BODY1]], !llvm.loop [[LOOP8:![0-9]+]]
-; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi float [ [[TMP5]], %[[FOR_BODY1]] ], [ [[TMP8]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret float [[DOTLCSSA]]
;
entry:
@@ -329,7 +319,7 @@ define i32 @pred_select_const_i32_from_icmp(ptr noalias nocapture readonly %src1
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[INDEX]]
; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; CHECK-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP12:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[PREDPHI]])
; CHECK-NEXT: [[TMP13:%.*]] = freeze i1 [[TMP12]]
>From 90b51ccde81b019afcb9f90b3ad44c85eca5fa60 Mon Sep 17 00:00:00 2001
From: Elvis Wang <elvis.wang at sifive.com>
Date: Tue, 10 Mar 2026 19:15:52 -0700
Subject: [PATCH 2/2] Address comments.
* Add new non-tail-folded tests.
* Remove unused options in tests.
---
...conditional-scalar-assignment-fold-tail.ll | 2 +-
.../RISCV/conditional-scalar-assignment.ll | 172 ++++++++++++++++++
2 files changed, 173 insertions(+), 1 deletion(-)
create mode 100644 llvm/test/Transforms/LoopVectorize/RISCV/conditional-scalar-assignment.ll
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/conditional-scalar-assignment-fold-tail.ll b/llvm/test/Transforms/LoopVectorize/RISCV/conditional-scalar-assignment-fold-tail.ll
index cb907d0d39cc9..8cbe7ccca8ed1 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/conditional-scalar-assignment-fold-tail.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/conditional-scalar-assignment-fold-tail.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6
-; RUN: opt -passes=loop-vectorize -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -mtriple riscv64 -mattr=+v -S < %s | FileCheck %s
+; RUN: opt -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S < %s | FileCheck %s
define i32 @simple_find_last_reduction(i64 %N, ptr %data, i32 %a) {
; CHECK-LABEL: define i32 @simple_find_last_reduction(
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/conditional-scalar-assignment.ll b/llvm/test/Transforms/LoopVectorize/RISCV/conditional-scalar-assignment.ll
new file mode 100644
index 0000000000000..7464a836fc928
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/conditional-scalar-assignment.ll
@@ -0,0 +1,172 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6
+; RUN: opt -passes=loop-vectorize -force-tail-folding-style=none -mtriple riscv64 -mattr=+v -S < %s | FileCheck %s
+
+define i32 @simple_find_last_reduction(i64 %N, ptr %data, i32 %a) {
+; CHECK-LABEL: define i32 @simple_find_last_reduction(
+; CHECK-SAME: i64 [[N:%.*]], ptr [[DATA:%.*]], i32 [[A:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[SCALAR_PH:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 2
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH1:.*]], label %[[EXIT:.*]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[A]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[EXIT]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ splat (i32 -1), %[[EXIT]] ], [ [[TMP10:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP4:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[EXIT]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[LD_ADDR:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i64 [[IV]]
+; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[LD_ADDR]], align 4
+; CHECK-NEXT: [[TMP5:%.*]] = icmp slt <vscale x 4 x i32> [[BROADCAST_SPLAT]], [[VP_OP_LOAD]]
+; CHECK-NEXT: [[TMP7:%.*]] = freeze <vscale x 4 x i1> [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP7]])
+; CHECK-NEXT: [[TMP9]] = select i1 [[TMP8]], <vscale x 4 x i1> [[TMP5]], <vscale x 4 x i1> [[TMP4]]
+; CHECK-NEXT: [[TMP10]] = select i1 [[TMP8]], <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP3]]
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP11]], label %[[EXIT1:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[EXIT1]]:
+; CHECK-NEXT: [[SELECT_DATA_LCSSA:%.*]] = call i32 @llvm.experimental.vector.extract.last.active.nxv4i32(<vscale x 4 x i32> [[TMP10]], <vscale x 4 x i1> [[TMP9]], i32 -1)
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT3:.*]], label %[[SCALAR_PH1]]
+; CHECK: [[SCALAR_PH1]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[EXIT1]] ], [ 0, %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[SELECT_DATA_LCSSA]], %[[EXIT1]] ], [ -1, %[[SCALAR_PH]] ]
+; CHECK-NEXT: br label %[[EXIT2:.*]]
+; CHECK: [[EXIT2]]:
+; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH1]] ], [ [[IV_NEXT:%.*]], %[[EXIT2]] ]
+; CHECK-NEXT: [[DATA_PHI:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH1]] ], [ [[SELECT_DATA:%.*]], %[[EXIT2]] ]
+; CHECK-NEXT: [[LD_ADDR1:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i64 [[IV1]]
+; CHECK-NEXT: [[LD:%.*]] = load i32, ptr [[LD_ADDR1]], align 4
+; CHECK-NEXT: [[SELECT_CMP:%.*]] = icmp slt i32 [[A]], [[LD]]
+; CHECK-NEXT: [[SELECT_DATA]] = select i1 [[SELECT_CMP]], i32 [[LD]], i32 [[DATA_PHI]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
+; CHECK-NEXT: [[EXIT_CMP:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXIT_CMP]], label %[[EXIT3]], label %[[EXIT2]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: [[EXIT3]]:
+; CHECK-NEXT: [[SELECT_DATA_LCSSA1:%.*]] = phi i32 [ [[SELECT_DATA]], %[[EXIT2]] ], [ [[SELECT_DATA_LCSSA]], %[[EXIT1]] ]
+; CHECK-NEXT: ret i32 [[SELECT_DATA_LCSSA1]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %data.phi = phi i32 [ -1, %entry ], [ %select.data, %loop ]
+ %ld.addr = getelementptr inbounds i32, ptr %data, i64 %iv
+ %ld = load i32, ptr %ld.addr, align 4
+ %select.cmp = icmp slt i32 %a, %ld
+ %select.data = select i1 %select.cmp, i32 %ld, i32 %data.phi
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exit.cmp = icmp eq i64 %iv.next, %N
+ br i1 %exit.cmp, label %exit, label %loop
+
+exit:
+ ret i32 %select.data
+}
+
+; This test is derived from the following C program:
+; int non_speculatable_find_last_reduction(
+; int* a, int* b, int default_val, int N, int threshold)
+; {
+; int result = default_val;
+; for (int i = 0; i < N; ++i)
+; if (a[i] > threshold)
+; result = b[i];
+; return result;
+; }
+define i32 @non_speculatable_find_last_reduction(ptr noalias %a, ptr noalias %b, i32 %default_val, i64 %N, i32 %threshold) {
+; CHECK-LABEL: define i32 @non_speculatable_find_last_reduction(
+; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i32 [[DEFAULT_VAL:%.*]], i64 [[N:%.*]], i32 [[THRESHOLD:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[SCALAR_PH:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 2
+; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP1]], i64 5)
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[UMAX]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH1:.*]], label %[[EXIT:.*]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[THRESHOLD]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[DEFAULT_VAL]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[EXIT]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT2]], %[[EXIT]] ], [ [[TMP12:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP4:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[EXIT]] ], [ [[TMP11:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[A_ADDR:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[A_ADDR]], align 4
+; CHECK-NEXT: [[TMP5:%.*]] = icmp sgt <vscale x 4 x i32> [[VP_OP_LOAD]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[B]], i64 [[IV]]
+; CHECK-NEXT: [[VP_OP_LOAD5:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr align 4 [[TMP7]], <vscale x 4 x i1> [[TMP5]], <vscale x 4 x i32> poison)
+; CHECK-NEXT: [[TMP9:%.*]] = freeze <vscale x 4 x i1> [[TMP5]]
+; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP9]])
+; CHECK-NEXT: [[TMP11]] = select i1 [[TMP10]], <vscale x 4 x i1> [[TMP5]], <vscale x 4 x i1> [[TMP4]]
+; CHECK-NEXT: [[TMP12]] = select i1 [[TMP10]], <vscale x 4 x i32> [[VP_OP_LOAD5]], <vscale x 4 x i32> [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP3]]
+; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP13]], label %[[IF_THEN:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK: [[IF_THEN]]:
+; CHECK-NEXT: [[TMP15:%.*]] = extractelement <vscale x 4 x i32> [[BROADCAST_SPLAT2]], i32 0
+; CHECK-NEXT: [[SELECT_DATA_LCSSA:%.*]] = call i32 @llvm.experimental.vector.extract.last.active.nxv4i32(<vscale x 4 x i32> [[TMP12]], <vscale x 4 x i1> [[TMP11]], i32 [[TMP15]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT1:.*]], label %[[SCALAR_PH1]]
+; CHECK: [[SCALAR_PH1]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[IF_THEN]] ], [ 0, %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[SELECT_DATA_LCSSA]], %[[IF_THEN]] ], [ [[DEFAULT_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: br label %[[LATCH:.*]]
+; CHECK: [[LATCH]]:
+; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH1]] ], [ [[IV_NEXT:%.*]], %[[LATCH1:.*]] ]
+; CHECK-NEXT: [[DATA_PHI:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH1]] ], [ [[SELECT_DATA:%.*]], %[[LATCH1]] ]
+; CHECK-NEXT: [[A_ADDR1:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV1]]
+; CHECK-NEXT: [[LD_A:%.*]] = load i32, ptr [[A_ADDR1]], align 4
+; CHECK-NEXT: [[IF_COND:%.*]] = icmp sgt i32 [[LD_A]], [[THRESHOLD]]
+; CHECK-NEXT: br i1 [[IF_COND]], label %[[IF_THEN1:.*]], label %[[LATCH1]]
+; CHECK: [[IF_THEN1]]:
+; CHECK-NEXT: [[B_ADDR:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[IV1]]
+; CHECK-NEXT: [[LD_B:%.*]] = load i32, ptr [[B_ADDR]], align 4
+; CHECK-NEXT: br label %[[LATCH1]]
+; CHECK: [[LATCH1]]:
+; CHECK-NEXT: [[SELECT_DATA]] = phi i32 [ [[LD_B]], %[[IF_THEN1]] ], [ [[DATA_PHI]], %[[LATCH]] ]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
+; CHECK-NEXT: [[EXIT_CMP:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXIT_CMP]], label %[[EXIT1]], label %[[LATCH]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK: [[EXIT1]]:
+; CHECK-NEXT: [[SELECT_DATA_LCSSA1:%.*]] = phi i32 [ [[SELECT_DATA]], %[[LATCH1]] ], [ [[SELECT_DATA_LCSSA]], %[[IF_THEN]] ]
+; CHECK-NEXT: ret i32 [[SELECT_DATA_LCSSA1]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %latch ]
+ %data.phi = phi i32 [ %default_val, %entry ], [ %select.data, %latch ]
+ %a.addr = getelementptr inbounds nuw i32, ptr %a, i64 %iv
+ %ld.a = load i32, ptr %a.addr, align 4
+ %if.cond = icmp sgt i32 %ld.a, %threshold
+ br i1 %if.cond, label %if.then, label %latch
+
+if.then:
+ %b.addr = getelementptr inbounds nuw i32, ptr %b, i64 %iv
+ %ld.b = load i32, ptr %b.addr, align 4
+ br label %latch
+
+latch:
+ %select.data = phi i32 [ %ld.b, %if.then ], [ %data.phi, %loop ]
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exit.cmp = icmp eq i64 %iv.next, %N
+ br i1 %exit.cmp, label %exit, label %loop
+
+exit:
+ ret i32 %select.data
+}
More information about the llvm-commits
mailing list