[llvm] [LoopVectorize] Add test case for minloc reduction (PR #141556)
Madhur Amilkanthwar via llvm-commits
llvm-commits at lists.llvm.org
Tue May 27 05:31:02 PDT 2025
https://github.com/madhur13490 updated https://github.com/llvm/llvm-project/pull/141556
>From 81c03736bb7948d74d4a044dae669db9f3b3c5c7 Mon Sep 17 00:00:00 2001
From: Madhur Amilkanthwar <madhura at nvidia.com>
Date: Mon, 26 May 2025 07:14:54 -0700
Subject: [PATCH 1/2] [LoopVectorize] Add test case for minloc reduction
This patch adds a test case extracted from the Polyhedron benchmark suite:
https://fortran.uk/fortran-compiler-comparisons/the-polyhedron-solutions-benchmark-suite/
The test is specifically interesting for vectorizing a min/max-with-index
(minloc) reduction pattern.
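
For reference, the pattern being exercised is a "minloc"-style reduction:
scan a range backwards and remember the position of the smallest element.
A minimal C sketch of that loop shape follows; the names and the 0-based
indexing are illustrative assumptions, not the rnflow source (which is
Fortran and 1-based):

  /* Minimal illustrative sketch (assumed names, 0-based C indexing; not the
     rnflow source): backward scan over a[lo..hi] keeping the index of the
     smallest element seen so far. */
  static int min_index_backward(const float *a, int lo, int hi) {
    int min_idx = hi;                  /* start at the last element of the range */
    for (int i = hi - 1; i >= lo; --i) {
      if (a[i] < a[min_idx])           /* strict '<': equal elements keep the    */
        min_idx = i;                   /* index found earlier in the scan        */
    }
    return min_idx;
  }

What makes this interesting for the vectorizer is that the loop-carried
recurrence is the index, carried through a select/phi pair, rather than the
minimum value itself, so it is not a plain min/max reduction.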
---
.../LoopVectorize/last-min-index.ll | 84 +++++++++++++++++++
1 file changed, 84 insertions(+)
create mode 100644 llvm/test/Transforms/LoopVectorize/last-min-index.ll
diff --git a/llvm/test/Transforms/LoopVectorize/last-min-index.ll b/llvm/test/Transforms/LoopVectorize/last-min-index.ll
new file mode 100644
index 0000000000000..f69145eafa74a
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/last-min-index.ll
@@ -0,0 +1,84 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes=loop-vectorize -S %s | FileCheck %s --check-prefix=CHECK-REV-MIN
+
+; This test case is extracted from the rnflow (Fortran) benchmark in the Polyhedron benchmark suite.
+; The function minlst takes two indices (i.e. a range), scans backwards over the range,
+; and returns the index of the minimum value (the first match found by the backward scan).
+
+define fastcc i32 @minlst(i32 %.0.val, i32 %.0.val1, ptr %.0.val3) {
+; CHECK-REV-MIN-LABEL: define internal fastcc i32 @_QFcptrf2Pminlst(
+; CHECK-REV-MIN-SAME: i32 [[DOT0_VAL:%.*]], i32 [[DOT0_VAL1:%.*]], ptr [[DOT0_VAL3:%.*]]) unnamed_addr {
+; CHECK-REV-MIN-NEXT: [[TMP1:%.*]] = sext i32 [[DOT0_VAL]] to i64
+; CHECK-REV-MIN-NEXT: [[TMP2:%.*]] = sub i32 0, [[DOT0_VAL1]]
+; CHECK-REV-MIN-NEXT: [[TMP3:%.*]] = sext i32 [[TMP2]] to i64
+; CHECK-REV-MIN-NEXT: [[TMP4:%.*]] = add nsw i64 [[TMP1]], [[TMP3]]
+; CHECK-REV-MIN-NEXT: [[TMP5:%.*]] = sub nsw i64 0, [[TMP4]]
+; CHECK-REV-MIN-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr i8, ptr [[DOT0_VAL3]], i64 -8
+; CHECK-REV-MIN-NEXT: [[INVARIANT_GEP5:%.*]] = getelementptr i8, ptr [[DOT0_VAL3]], i64 -4
+; CHECK-REV-MIN-NEXT: [[TMP6:%.*]] = icmp slt i64 [[TMP4]], 0
+; CHECK-REV-MIN-NEXT: br i1 [[TMP6]], label %[[DOTLR_PH_PREHEADER:.*]], [[DOT_CRIT_EDGE:label %.*]]
+; CHECK-REV-MIN: [[_LR_PH_PREHEADER:.*:]]
+; CHECK-REV-MIN-NEXT: [[TMP7:%.*]] = sext i32 [[DOT0_VAL1]] to i64
+; CHECK-REV-MIN-NEXT: br label %[[DOTLR_PH:.*]]
+; CHECK-REV-MIN: [[_LR_PH:.*:]]
+; CHECK-REV-MIN-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[TMP7]], %[[DOTLR_PH_PREHEADER]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ]
+; CHECK-REV-MIN-NEXT: [[TMP8:%.*]] = phi i64 [ [[TMP14:%.*]], %[[DOTLR_PH]] ], [ [[TMP5]], %[[DOTLR_PH_PREHEADER]] ]
+; CHECK-REV-MIN-NEXT: [[DOT07:%.*]] = phi i32 [ [[DOT1:%.*]], %[[DOTLR_PH]] ], [ [[DOT0_VAL1]], %[[DOTLR_PH_PREHEADER]] ]
+; CHECK-REV-MIN-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], -1
+; CHECK-REV-MIN-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[INVARIANT_GEP]], i64 [[INDVARS_IV]]
+; CHECK-REV-MIN-NEXT: [[TMP9:%.*]] = load float, ptr [[GEP]], align 4
+; CHECK-REV-MIN-NEXT: [[TMP10:%.*]] = sext i32 [[DOT07]] to i64
+; CHECK-REV-MIN-NEXT: [[GEP6:%.*]] = getelementptr float, ptr [[INVARIANT_GEP5]], i64 [[TMP10]]
+; CHECK-REV-MIN-NEXT: [[TMP11:%.*]] = load float, ptr [[GEP6]], align 4
+; CHECK-REV-MIN-NEXT: [[TMP12:%.*]] = fcmp contract olt float [[TMP9]], [[TMP11]]
+; CHECK-REV-MIN-NEXT: [[TMP13:%.*]] = trunc nsw i64 [[INDVARS_IV_NEXT]] to i32
+; CHECK-REV-MIN-NEXT: [[DOT1]] = select i1 [[TMP12]], i32 [[TMP13]], i32 [[DOT07]]
+; CHECK-REV-MIN-NEXT: [[TMP14]] = add nsw i64 [[TMP8]], -1
+; CHECK-REV-MIN-NEXT: [[TMP15:%.*]] = icmp sgt i64 [[TMP8]], 1
+; CHECK-REV-MIN-NEXT: br i1 [[TMP15]], label %[[DOTLR_PH]], label %[[DOT_CRIT_EDGE_LOOPEXIT:.*]]
+; CHECK-REV-MIN: [[__CRIT_EDGE_LOOPEXIT:.*:]]
+; CHECK-REV-MIN-NEXT: [[DOT1_LCSSA:%.*]] = phi i32 [ [[DOT1]], %[[DOTLR_PH]] ]
+; CHECK-REV-MIN-NEXT: br [[DOT_CRIT_EDGE]]
+; CHECK-REV-MIN: [[__CRIT_EDGE:.*:]]
+; CHECK-REV-MIN-NEXT: [[DOT0_LCSSA:%.*]] = phi i32 [ [[DOT0_VAL1]], [[TMP0:%.*]] ], [ [[DOT1_LCSSA]], %[[DOT_CRIT_EDGE_LOOPEXIT]] ]
+; CHECK-REV-MIN-NEXT: ret i32 [[DOT0_LCSSA]]
+;
+ %1 = sext i32 %.0.val to i64
+ %2 = sub i32 0, %.0.val1
+ %3 = sext i32 %2 to i64
+ %4 = add nsw i64 %1, %3
+ %5 = sub nsw i64 0, %4
+ %invariant.gep = getelementptr i8, ptr %.0.val3, i64 -8
+ %invariant.gep5 = getelementptr i8, ptr %.0.val3, i64 -4
+ %6 = icmp slt i64 %4, 0
+ br i1 %6, label %.lr.ph.preheader, label %._crit_edge
+
+.lr.ph.preheader: ; preds = %0
+ %7 = sext i32 %.0.val1 to i64
+ br label %.lr.ph
+
+.lr.ph: ; preds = %.lr.ph.preheader, %.lr.ph
+ %indvars.iv = phi i64 [ %7, %.lr.ph.preheader ], [ %indvars.iv.next, %.lr.ph ]
+ %8 = phi i64 [ %14, %.lr.ph ], [ %5, %.lr.ph.preheader ]
+ %.07 = phi i32 [ %.1, %.lr.ph ], [ %.0.val1, %.lr.ph.preheader ]
+ %indvars.iv.next = add nsw i64 %indvars.iv, -1
+ %gep = getelementptr float, ptr %invariant.gep, i64 %indvars.iv
+ %9 = load float, ptr %gep, align 4
+ %10 = sext i32 %.07 to i64
+ %gep6 = getelementptr float, ptr %invariant.gep5, i64 %10
+ %11 = load float, ptr %gep6, align 4
+ %12 = fcmp contract olt float %9, %11
+ %13 = trunc nsw i64 %indvars.iv.next to i32
+ %.1 = select i1 %12, i32 %13, i32 %.07
+ %14 = add nsw i64 %8, -1
+ %15 = icmp sgt i64 %8, 1
+ br i1 %15, label %.lr.ph, label %._crit_edge.loopexit
+
+._crit_edge.loopexit: ; preds = %.lr.ph
+ %.1.lcssa = phi i32 [ %.1, %.lr.ph ]
+ br label %._crit_edge
+
+._crit_edge: ; preds = %._crit_edge.loopexit, %0
+ %.0.lcssa = phi i32 [ %.0.val1, %0 ], [ %.1.lcssa, %._crit_edge.loopexit ]
+ ret i32 %.0.lcssa
+}
>From 936d6ca3401a94ded2a7cd7672477b291aaada84 Mon Sep 17 00:00:00 2001
From: Madhur Amilkanthwar <madhura at nvidia.com>
Date: Tue, 27 May 2025 05:29:07 -0700
Subject: [PATCH 2/2] Address review comments
---
.../LoopVectorize/last-min-index.ll | 136 +++++++++---------
1 file changed, 69 insertions(+), 67 deletions(-)
diff --git a/llvm/test/Transforms/LoopVectorize/last-min-index.ll b/llvm/test/Transforms/LoopVectorize/last-min-index.ll
index f69145eafa74a..c99766cfcd318 100644
--- a/llvm/test/Transforms/LoopVectorize/last-min-index.ll
+++ b/llvm/test/Transforms/LoopVectorize/last-min-index.ll
@@ -1,84 +1,86 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt -passes=loop-vectorize -S %s | FileCheck %s --check-prefix=CHECK-REV-MIN
+; RUN: opt -passes=loop-vectorize -force-vector-width=4 -S %s | FileCheck %s --check-prefix=CHECK-REV-MIN
; This test case is extracted from the rnflow (Fortran) benchmark in the Polyhedron benchmark suite.
; The function minlst takes two indices (i.e. a range), scans backwards over the range,
; and returns the index of the minimum value (the first match found by the backward scan).
-define fastcc i32 @minlst(i32 %.0.val, i32 %.0.val1, ptr %.0.val3) {
-; CHECK-REV-MIN-LABEL: define internal fastcc i32 @_QFcptrf2Pminlst(
-; CHECK-REV-MIN-SAME: i32 [[DOT0_VAL:%.*]], i32 [[DOT0_VAL1:%.*]], ptr [[DOT0_VAL3:%.*]]) unnamed_addr {
-; CHECK-REV-MIN-NEXT: [[TMP1:%.*]] = sext i32 [[DOT0_VAL]] to i64
-; CHECK-REV-MIN-NEXT: [[TMP2:%.*]] = sub i32 0, [[DOT0_VAL1]]
-; CHECK-REV-MIN-NEXT: [[TMP3:%.*]] = sext i32 [[TMP2]] to i64
-; CHECK-REV-MIN-NEXT: [[TMP4:%.*]] = add nsw i64 [[TMP1]], [[TMP3]]
-; CHECK-REV-MIN-NEXT: [[TMP5:%.*]] = sub nsw i64 0, [[TMP4]]
-; CHECK-REV-MIN-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr i8, ptr [[DOT0_VAL3]], i64 -8
-; CHECK-REV-MIN-NEXT: [[INVARIANT_GEP5:%.*]] = getelementptr i8, ptr [[DOT0_VAL3]], i64 -4
-; CHECK-REV-MIN-NEXT: [[TMP6:%.*]] = icmp slt i64 [[TMP4]], 0
-; CHECK-REV-MIN-NEXT: br i1 [[TMP6]], label %[[DOTLR_PH_PREHEADER:.*]], [[DOT_CRIT_EDGE:label %.*]]
-; CHECK-REV-MIN: [[_LR_PH_PREHEADER:.*:]]
-; CHECK-REV-MIN-NEXT: [[TMP7:%.*]] = sext i32 [[DOT0_VAL1]] to i64
-; CHECK-REV-MIN-NEXT: br label %[[DOTLR_PH:.*]]
-; CHECK-REV-MIN: [[_LR_PH:.*:]]
-; CHECK-REV-MIN-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[TMP7]], %[[DOTLR_PH_PREHEADER]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ]
-; CHECK-REV-MIN-NEXT: [[TMP8:%.*]] = phi i64 [ [[TMP14:%.*]], %[[DOTLR_PH]] ], [ [[TMP5]], %[[DOTLR_PH_PREHEADER]] ]
-; CHECK-REV-MIN-NEXT: [[DOT07:%.*]] = phi i32 [ [[DOT1:%.*]], %[[DOTLR_PH]] ], [ [[DOT0_VAL1]], %[[DOTLR_PH_PREHEADER]] ]
-; CHECK-REV-MIN-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], -1
-; CHECK-REV-MIN-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[INVARIANT_GEP]], i64 [[INDVARS_IV]]
-; CHECK-REV-MIN-NEXT: [[TMP9:%.*]] = load float, ptr [[GEP]], align 4
-; CHECK-REV-MIN-NEXT: [[TMP10:%.*]] = sext i32 [[DOT07]] to i64
-; CHECK-REV-MIN-NEXT: [[GEP6:%.*]] = getelementptr float, ptr [[INVARIANT_GEP5]], i64 [[TMP10]]
-; CHECK-REV-MIN-NEXT: [[TMP11:%.*]] = load float, ptr [[GEP6]], align 4
-; CHECK-REV-MIN-NEXT: [[TMP12:%.*]] = fcmp contract olt float [[TMP9]], [[TMP11]]
-; CHECK-REV-MIN-NEXT: [[TMP13:%.*]] = trunc nsw i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-REV-MIN-NEXT: [[DOT1]] = select i1 [[TMP12]], i32 [[TMP13]], i32 [[DOT07]]
-; CHECK-REV-MIN-NEXT: [[TMP14]] = add nsw i64 [[TMP8]], -1
-; CHECK-REV-MIN-NEXT: [[TMP15:%.*]] = icmp sgt i64 [[TMP8]], 1
-; CHECK-REV-MIN-NEXT: br i1 [[TMP15]], label %[[DOTLR_PH]], label %[[DOT_CRIT_EDGE_LOOPEXIT:.*]]
+define fastcc i32 @minlst(i32 %first_index, i32 %last_index, ptr %array) {
+; CHECK-REV-MIN-LABEL: define fastcc i32 @minlst(
+; CHECK-REV-MIN-SAME: i32 [[FIRST_INDEX:%.*]], i32 [[LAST_INDEX:%.*]], ptr [[ARRAY:%.*]]) {
+; CHECK-REV-MIN-NEXT: [[ENTRY:.*]]:
+; CHECK-REV-MIN-NEXT: [[FIRST_INDEX_SEXT:%.*]] = sext i32 [[FIRST_INDEX]] to i64
+; CHECK-REV-MIN-NEXT: [[LAST_INDEX_NEG:%.*]] = sub i32 0, [[LAST_INDEX]]
+; CHECK-REV-MIN-NEXT: [[LAST_INDEX_NEG_SEXT:%.*]] = sext i32 [[LAST_INDEX_NEG]] to i64
+; CHECK-REV-MIN-NEXT: [[ADD:%.*]] = add nsw i64 [[FIRST_INDEX_SEXT]], [[LAST_INDEX_NEG_SEXT]]
+; CHECK-REV-MIN-NEXT: [[SUB:%.*]] = sub nsw i64 0, [[ADD]]
+; CHECK-REV-MIN-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr i8, ptr [[ARRAY]], i64 -8
+; CHECK-REV-MIN-NEXT: [[INVARIANT_GEP5:%.*]] = getelementptr i8, ptr [[ARRAY]], i64 -4
+; CHECK-REV-MIN-NEXT: [[EARLY_EXIT_COND:%.*]] = icmp slt i64 [[ADD]], 0
+; CHECK-REV-MIN-NEXT: br i1 [[EARLY_EXIT_COND]], label %[[LOOP_PREHEADER:.*]], [[DOT_CRIT_EDGE:label %.*]]
+; CHECK-REV-MIN: [[LOOP_PREHEADER]]:
+; CHECK-REV-MIN-NEXT: [[LAST_INDEX_SEXT:%.*]] = sext i32 [[LAST_INDEX]] to i64
+; CHECK-REV-MIN-NEXT: br label %[[LOOP:.*]]
+; CHECK-REV-MIN: [[LOOP]]:
+; CHECK-REV-MIN-NEXT: [[IV:%.*]] = phi i64 [ [[LAST_INDEX_SEXT]], %[[LOOP_PREHEADER]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-REV-MIN-NEXT: [[TMP0:%.*]] = phi i64 [ [[TMP1:%.*]], %[[LOOP]] ], [ [[SUB]], %[[LOOP_PREHEADER]] ]
+; CHECK-REV-MIN-NEXT: [[INDEX:%.*]] = phi i32 [ [[SELECT:%.*]], %[[LOOP]] ], [ [[LAST_INDEX]], %[[LOOP_PREHEADER]] ]
+; CHECK-REV-MIN-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1
+; CHECK-REV-MIN-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[INVARIANT_GEP]], i64 [[IV]]
+; CHECK-REV-MIN-NEXT: [[LOAD1:%.*]] = load float, ptr [[GEP]], align 4
+; CHECK-REV-MIN-NEXT: [[INDEX_SEXT:%.*]] = sext i32 [[INDEX]] to i64
+; CHECK-REV-MIN-NEXT: [[GEP6:%.*]] = getelementptr float, ptr [[INVARIANT_GEP5]], i64 [[INDEX_SEXT]]
+; CHECK-REV-MIN-NEXT: [[LOAD2:%.*]] = load float, ptr [[GEP6]], align 4
+; CHECK-REV-MIN-NEXT: [[CMP:%.*]] = fcmp contract olt float [[LOAD1]], [[LOAD2]]
+; CHECK-REV-MIN-NEXT: [[IV_NEXT_TRUNC:%.*]] = trunc nsw i64 [[IV_NEXT]] to i32
+; CHECK-REV-MIN-NEXT: [[SELECT]] = select i1 [[CMP]], i32 [[IV_NEXT_TRUNC]], i32 [[INDEX]]
+; CHECK-REV-MIN-NEXT: [[TMP1]] = add nsw i64 [[TMP0]], -1
+; CHECK-REV-MIN-NEXT: [[LOOP_COND:%.*]] = icmp sgt i64 [[TMP0]], 1
+; CHECK-REV-MIN-NEXT: br i1 [[LOOP_COND]], label %[[LOOP]], label %[[DOT_CRIT_EDGE_LOOPEXIT:.*]]
; CHECK-REV-MIN: [[__CRIT_EDGE_LOOPEXIT:.*:]]
-; CHECK-REV-MIN-NEXT: [[DOT1_LCSSA:%.*]] = phi i32 [ [[DOT1]], %[[DOTLR_PH]] ]
+; CHECK-REV-MIN-NEXT: [[SELECT_LCSSA:%.*]] = phi i32 [ [[SELECT]], %[[LOOP]] ]
; CHECK-REV-MIN-NEXT: br [[DOT_CRIT_EDGE]]
; CHECK-REV-MIN: [[__CRIT_EDGE:.*:]]
-; CHECK-REV-MIN-NEXT: [[DOT0_LCSSA:%.*]] = phi i32 [ [[DOT0_VAL1]], [[TMP0:%.*]] ], [ [[DOT1_LCSSA]], %[[DOT_CRIT_EDGE_LOOPEXIT]] ]
-; CHECK-REV-MIN-NEXT: ret i32 [[DOT0_LCSSA]]
+; CHECK-REV-MIN-NEXT: [[LAST_INDEX_RET:%.*]] = phi i32 [ [[LAST_INDEX]], %[[ENTRY]] ], [ [[SELECT_LCSSA]], %[[DOT_CRIT_EDGE_LOOPEXIT]] ]
+; CHECK-REV-MIN-NEXT: ret i32 [[LAST_INDEX_RET]]
;
- %1 = sext i32 %.0.val to i64
- %2 = sub i32 0, %.0.val1
- %3 = sext i32 %2 to i64
- %4 = add nsw i64 %1, %3
- %5 = sub nsw i64 0, %4
- %invariant.gep = getelementptr i8, ptr %.0.val3, i64 -8
- %invariant.gep5 = getelementptr i8, ptr %.0.val3, i64 -4
- %6 = icmp slt i64 %4, 0
- br i1 %6, label %.lr.ph.preheader, label %._crit_edge
+entry:
+ %first_index_sext = sext i32 %first_index to i64
+ %last_index_neg = sub i32 0, %last_index
+ %last_index_neg_sext = sext i32 %last_index_neg to i64
+ %add = add nsw i64 %first_index_sext, %last_index_neg_sext
+ %diff = sub nsw i64 0, %add
+ %first_ptr = getelementptr i8, ptr %array, i64 -8
+ %second_ptr = getelementptr i8, ptr %array, i64 -4
+ %early_exit_cond = icmp slt i64 %add, 0
+ br i1 %early_exit_cond, label %loop.preheader, label %._crit_edge
-.lr.ph.preheader: ; preds = %0
- %7 = sext i32 %.0.val1 to i64
- br label %.lr.ph
+loop.preheader: ; preds = %entry
+ %last_index_sext = sext i32 %last_index to i64
+ br label %loop
-.lr.ph: ; preds = %.lr.ph.preheader, %.lr.ph
- %indvars.iv = phi i64 [ %7, %.lr.ph.preheader ], [ %indvars.iv.next, %.lr.ph ]
- %8 = phi i64 [ %14, %.lr.ph ], [ %5, %.lr.ph.preheader ]
- %.07 = phi i32 [ %.1, %.lr.ph ], [ %.0.val1, %.lr.ph.preheader ]
- %indvars.iv.next = add nsw i64 %indvars.iv, -1
- %gep = getelementptr float, ptr %invariant.gep, i64 %indvars.iv
- %9 = load float, ptr %gep, align 4
- %10 = sext i32 %.07 to i64
- %gep6 = getelementptr float, ptr %invariant.gep5, i64 %10
- %11 = load float, ptr %gep6, align 4
- %12 = fcmp contract olt float %9, %11
- %13 = trunc nsw i64 %indvars.iv.next to i32
- %.1 = select i1 %12, i32 %13, i32 %.07
- %14 = add nsw i64 %8, -1
- %15 = icmp sgt i64 %8, 1
- br i1 %15, label %.lr.ph, label %._crit_edge.loopexit
+loop: ; preds = %loop.preheader, %loop
+ %iv = phi i64 [ %last_index_sext, %loop.preheader ], [ %iv.next, %loop ]
+ %dec_iv = phi i64 [ %dec, %loop ], [ %diff, %loop.preheader ]
+ %index = phi i32 [ %select, %loop ], [ %last_index, %loop.preheader ]
+ %iv.next = add nsw i64 %iv, -1
+ %load1_ptr = getelementptr float, ptr %first_ptr, i64 %iv
+ %load1 = load float, ptr %load1_ptr, align 4
+ %index_sext = sext i32 %index to i64
+ %load2_ptr = getelementptr float, ptr %second_ptr, i64 %index_sext
+ %load2 = load float, ptr %load2_ptr, align 4
+ %cmp = fcmp contract olt float %load1, %load2
+ %iv.next.trunc = trunc nsw i64 %iv.next to i32
+ %select = select i1 %cmp, i32 %iv.next.trunc, i32 %index
+ %dec = add nsw i64 %dec_iv, -1
+ %loop_cond = icmp sgt i64 %dec_iv, 1
+ br i1 %loop_cond, label %loop, label %._crit_edge.loopexit
-._crit_edge.loopexit: ; preds = %.lr.ph
- %.1.lcssa = phi i32 [ %.1, %.lr.ph ]
+._crit_edge.loopexit: ; preds = %loop
+ %select.lcssa = phi i32 [ %select, %loop ]
br label %._crit_edge
._crit_edge: ; preds = %._crit_edge.loopexit, %0
- %.0.lcssa = phi i32 [ %.0.val1, %0 ], [ %.1.lcssa, %._crit_edge.loopexit ]
- ret i32 %.0.lcssa
+ %last_index_ret = phi i32 [ %last_index, %entry ], [ %select.lcssa, %._crit_edge.loopexit ]
+ ret i32 %last_index_ret
}