[llvm] 1a2e344 - [PhaseOrdering] Add additional end-to-end range check removal tests.
Florian Hahn via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 24 11:22:33 PDT 2023
Author: Florian Hahn
Date: 2023-08-24T19:22:17+01:00
New Revision: 1a2e344b10404dd7b285bc14a9176e8c004d8fd4
URL: https://github.com/llvm/llvm-project/commit/1a2e344b10404dd7b285bc14a9176e8c004d8fd4
DIFF: https://github.com/llvm/llvm-project/commit/1a2e344b10404dd7b285bc14a9176e8c004d8fd4.diff
LOG: [PhaseOrdering] Add additional end-to-end range check removal tests.
Tests for
https://github.com/llvm/llvm-project/issues/63125
https://github.com/llvm/llvm-project/issues/64881
Added:
llvm/test/Transforms/PhaseOrdering/loop-access-checks.ll
Modified:
Removed:
llvm/test/Transforms/PhaseOrdering/iterator-with-runtime-check.ll
################################################################################
diff --git a/llvm/test/Transforms/PhaseOrdering/iterator-with-runtime-check.ll b/llvm/test/Transforms/PhaseOrdering/iterator-with-runtime-check.ll
deleted file mode 100644
index 23b1b2b3cd87d2..00000000000000
--- a/llvm/test/Transforms/PhaseOrdering/iterator-with-runtime-check.ll
+++ /dev/null
@@ -1,124 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt -passes='default<O2>' -S %s | FileCheck %s
-
-; Slightly reduced test case for a loop iterating over a std::span with libc++ hardening.
-; TODO: The runtime check in the loop should be removed.
-;
-; #include <span>
-; #include <iostream>
-;
-; void use(unsigned&);
-;
-; void fill_with_foreach(std::span<unsigned> elems) {
-; for (unsigned& x : elems)
-; use(x);
-; }
-
-%"class.std::__1::span" = type { ptr, i64 }
-%"struct.std::__1::__bounded_iter" = type { ptr, ptr, ptr }
-
-define void @test_fill_with_foreach([2 x i64] %elems.coerce) {
-; CHECK-LABEL: define void @test_fill_with_foreach
-; CHECK-SAME: ([2 x i64] [[ELEMS_COERCE:%.*]]) local_unnamed_addr {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[ELEMS_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x i64] [[ELEMS_COERCE]], 0
-; CHECK-NEXT: [[TMP0:%.*]] = inttoptr i64 [[ELEMS_COERCE_FCA_0_EXTRACT]] to ptr
-; CHECK-NEXT: [[ELEMS_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x i64] [[ELEMS_COERCE]], 1
-; CHECK-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 [[ELEMS_COERCE_FCA_1_EXTRACT]]
-; CHECK-NEXT: [[CMP_NOT_I_I_I_I:%.*]] = icmp slt i64 [[ELEMS_COERCE_FCA_1_EXTRACT]], 0
-; CHECK-NEXT: br i1 [[CMP_NOT_I_I_I_I]], label [[ERROR:%.*]], label [[FOR_COND_PREHEADER:%.*]]
-; CHECK: for.cond.preheader:
-; CHECK-NEXT: [[CMP_I_NOT2:%.*]] = icmp eq i64 [[ELEMS_COERCE_FCA_1_EXTRACT]], 0
-; CHECK-NEXT: br i1 [[CMP_I_NOT2]], label [[COMMON_RET:%.*]], label [[FOR_BODY:%.*]]
-; CHECK: common.ret:
-; CHECK-NEXT: ret void
-; CHECK: error:
-; CHECK-NEXT: tail call void @error()
-; CHECK-NEXT: br label [[COMMON_RET]]
-; CHECK: for.body:
-; CHECK-NEXT: [[__BEGIN1_SROA_0_03:%.*]] = phi ptr [ [[INCDEC_PTR_I:%.*]], [[FOR_LATCH:%.*]] ], [ [[TMP0]], [[FOR_COND_PREHEADER]] ]
-; CHECK-NEXT: [[CMP2_I_I:%.*]] = icmp ult ptr [[__BEGIN1_SROA_0_03]], [[ADD_PTR_I]]
-; CHECK-NEXT: br i1 [[CMP2_I_I]], label [[FOR_LATCH]], label [[ERROR]]
-; CHECK: for.latch:
-; CHECK-NEXT: tail call void @use(ptr noundef nonnull align 4 dereferenceable(4) [[__BEGIN1_SROA_0_03]])
-; CHECK-NEXT: [[INCDEC_PTR_I]] = getelementptr inbounds i32, ptr [[__BEGIN1_SROA_0_03]], i64 1
-; CHECK-NEXT: [[CMP_I_NOT:%.*]] = icmp eq ptr [[INCDEC_PTR_I]], [[ADD_PTR_I]]
-; CHECK-NEXT: br i1 [[CMP_I_NOT]], label [[COMMON_RET]], label [[FOR_BODY]]
-;
-entry:
- %elems = alloca %"class.std::__1::span", align 8
- %__begin1 = alloca %"struct.std::__1::__bounded_iter", align 8
- %__end1 = alloca %"struct.std::__1::__bounded_iter", align 8
- %elems.coerce.fca.0.extract = extractvalue [2 x i64] %elems.coerce, 0
- store i64 %elems.coerce.fca.0.extract, ptr %elems, align 8
- %elems.coerce.fca.1.extract = extractvalue [2 x i64] %elems.coerce, 1
- %elems.coerce.fca.1.gep = getelementptr inbounds [2 x i64], ptr %elems, i64 0, i64 1
- store i64 %elems.coerce.fca.1.extract, ptr %elems.coerce.fca.1.gep, align 8
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %__begin1) #6
- %0 = load ptr, ptr %elems, align 8
- %__size_.i.i = getelementptr inbounds %"class.std::__1::span", ptr %elems, i64 0, i32 1
- %1 = load i64, ptr %__size_.i.i, align 8
- %add.ptr.i = getelementptr inbounds i32, ptr %0, i64 %1
- store ptr %0, ptr %__begin1, align 8
- %__begin_.i.i.i.i = getelementptr inbounds %"struct.std::__1::__bounded_iter", ptr %__begin1, i64 0, i32 1
- store ptr %0, ptr %__begin_.i.i.i.i, align 8
- %__end_.i.i.i.i = getelementptr inbounds %"struct.std::__1::__bounded_iter", ptr %__begin1, i64 0, i32 2
- store ptr %add.ptr.i, ptr %__end_.i.i.i.i, align 8
- %cmp.not.i.i.i.i = icmp slt i64 %1, 0
- br i1 %cmp.not.i.i.i.i, label %error, label %check.2
-
-check.2:
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %__end1) #6
- %l4 = load ptr, ptr %elems, align 8
- %__size_.i.i4 = getelementptr inbounds %"class.std::__1::span", ptr %elems, i64 0, i32 1
- %l5 = load i64, ptr %__size_.i.i4, align 8
- %add.ptr.i5 = getelementptr inbounds i32, ptr %l4, i64 %l5
- store ptr %add.ptr.i5, ptr %__end1, align 8
- %__begin_.i.i.i.i6 = getelementptr inbounds %"struct.std::__1::__bounded_iter", ptr %__end1, i64 0, i32 1
- store ptr %l4, ptr %__begin_.i.i.i.i6, align 8
- %__end_.i.i.i.i7 = getelementptr inbounds %"struct.std::__1::__bounded_iter", ptr %__end1, i64 0, i32 2
- store ptr %add.ptr.i5, ptr %__end_.i.i.i.i7, align 8
- %cmp.not.i.i.i.i8 = icmp slt i64 %l5, 0
- br i1 %cmp.not.i.i.i.i8, label %error, label %for.cond
-
-error:
- call void @error()
- ret void
-
-for.cond:
- %l8 = load ptr, ptr %__begin1, align 8
- %l9 = load ptr, ptr %__end1, align 8
- %cmp.i = icmp ne ptr %l8, %l9
- br i1 %cmp.i, label %for.body, label %for.cond.cleanup
-
-for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %__end1)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %__begin1)
- ret void
-
-for.body: ; preds = %for.cond
- %l10 = load ptr, ptr %__begin1, align 8
- %__begin_.i.i = getelementptr inbounds %"struct.std::__1::__bounded_iter", ptr %__begin1, i64 0, i32 1
- %l11 = load ptr, ptr %__begin_.i.i, align 8
- %cmp.not.i.i = icmp uge ptr %l10, %l11
- %__end_.i.i = getelementptr inbounds %"struct.std::__1::__bounded_iter", ptr %__begin1, i64 0, i32 2
- %l12 = load ptr, ptr %__end_.i.i, align 8
- %cmp2.i.i = icmp ult ptr %l10, %l12
- %sel = select i1 %cmp.not.i.i, i1 %cmp2.i.i, i1 false
- br i1 %sel, label %for.latch, label %error
-
-for.latch:
- call void @use(ptr noundef nonnull align 4 dereferenceable(4) %l10)
- %l = load ptr, ptr %__begin1, align 8
- %incdec.ptr.i = getelementptr inbounds i32, ptr %l, i64 1
- store ptr %incdec.ptr.i, ptr %__begin1, align 8
- br label %for.cond
-}
-
-declare void @error()
-
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-
-declare void @use(ptr noundef nonnull align 4 dereferenceable(4))
-
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
diff --git a/llvm/test/Transforms/PhaseOrdering/loop-access-checks.ll b/llvm/test/Transforms/PhaseOrdering/loop-access-checks.ll
new file mode 100644
index 00000000000000..c3277bee9edb18
--- /dev/null
+++ b/llvm/test/Transforms/PhaseOrdering/loop-access-checks.ll
@@ -0,0 +1,403 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
+; RUN: opt -passes='default<O2>' -S %s | FileCheck %s
+
+; Slightly reduced test case for a loop iterating over a std::span with libc++ hardening.
+; TODO: The runtime check in the loop should be removed.
+;
+; #include <span>
+; #include <iostream>
+;
+; void use(unsigned&);
+;
+; void fill_with_foreach(std::span<unsigned> elems) {
+; for (unsigned& x : elems)
+; use(x);
+; }
+
+%"class.std::__1::span" = type { ptr, i64 }
+%"struct.std::__1::__bounded_iter" = type { ptr, ptr, ptr }
+
+define void @test_fill_with_foreach([2 x i64] %elems.coerce) {
+; CHECK-LABEL: define void @test_fill_with_foreach
+; CHECK-SAME: ([2 x i64] [[ELEMS_COERCE:%.*]]) local_unnamed_addr {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ELEMS_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x i64] [[ELEMS_COERCE]], 0
+; CHECK-NEXT: [[TMP0:%.*]] = inttoptr i64 [[ELEMS_COERCE_FCA_0_EXTRACT]] to ptr
+; CHECK-NEXT: [[ELEMS_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x i64] [[ELEMS_COERCE]], 1
+; CHECK-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 [[ELEMS_COERCE_FCA_1_EXTRACT]]
+; CHECK-NEXT: [[CMP_NOT_I_I_I_I:%.*]] = icmp slt i64 [[ELEMS_COERCE_FCA_1_EXTRACT]], 0
+; CHECK-NEXT: br i1 [[CMP_NOT_I_I_I_I]], label [[ERROR:%.*]], label [[FOR_COND_PREHEADER:%.*]]
+; CHECK: for.cond.preheader:
+; CHECK-NEXT: [[CMP_I_NOT2:%.*]] = icmp eq i64 [[ELEMS_COERCE_FCA_1_EXTRACT]], 0
+; CHECK-NEXT: br i1 [[CMP_I_NOT2]], label [[COMMON_RET:%.*]], label [[FOR_BODY:%.*]]
+; CHECK: common.ret:
+; CHECK-NEXT: ret void
+; CHECK: error:
+; CHECK-NEXT: tail call void @error()
+; CHECK-NEXT: br label [[COMMON_RET]]
+; CHECK: for.body:
+; CHECK-NEXT: [[__BEGIN1_SROA_0_03:%.*]] = phi ptr [ [[INCDEC_PTR_I:%.*]], [[FOR_LATCH:%.*]] ], [ [[TMP0]], [[FOR_COND_PREHEADER]] ]
+; CHECK-NEXT: [[CMP2_I_I:%.*]] = icmp ult ptr [[__BEGIN1_SROA_0_03]], [[ADD_PTR_I]]
+; CHECK-NEXT: br i1 [[CMP2_I_I]], label [[FOR_LATCH]], label [[ERROR]]
+; CHECK: for.latch:
+; CHECK-NEXT: tail call void @use(ptr noundef nonnull align 4 dereferenceable(4) [[__BEGIN1_SROA_0_03]])
+; CHECK-NEXT: [[INCDEC_PTR_I]] = getelementptr inbounds i32, ptr [[__BEGIN1_SROA_0_03]], i64 1
+; CHECK-NEXT: [[CMP_I_NOT:%.*]] = icmp eq ptr [[INCDEC_PTR_I]], [[ADD_PTR_I]]
+; CHECK-NEXT: br i1 [[CMP_I_NOT]], label [[COMMON_RET]], label [[FOR_BODY]]
+;
+entry:
+ %elems = alloca %"class.std::__1::span", align 8
+ %__begin1 = alloca %"struct.std::__1::__bounded_iter", align 8
+ %__end1 = alloca %"struct.std::__1::__bounded_iter", align 8
+ %elems.coerce.fca.0.extract = extractvalue [2 x i64] %elems.coerce, 0
+ store i64 %elems.coerce.fca.0.extract, ptr %elems, align 8
+ %elems.coerce.fca.1.extract = extractvalue [2 x i64] %elems.coerce, 1
+ %elems.coerce.fca.1.gep = getelementptr inbounds [2 x i64], ptr %elems, i64 0, i64 1
+ store i64 %elems.coerce.fca.1.extract, ptr %elems.coerce.fca.1.gep, align 8
+ call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %__begin1) #6
+ %0 = load ptr, ptr %elems, align 8
+ %__size_.i.i = getelementptr inbounds %"class.std::__1::span", ptr %elems, i64 0, i32 1
+ %1 = load i64, ptr %__size_.i.i, align 8
+ %add.ptr.i = getelementptr inbounds i32, ptr %0, i64 %1
+ store ptr %0, ptr %__begin1, align 8
+ %__begin_.i.i.i.i = getelementptr inbounds %"struct.std::__1::__bounded_iter", ptr %__begin1, i64 0, i32 1
+ store ptr %0, ptr %__begin_.i.i.i.i, align 8
+ %__end_.i.i.i.i = getelementptr inbounds %"struct.std::__1::__bounded_iter", ptr %__begin1, i64 0, i32 2
+ store ptr %add.ptr.i, ptr %__end_.i.i.i.i, align 8
+ %cmp.not.i.i.i.i = icmp slt i64 %1, 0
+ br i1 %cmp.not.i.i.i.i, label %error, label %check.2
+
+check.2:
+ call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %__end1) #6
+ %l4 = load ptr, ptr %elems, align 8
+ %__size_.i.i4 = getelementptr inbounds %"class.std::__1::span", ptr %elems, i64 0, i32 1
+ %l5 = load i64, ptr %__size_.i.i4, align 8
+ %add.ptr.i5 = getelementptr inbounds i32, ptr %l4, i64 %l5
+ store ptr %add.ptr.i5, ptr %__end1, align 8
+ %__begin_.i.i.i.i6 = getelementptr inbounds %"struct.std::__1::__bounded_iter", ptr %__end1, i64 0, i32 1
+ store ptr %l4, ptr %__begin_.i.i.i.i6, align 8
+ %__end_.i.i.i.i7 = getelementptr inbounds %"struct.std::__1::__bounded_iter", ptr %__end1, i64 0, i32 2
+ store ptr %add.ptr.i5, ptr %__end_.i.i.i.i7, align 8
+ %cmp.not.i.i.i.i8 = icmp slt i64 %l5, 0
+ br i1 %cmp.not.i.i.i.i8, label %error, label %for.cond
+
+error:
+ call void @error()
+ ret void
+
+for.cond:
+ %l8 = load ptr, ptr %__begin1, align 8
+ %l9 = load ptr, ptr %__end1, align 8
+ %cmp.i = icmp ne ptr %l8, %l9
+ br i1 %cmp.i, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond
+ call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %__end1)
+ call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %__begin1)
+ ret void
+
+for.body: ; preds = %for.cond
+ %l10 = load ptr, ptr %__begin1, align 8
+ %__begin_.i.i = getelementptr inbounds %"struct.std::__1::__bounded_iter", ptr %__begin1, i64 0, i32 1
+ %l11 = load ptr, ptr %__begin_.i.i, align 8
+ %cmp.not.i.i = icmp uge ptr %l10, %l11
+ %__end_.i.i = getelementptr inbounds %"struct.std::__1::__bounded_iter", ptr %__begin1, i64 0, i32 2
+ %l12 = load ptr, ptr %__end_.i.i, align 8
+ %cmp2.i.i = icmp ult ptr %l10, %l12
+ %sel = select i1 %cmp.not.i.i, i1 %cmp2.i.i, i1 false
+ br i1 %sel, label %for.latch, label %error
+
+for.latch:
+ call void @use(ptr noundef nonnull align 4 dereferenceable(4) %l10)
+ %l = load ptr, ptr %__begin1, align 8
+ %incdec.ptr.i = getelementptr inbounds i32, ptr %l, i64 1
+ store ptr %incdec.ptr.i, ptr %__begin1, align 8
+ br label %for.cond
+}
+
+declare void @error()
+
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+
+declare void @use(ptr noundef nonnull align 4 dereferenceable(4))
+
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+
+
+; -------------------------------------------------------------------------
+; Test case for runtime check removal when accessing vector elements with
+; hardened glibc++ (PR63125)
+
+%Vector_base = type { %Vector_impl }
+%Vector_impl = type { %Vector_impl_data }
+%Vector_impl_data = type { ptr, ptr, ptr }
+
+define void @foo(ptr noundef nonnull align 8 dereferenceable(24) noalias %vec) #0 {
+; CHECK-LABEL: define void @foo
+; CHECK-SAME: (ptr noalias nocapture noundef nonnull readonly align 8 dereferenceable(24) [[VEC:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[_M_FINISH_I_I:%.*]] = getelementptr inbounds [[VECTOR_IMPL_DATA:%.*]], ptr [[VEC]], i64 0, i32 1
+; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[_M_FINISH_I_I]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC]], align 8
+; CHECK-NEXT: [[SUB_PTR_LHS_CAST_I_I:%.*]] = ptrtoint ptr [[TMP0]] to i64
+; CHECK-NEXT: [[SUB_PTR_RHS_CAST_I_I:%.*]] = ptrtoint ptr [[TMP1]] to i64
+; CHECK-NEXT: [[SUB_PTR_SUB_I_I:%.*]] = sub i64 [[SUB_PTR_LHS_CAST_I_I]], [[SUB_PTR_RHS_CAST_I_I]]
+; CHECK-NEXT: [[SUB_PTR_DIV_I_I:%.*]] = ashr exact i64 [[SUB_PTR_SUB_I_I]], 3
+; CHECK-NEXT: [[CMP_NOT9:%.*]] = icmp eq ptr [[TMP0]], [[TMP1]]
+; CHECK-NEXT: br i1 [[CMP_NOT9]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY:%.*]]
+; CHECK: for.cond.cleanup:
+; CHECK-NEXT: ret void
+; CHECK: for.body:
+; CHECK-NEXT: [[I_010:%.*]] = phi i64 [ [[INC:%.*]], [[OPERATOR_ACC_EXIT:%.*]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[CMP_NOT_I:%.*]] = icmp ugt i64 [[SUB_PTR_DIV_I_I]], [[I_010]]
+; CHECK-NEXT: br i1 [[CMP_NOT_I]], label [[OPERATOR_ACC_EXIT]], label [[IF_THEN_I:%.*]]
+; CHECK: if.then.i:
+; CHECK-NEXT: tail call void @abort() #[[ATTR1:[0-9]+]]
+; CHECK-NEXT: unreachable
+; CHECK: operator_acc.exit:
+; CHECK-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds double, ptr [[TMP1]], i64 [[I_010]]
+; CHECK-NEXT: [[TMP2:%.*]] = load double, ptr [[ADD_PTR_I]], align 8
+; CHECK-NEXT: [[ADD:%.*]] = fadd double [[TMP2]], 1.000000e+00
+; CHECK-NEXT: store double [[ADD]], ptr [[ADD_PTR_I]], align 8
+; CHECK-NEXT: [[INC]] = add nuw i64 [[I_010]], 1
+; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i64 [[INC]], [[SUB_PTR_DIV_I_I]]
+; CHECK-NEXT: br i1 [[CMP_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]]
+;
+entry:
+ %vec.addr = alloca ptr, align 8
+ %count = alloca i64, align 8
+ %i = alloca i64, align 8
+ store ptr %vec, ptr %vec.addr, align 8
+ call void @llvm.lifetime.start.p0(i64 8, ptr %count)
+ %0 = load ptr, ptr %vec.addr, align 8
+ %call = call noundef i64 @alloc(ptr noundef nonnull align 8 dereferenceable(24) %0)
+ store i64 %call, ptr %count, align 8
+ call void @llvm.lifetime.start.p0(i64 8, ptr %i)
+ store i64 0, ptr %i, align 8
+ br label %for.cond
+
+for.cond:
+ %1 = load i64, ptr %i, align 8
+ %2 = load i64, ptr %count, align 8
+ %cmp = icmp ne i64 %1, %2
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup:
+ call void @llvm.lifetime.end.p0(i64 8, ptr %i)
+ br label %for.end
+
+for.body:
+ %3 = load ptr, ptr %vec.addr, align 8
+ %4 = load i64, ptr %i, align 8
+ %call1 = call noundef nonnull align 8 dereferenceable(8) ptr @operator_acc(ptr noundef nonnull align 8 dereferenceable(24) %3, i64 noundef %4)
+ %5 = load double, ptr %call1, align 8
+ %add = fadd double %5, 1.000000e+00
+ store double %add, ptr %call1, align 8
+ br label %for.inc
+
+for.inc:
+ %6 = load i64, ptr %i, align 8
+ %inc = add i64 %6, 1
+ store i64 %inc, ptr %i, align 8
+ br label %for.cond
+
+for.end:
+ call void @llvm.lifetime.end.p0(i64 8, ptr %count) #5
+ ret void
+}
+
+define internal noundef i64 @alloc(ptr noundef nonnull align 8 dereferenceable(24) %__cont) {
+entry:
+ %__cont.addr = alloca ptr, align 8
+ store ptr %__cont, ptr %__cont.addr, align 8
+ %0 = load ptr, ptr %__cont.addr, align 8
+ %call = call noundef i64 @size(ptr noundef nonnull align 8 dereferenceable(24) %0) #5
+ ret i64 %call
+}
+
+define internal noundef nonnull align 8 dereferenceable(8) ptr @operator_acc(ptr noundef nonnull align 8 dereferenceable(24) %this, i64 noundef %__n) {
+entry:
+ %this.addr = alloca ptr, align 8
+ %__n.addr = alloca i64, align 8
+ store ptr %this, ptr %this.addr, align 8
+ store i64 %__n, ptr %__n.addr, align 8
+ %this1 = load ptr, ptr %this.addr, align 8
+ br label %do.body
+
+do.body:
+ %0 = load i64, ptr %__n.addr, align 8
+ %call = call noundef i64 @size(ptr noundef nonnull align 8 dereferenceable(24) %this1)
+ %cmp = icmp ult i64 %0, %call
+ %lnot = xor i1 %cmp, true
+ %conv = zext i1 %lnot to i64
+ %tobool = icmp ne i64 %conv, 0
+ br i1 %tobool, label %if.then, label %if.end
+
+if.then:
+ call void @abort() readnone
+ unreachable
+
+if.end:
+ br label %do.cond
+
+do.cond:
+ br label %do.end
+
+do.end:
+ %_M_impl = getelementptr inbounds %Vector_base, ptr %this1, i32 0, i32 0
+ %_M_start = getelementptr inbounds %Vector_impl_data, ptr %_M_impl, i32 0, i32 0
+ %1 = load ptr, ptr %_M_start, align 8
+ %2 = load i64, ptr %__n.addr, align 8
+ %add.ptr = getelementptr inbounds double, ptr %1, i64 %2
+ ret ptr %add.ptr
+}
+
+define internal noundef i64 @size(ptr noundef nonnull align 8 dereferenceable(24) %this) {
+entry:
+ %this.addr = alloca ptr, align 8
+ store ptr %this, ptr %this.addr, align 8
+ %this1 = load ptr, ptr %this.addr, align 8
+ %_M_impl = getelementptr inbounds %Vector_base, ptr %this1, i32 0, i32 0
+ %_M_finish = getelementptr inbounds %Vector_impl_data, ptr %_M_impl, i32 0, i32 1
+ %0 = load ptr, ptr %_M_finish, align 8
+ %_M_impl2 = getelementptr inbounds %Vector_base, ptr %this1, i32 0, i32 0
+ %_M_start = getelementptr inbounds %Vector_impl_data, ptr %_M_impl2, i32 0, i32 0
+ %1 = load ptr, ptr %_M_start, align 8
+ %sub.ptr.lhs.cast = ptrtoint ptr %0 to i64
+ %sub.ptr.rhs.cast = ptrtoint ptr %1 to i64
+ %sub.ptr.sub = sub i64 %sub.ptr.lhs.cast, %sub.ptr.rhs.cast
+ %sub.ptr.div = sdiv exact i64 %sub.ptr.sub, 8
+ ret i64 %sub.ptr.div
+}
+
+declare void @abort()
+
+; -------------------------------------------------------------------------
+; Test case for runtime check removal when accessing elements in a nested loop
+; (PR64881)
+
+
+define void @monkey(ptr noundef %arr, i32 noundef %len) {
+; CHECK-LABEL: define void @monkey
+; CHECK-SAME: (ptr nocapture noundef [[ARR:%.*]], i32 noundef [[LEN:%.*]]) local_unnamed_addr {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP8:%.*]] = icmp ugt i32 [[LEN]], 1
+; CHECK-NEXT: br i1 [[CMP8]], label [[FOR_BODY4_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; CHECK: for.body4.preheader:
+; CHECK-NEXT: [[I_09:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_COND_CLEANUP3:%.*]] ], [ 1, [[ENTRY:%.*]] ]
+; CHECK-NEXT: br label [[FOR_BODY4:%.*]]
+; CHECK: for.cond.cleanup:
+; CHECK-NEXT: ret void
+; CHECK: for.cond.cleanup3:
+; CHECK-NEXT: [[INC]] = add nuw i32 [[I_09]], 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC]], [[LEN]]
+; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY4_PREHEADER]], label [[FOR_COND_CLEANUP]]
+; CHECK: for.body4:
+; CHECK-NEXT: [[K_07:%.*]] = phi i32 [ [[DEC:%.*]], [[AT_EXIT:%.*]] ], [ [[I_09]], [[FOR_BODY4_PREHEADER]] ]
+; CHECK-NEXT: [[CMP_NOT_I:%.*]] = icmp ult i32 [[K_07]], [[LEN]]
+; CHECK-NEXT: br i1 [[CMP_NOT_I]], label [[AT_EXIT]], label [[IF_THEN_I:%.*]]
+; CHECK: if.then.i:
+; CHECK-NEXT: tail call void @abort()
+; CHECK-NEXT: unreachable
+; CHECK: at.exit:
+; CHECK-NEXT: [[IDX_EXT_I:%.*]] = zext i32 [[K_07]] to i64
+; CHECK-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IDX_EXT_I]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ADD_PTR_I]], align 4
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
+; CHECK-NEXT: store i32 [[ADD]], ptr [[ADD_PTR_I]], align 4
+; CHECK-NEXT: [[DEC]] = add i32 [[K_07]], -1
+; CHECK-NEXT: [[CMP2_NOT:%.*]] = icmp eq i32 [[DEC]], 0
+; CHECK-NEXT: br i1 [[CMP2_NOT]], label [[FOR_COND_CLEANUP3]], label [[FOR_BODY4]]
+;
+entry:
+ %arr.addr = alloca ptr, align 8
+ %len.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ %cleanup.dest.slot = alloca i32, align 4
+ %k = alloca i32, align 4
+ store ptr %arr, ptr %arr.addr, align 8
+ store i32 %len, ptr %len.addr, align 4
+ call void @llvm.lifetime.start.p0(i64 4, ptr %i) #3
+ store i32 1, ptr %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc5, %entry
+ %0 = load i32, ptr %i, align 4
+ %1 = load i32, ptr %len.addr, align 4
+ %cmp = icmp ult i32 %0, %1
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond
+ store i32 2, ptr %cleanup.dest.slot, align 4
+ call void @llvm.lifetime.end.p0(i64 4, ptr %i) #3
+ br label %for.end6
+
+for.body: ; preds = %for.cond
+ call void @llvm.lifetime.start.p0(i64 4, ptr %k) #3
+ %2 = load i32, ptr %i, align 4
+ store i32 %2, ptr %k, align 4
+ br label %for.cond1
+
+for.cond1: ; preds = %for.inc, %for.body
+ %3 = load i32, ptr %k, align 4
+ %cmp2 = icmp ugt i32 %3, 0
+ br i1 %cmp2, label %for.body4, label %for.cond.cleanup3
+
+for.cond.cleanup3: ; preds = %for.cond1
+ store i32 5, ptr %cleanup.dest.slot, align 4
+ call void @llvm.lifetime.end.p0(i64 4, ptr %k) #3
+ br label %for.end
+
+for.body4: ; preds = %for.cond1
+ %4 = load ptr, ptr %arr.addr, align 8
+ %5 = load i32, ptr %len.addr, align 4
+ %6 = load i32, ptr %k, align 4
+ %call = call noundef ptr @at(ptr noundef %4, i32 noundef %5, i32 noundef %6)
+ %7 = load i32, ptr %call, align 4
+ %add = add nsw i32 %7, 1
+ store i32 %add, ptr %call, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body4
+ %8 = load i32, ptr %k, align 4
+ %dec = add i32 %8, -1
+ store i32 %dec, ptr %k, align 4
+ br label %for.cond1
+
+for.end: ; preds = %for.cond.cleanup3
+ br label %for.inc5
+
+for.inc5: ; preds = %for.end
+ %9 = load i32, ptr %i, align 4
+ %inc = add i32 %9, 1
+ store i32 %inc, ptr %i, align 4
+ br label %for.cond
+
+for.end6: ; preds = %for.cond.cleanup
+ ret void
+}
+
+define internal noundef ptr @at(ptr noundef %arr, i32 noundef %len, i32 noundef %idx) {
+entry:
+ %arr.addr = alloca ptr, align 8
+ %len.addr = alloca i32, align 4
+ %idx.addr = alloca i32, align 4
+ store ptr %arr, ptr %arr.addr, align 8
+ store i32 %len, ptr %len.addr, align 4
+ store i32 %idx, ptr %idx.addr, align 4
+ %0 = load i32, ptr %idx.addr, align 4
+ %1 = load i32, ptr %len.addr, align 4
+ %cmp = icmp uge i32 %0, %1
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ call void @abort()
+ unreachable
+
+if.end: ; preds = %entry
+ %2 = load ptr, ptr %arr.addr, align 8
+ %3 = load i32, ptr %idx.addr, align 4
+ %idx.ext = zext i32 %3 to i64
+ %add.ptr = getelementptr inbounds i32, ptr %2, i64 %idx.ext
+ ret ptr %add.ptr
+}
More information about the llvm-commits mailing list