[llvm] eb5aeee - [test] Update BoundsChecking/simple.ll

Arthur Eubanks via llvm-commits llvm-commits@lists.llvm.org
Tue Aug 2 10:54:04 PDT 2022


Author: Arthur Eubanks
Date: 2022-08-02T10:49:38-07:00
New Revision: eb5aeee02f70369c1e9b15cfafb3d100494dbb28

URL: https://github.com/llvm/llvm-project/commit/eb5aeee02f70369c1e9b15cfafb3d100494dbb28
DIFF: https://github.com/llvm/llvm-project/commit/eb5aeee02f70369c1e9b15cfafb3d100494dbb28.diff

LOG: [test] Update BoundsChecking/simple.ll

Use opaque pointers and update_test_checks.py

Precommit a test

Added: 
    

Modified: 
    llvm/test/Instrumentation/BoundsChecking/simple.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/Instrumentation/BoundsChecking/simple.ll b/llvm/test/Instrumentation/BoundsChecking/simple.ll
index 1934d17bd3097..a2af719d02516 100644
--- a/llvm/test/Instrumentation/BoundsChecking/simple.ll
+++ b/llvm/test/Instrumentation/BoundsChecking/simple.ll
@@ -1,188 +1,415 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -passes=bounds-checking -S | FileCheck %s
 target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 
-@.str = private constant [8 x i8] c"abcdefg\00"   ; <[8 x i8]*>
+@.str = private constant [8 x i8] c"abcdefg\00"
 
-@.str_as1 = private addrspace(1) constant [8 x i8] c"abcdefg\00"   ; <[8 x i8] addrspace(1)*>
+@.str_as1 = private addrspace(1) constant [8 x i8] c"abcdefg\00"
 
 
-declare noalias i8* @malloc(i64) nounwind allocsize(0)
-declare noalias i8* @calloc(i64, i64) nounwind allocsize(0,1)
-declare noalias i8* @realloc(i8* nocapture allocptr, i64) nounwind allocsize(1)
+declare noalias ptr @malloc(i64) nounwind allocsize(0)
+declare noalias ptr @calloc(i64, i64) nounwind allocsize(0,1)
+declare noalias ptr @realloc(ptr nocapture allocptr, i64) nounwind allocsize(1)
 
-; CHECK: @f1
 define void @f1() nounwind {
-  %1 = tail call i8* @malloc(i64 32)
-  %2 = bitcast i8* %1 to i32*
-  %idx = getelementptr inbounds i32, i32* %2, i64 2
-; CHECK-NOT: trap
-  store i32 3, i32* %idx, align 4
+; CHECK-LABEL: @f1(
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call ptr @malloc(i64 32)
+; CHECK-NEXT:    [[IDX:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 2
+; CHECK-NEXT:    store i32 3, ptr [[IDX]], align 4
+; CHECK-NEXT:    ret void
+;
+  %1 = tail call ptr @malloc(i64 32)
+  %idx = getelementptr inbounds i32, ptr %1, i64 2
+  store i32 3, ptr %idx, align 4
   ret void
 }
 
-; CHECK: @f2
 define void @f2() nounwind {
-  %1 = tail call i8* @malloc(i64 32)
-  %2 = bitcast i8* %1 to i32*
-  %idx = getelementptr inbounds i32, i32* %2, i64 8
-; CHECK: trap
-  store i32 3, i32* %idx, align 4
+; CHECK-LABEL: @f2(
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call ptr @malloc(i64 32)
+; CHECK-NEXT:    [[IDX:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 8
+; CHECK-NEXT:    br label [[TRAP:%.*]]
+; CHECK:       2:
+; CHECK-NEXT:    store i32 3, ptr [[IDX]], align 4
+; CHECK-NEXT:    ret void
+; CHECK:       trap:
+; CHECK-NEXT:    call void @llvm.trap() #[[ATTR5:[0-9]+]]
+; CHECK-NEXT:    unreachable
+;
+  %1 = tail call ptr @malloc(i64 32)
+  %idx = getelementptr inbounds i32, ptr %1, i64 8
+  store i32 3, ptr %idx, align 4
   ret void
 }
 
-; CHECK: @f3
 define void @f3(i64 %x) nounwind {
-  %1 = tail call i8* @calloc(i64 4, i64 %x)
-  %2 = bitcast i8* %1 to i32*
-  %idx = getelementptr inbounds i32, i32* %2, i64 8
-; CHECK: mul i64 4, %
-; CHECK: sub i64 {{.*}}, 32
-; CHECK-NEXT: icmp ult i64 {{.*}}, 32
-; CHECK-NEXT: icmp ult i64 {{.*}}, 4
-; CHECK-NEXT: or i1
-; CHECK: trap
-  store i32 3, i32* %idx, align 4
+; CHECK-LABEL: @f3(
+; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 4, [[X:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call ptr @calloc(i64 4, i64 [[X]])
+; CHECK-NEXT:    [[IDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 8
+; CHECK-NEXT:    [[TMP3:%.*]] = sub i64 [[TMP1]], 32
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ult i64 [[TMP1]], 32
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ult i64 [[TMP3]], 4
+; CHECK-NEXT:    [[TMP6:%.*]] = or i1 [[TMP4]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = or i1 false, [[TMP6]]
+; CHECK-NEXT:    br i1 [[TMP7]], label [[TRAP:%.*]], label [[TMP8:%.*]]
+; CHECK:       8:
+; CHECK-NEXT:    store i32 3, ptr [[IDX]], align 4
+; CHECK-NEXT:    ret void
+; CHECK:       trap:
+; CHECK-NEXT:    call void @llvm.trap() #[[ATTR5]]
+; CHECK-NEXT:    unreachable
+;
+  %1 = tail call ptr @calloc(i64 4, i64 %x)
+  %idx = getelementptr inbounds i32, ptr %1, i64 8
+  store i32 3, ptr %idx, align 4
   ret void
 }
 
-; CHECK: @store_volatile
 define void @store_volatile(i64 %x) nounwind {
-  %1 = tail call i8* @calloc(i64 4, i64 %x)
-  %2 = bitcast i8* %1 to i32*
-  %idx = getelementptr inbounds i32, i32* %2, i64 8
-; CHECK-NOT: trap
-  store volatile i32 3, i32* %idx, align 4
+; CHECK-LABEL: @store_volatile(
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call ptr @calloc(i64 4, i64 [[X:%.*]])
+; CHECK-NEXT:    [[IDX:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 8
+; CHECK-NEXT:    store volatile i32 3, ptr [[IDX]], align 4
+; CHECK-NEXT:    ret void
+;
+  %1 = tail call ptr @calloc(i64 4, i64 %x)
+  %idx = getelementptr inbounds i32, ptr %1, i64 8
+  store volatile i32 3, ptr %idx, align 4
   ret void
 }
 
-; CHECK: @f4
 define void @f4(i64 %x) nounwind {
-  %1 = tail call i8* @realloc(i8* null, i64 %x) nounwind
-  %2 = bitcast i8* %1 to i32*
-  %idx = getelementptr inbounds i32, i32* %2, i64 8
-; CHECK: trap
-  %3 = load i32, i32* %idx, align 4
+; CHECK-LABEL: @f4(
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call ptr @realloc(ptr null, i64 [[X:%.*]]) #[[ATTR3:[0-9]+]]
+; CHECK-NEXT:    [[IDX:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 8
+; CHECK-NEXT:    [[TMP2:%.*]] = sub i64 [[X]], 32
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp ult i64 [[X]], 32
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ult i64 [[TMP2]], 4
+; CHECK-NEXT:    [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = or i1 false, [[TMP5]]
+; CHECK-NEXT:    br i1 [[TMP6]], label [[TRAP:%.*]], label [[TMP7:%.*]]
+; CHECK:       7:
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr [[IDX]], align 4
+; CHECK-NEXT:    ret void
+; CHECK:       trap:
+; CHECK-NEXT:    call void @llvm.trap() #[[ATTR5]]
+; CHECK-NEXT:    unreachable
+;
+  %1 = tail call ptr @realloc(ptr null, i64 %x) nounwind
+  %idx = getelementptr inbounds i32, ptr %1, i64 8
+  %2 = load i32, ptr %idx, align 4
   ret void
 }
 
-; CHECK: @f5
 define void @f5(i64 %x) nounwind {
-  %idx = getelementptr inbounds [8 x i8], [8 x i8]* @.str, i64 0, i64 %x
-; CHECK: trap
-  %1 = load i8, i8* %idx, align 4
+; CHECK-LABEL: @f5(
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 0, [[X:%.*]]
+; CHECK-NEXT:    [[IDX:%.*]] = getelementptr inbounds [8 x i8], ptr @.str, i64 0, i64 [[X]]
+; CHECK-NEXT:    [[TMP2:%.*]] = sub i64 8, [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp ult i64 8, [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ult i64 [[TMP2]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
+; CHECK-NEXT:    br i1 [[TMP5]], label [[TRAP:%.*]], label [[TMP6:%.*]]
+; CHECK:       6:
+; CHECK-NEXT:    [[TMP7:%.*]] = load i8, ptr [[IDX]], align 4
+; CHECK-NEXT:    ret void
+; CHECK:       trap:
+; CHECK-NEXT:    call void @llvm.trap() #[[ATTR5]]
+; CHECK-NEXT:    unreachable
+;
+  %idx = getelementptr inbounds [8 x i8], ptr @.str, i64 0, i64 %x
+  %1 = load i8, ptr %idx, align 4
   ret void
 }
 
 define void @f5_as1(i64 %x) nounwind {
-; CHECK: @f5_as1
-  %idx = getelementptr inbounds [8 x i8], [8 x i8] addrspace(1)* @.str_as1, i64 0, i64 %x
-  ; CHECK: sub i16
-  ; CHECK: icmp ult i16
-; CHECK: trap
-  %1 = load i8, i8 addrspace(1)* %idx, align 4
+; CHECK-LABEL: @f5_as1(
+; CHECK-NEXT:    [[X_C:%.*]] = trunc i64 [[X:%.*]] to i16
+; CHECK-NEXT:    [[TMP1:%.*]] = add i16 0, [[X_C]]
+; CHECK-NEXT:    [[IDX:%.*]] = getelementptr inbounds [8 x i8], ptr addrspace(1) @.str_as1, i64 0, i64 [[X]]
+; CHECK-NEXT:    [[TMP2:%.*]] = sub i16 8, [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp ult i16 8, [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ult i16 [[TMP2]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
+; CHECK-NEXT:    br i1 [[TMP5]], label [[TRAP:%.*]], label [[TMP6:%.*]]
+; CHECK:       6:
+; CHECK-NEXT:    [[TMP7:%.*]] = load i8, ptr addrspace(1) [[IDX]], align 4
+; CHECK-NEXT:    ret void
+; CHECK:       trap:
+; CHECK-NEXT:    call void @llvm.trap() #[[ATTR5]]
+; CHECK-NEXT:    unreachable
+;
+  %idx = getelementptr inbounds [8 x i8], ptr addrspace(1) @.str_as1, i64 0, i64 %x
+  %1 = load i8, ptr addrspace(1) %idx, align 4
   ret void
 }
 
-; CHECK: @f6
 define void @f6(i64 %x) nounwind {
+; CHECK-LABEL: @f6(
+; CHECK-NEXT:    [[TMP1:%.*]] = alloca i128, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i128, ptr [[TMP1]], align 4
+; CHECK-NEXT:    ret void
+;
   %1 = alloca i128
-; CHECK-NOT: trap
-  %2 = load i128, i128* %1, align 4
+  %2 = load i128, ptr %1, align 4
   ret void
 }
 
-; CHECK: @f7
 define void @f7(i64 %x) nounwind {
+; CHECK-LABEL: @f7(
+; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 16, [[X:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i128, i64 [[X]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = sub i64 [[TMP1]], 0
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ult i64 [[TMP3]], 16
+; CHECK-NEXT:    [[TMP5:%.*]] = or i1 false, [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = or i1 false, [[TMP5]]
+; CHECK-NEXT:    br i1 [[TMP6]], label [[TRAP:%.*]], label [[TMP7:%.*]]
+; CHECK:       7:
+; CHECK-NEXT:    [[TMP8:%.*]] = load i128, ptr [[TMP2]], align 4
+; CHECK-NEXT:    ret void
+; CHECK:       trap:
+; CHECK-NEXT:    call void @llvm.trap() #[[ATTR5]]
+; CHECK-NEXT:    unreachable
+;
   %1 = alloca i128, i64 %x
-; CHECK: mul i64 16,
-; CHECK: trap
-  %2 = load i128, i128* %1, align 4
+  %2 = load i128, ptr %1, align 4
   ret void
 }
 
-; CHECK: @f8
 define void @f8() nounwind {
+; CHECK-LABEL: @f8(
+; CHECK-NEXT:    [[TMP1:%.*]] = alloca i128, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i128, align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = select i1 undef, ptr [[TMP1]], ptr [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i128, ptr [[TMP3]], align 4
+; CHECK-NEXT:    ret void
+;
   %1 = alloca i128
   %2 = alloca i128
-  %3 = select i1 undef, i128* %1, i128* %2
-; CHECK-NOT: trap
-  %4 = load i128, i128* %3, align 4
+  %3 = select i1 undef, ptr %1, ptr %2
+  %4 = load i128, ptr %3, align 4
   ret void
 }
 
-; CHECK: @f9
-define void @f9(i128* %arg) nounwind {
+define void @f9(ptr %arg) nounwind {
+; CHECK-LABEL: @f9(
+; CHECK-NEXT:    [[TMP1:%.*]] = alloca i128, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = select i1 undef, ptr [[ARG:%.*]], ptr [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = load i128, ptr [[TMP2]], align 4
+; CHECK-NEXT:    ret void
+;
   %1 = alloca i128
-  %2 = select i1 undef, i128* %arg, i128* %1
-; CHECK-NOT: trap
-  %3 = load i128, i128* %2, align 4
+  %2 = select i1 undef, ptr %arg, ptr %1
+  %3 = load i128, ptr %2, align 4
   ret void
 }
 
-; CHECK: @f10
 define void @f10(i64 %x, i64 %y) nounwind {
+; CHECK-LABEL: @f10(
+; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 16, [[X:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i128, i64 [[X]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 16, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = alloca i128, i64 [[Y]], align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = select i1 undef, i64 [[TMP1]], i64 [[TMP3]]
+; CHECK-NEXT:    [[TMP6:%.*]] = select i1 undef, ptr [[TMP2]], ptr [[TMP4]]
+; CHECK-NEXT:    [[TMP7:%.*]] = sub i64 [[TMP5]], 0
+; CHECK-NEXT:    [[TMP8:%.*]] = icmp ult i64 [[TMP7]], 16
+; CHECK-NEXT:    [[TMP9:%.*]] = or i1 false, [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = or i1 false, [[TMP9]]
+; CHECK-NEXT:    br i1 [[TMP10]], label [[TRAP:%.*]], label [[TMP11:%.*]]
+; CHECK:       11:
+; CHECK-NEXT:    [[TMP12:%.*]] = load i128, ptr [[TMP6]], align 4
+; CHECK-NEXT:    ret void
+; CHECK:       trap:
+; CHECK-NEXT:    call void @llvm.trap() #[[ATTR5]]
+; CHECK-NEXT:    unreachable
+;
   %1 = alloca i128, i64 %x
   %2 = alloca i128, i64 %y
-  %3 = select i1 undef, i128* %1, i128* %2
-; CHECK: select
-; CHECK: select
-; CHECK: trap
-  %4 = load i128, i128* %3, align 4
+  %3 = select i1 undef, ptr %1, ptr %2
+  %4 = load i128, ptr %3, align 4
   ret void
 }
 
-; CHECK: @f11
-define void @f11(i128* byval(i128) %x) nounwind {
-  %1 = bitcast i128* %x to i8*
-  %2 = getelementptr inbounds i8, i8* %1, i64 16
-; CHECK: br label
-  %3 = load i8, i8* %2, align 4
+define void @f11(ptr byval(i128) %x) nounwind {
+; CHECK-LABEL: @f11(
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[X:%.*]], i64 16
+; CHECK-NEXT:    br label [[TRAP:%.*]]
+; CHECK:       2:
+; CHECK-NEXT:    [[TMP3:%.*]] = load i8, ptr [[TMP1]], align 4
+; CHECK-NEXT:    ret void
+; CHECK:       trap:
+; CHECK-NEXT:    call void @llvm.trap() #[[ATTR5]]
+; CHECK-NEXT:    unreachable
+;
+  %1 = getelementptr inbounds i8, ptr %x, i64 16
+  %2 = load i8, ptr %1, align 4
   ret void
 }
 
-; CHECK: @f11_as1
-define void @f11_as1(i128 addrspace(1)* byval(i128) %x) nounwind {
-  %1 = bitcast i128 addrspace(1)* %x to i8 addrspace(1)*
-  %2 = getelementptr inbounds i8, i8 addrspace(1)* %1, i16 16
-; CHECK: br label
-  %3 = load i8, i8 addrspace(1)* %2, align 4
+define void @f11_as1(ptr addrspace(1) byval(i128) %x) nounwind {
+; CHECK-LABEL: @f11_as1(
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[X:%.*]], i16 16
+; CHECK-NEXT:    br label [[TRAP:%.*]]
+; CHECK:       2:
+; CHECK-NEXT:    [[TMP3:%.*]] = load i8, ptr addrspace(1) [[TMP1]], align 4
+; CHECK-NEXT:    ret void
+; CHECK:       trap:
+; CHECK-NEXT:    call void @llvm.trap() #[[ATTR5]]
+; CHECK-NEXT:    unreachable
+;
+  %1 = getelementptr inbounds i8, ptr addrspace(1) %x, i16 16
+  %2 = load i8, ptr addrspace(1) %1, align 4
   ret void
 }
 
-; CHECK: @f12
 define i64 @f12(i64 %x, i64 %y) nounwind {
-  %1 = tail call i8* @calloc(i64 1, i64 %x)
-; CHECK: mul i64 %y, 8
-; CHECK: trap
-  %2 = bitcast i8* %1 to i64*
-  %3 = getelementptr inbounds i64, i64* %2, i64 %y
-  %4 = load i64, i64* %3, align 8
-  ret i64 %4
+; CHECK-LABEL: @f12(
+; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 1, [[X:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call ptr @calloc(i64 1, i64 [[X]])
+; CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[Y:%.*]], 8
+; CHECK-NEXT:    [[TMP3:%.*]] = add i64 0, [[DOTIDX]]
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i64 [[Y]]
+; CHECK-NEXT:    [[TMP5:%.*]] = sub i64 [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[TMP6:%.*]] = icmp ult i64 [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[TMP7:%.*]] = icmp ult i64 [[TMP5]], 8
+; CHECK-NEXT:    [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = icmp slt i64 [[TMP3]], 0
+; CHECK-NEXT:    [[TMP10:%.*]] = or i1 [[TMP9]], [[TMP8]]
+; CHECK-NEXT:    br i1 [[TMP10]], label [[TRAP:%.*]], label [[TMP11:%.*]]
+; CHECK:       11:
+; CHECK-NEXT:    [[TMP12:%.*]] = load i64, ptr [[TMP4]], align 8
+; CHECK-NEXT:    ret i64 [[TMP12]]
+; CHECK:       trap:
+; CHECK-NEXT:    call void @llvm.trap() #[[ATTR5]]
+; CHECK-NEXT:    unreachable
+;
+  %1 = tail call ptr @calloc(i64 1, i64 %x)
+  %2 = getelementptr inbounds i64, ptr %1, i64 %y
+  %3 = load i64, ptr %2, align 8
+  ret i64 %3
 }
 
-; CHECK: @load_volatile
 define i64 @load_volatile(i64 %x, i64 %y) nounwind {
-  %1 = tail call i8* @calloc(i64 1, i64 %x)
-; CHECK-NOT: trap
-  %2 = bitcast i8* %1 to i64*
-  %3 = getelementptr inbounds i64, i64* %2, i64 %y
-  %4 = load volatile i64, i64* %3, align 8
-  ret i64 %4
+; CHECK-LABEL: @load_volatile(
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call ptr @calloc(i64 1, i64 [[X:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i64 [[Y:%.*]]
+; CHECK-NEXT:    [[TMP3:%.*]] = load volatile i64, ptr [[TMP2]], align 8
+; CHECK-NEXT:    ret i64 [[TMP3]]
+;
+  %1 = tail call ptr @calloc(i64 1, i64 %x)
+  %2 = getelementptr inbounds i64, ptr %1, i64 %y
+  %3 = load volatile i64, ptr %2, align 8
+  ret i64 %3
 }
 
 ; PR17402
-; CHECK-LABEL: @f13
 define void @f13() nounwind {
+; CHECK-LABEL: @f13(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br label [[ALIVE:%.*]]
+; CHECK:       dead:
+; CHECK-NEXT:    [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, ptr [[INCDEC_PTR]], i64 1
+; CHECK-NEXT:    [[L:%.*]] = load i32, ptr [[INCDEC_PTR]], align 4
+; CHECK-NEXT:    br label [[ALIVE]]
+; CHECK:       alive:
+; CHECK-NEXT:    ret void
+;
 entry:
   br label %alive
 
 dead:
   ; Self-refential GEPs can occur in dead code.
-  %incdec.ptr = getelementptr inbounds i32, i32* %incdec.ptr, i64 1
-  ; CHECK: %incdec.ptr = getelementptr inbounds i32, i32* %incdec.ptr
-  %l = load i32, i32* %incdec.ptr
+  %incdec.ptr = getelementptr inbounds i32, ptr %incdec.ptr, i64 1
+  %l = load i32, ptr %incdec.ptr
   br label %alive
 
 alive:
   ret void
 }
+
+; Check that merging sizes in a phi works.
+; FIXME: bounds-checking thinks that %alloc has an underlying size of 0.
+define i8 @f14(i1 %i) {
+; CHECK-LABEL: @f14(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br i1 [[I:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
+; CHECK:       bb1:
+; CHECK-NEXT:    [[A:%.*]] = alloca [32 x i8], align 1
+; CHECK-NEXT:    [[G:%.*]] = getelementptr i8, ptr [[A]], i32 32
+; CHECK-NEXT:    br label [[BB2]]
+; CHECK:       bb2:
+; CHECK-NEXT:    [[ALLOC:%.*]] = phi ptr [ null, [[ENTRY:%.*]] ], [ [[G]], [[BB1]] ]
+; CHECK-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ -4, [[BB1]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = add i64 0, [[IND]]
+; CHECK-NEXT:    [[P:%.*]] = getelementptr i8, ptr [[ALLOC]], i64 [[IND]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i64 0, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i64 0, [[TMP0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp ult i64 [[TMP1]], 1
+; CHECK-NEXT:    [[TMP4:%.*]] = or i1 [[TMP2]], [[TMP3]]
+; CHECK-NEXT:    br i1 [[TMP4]], label [[TRAP:%.*]], label [[TMP5:%.*]]
+; CHECK:       5:
+; CHECK-NEXT:    [[RET:%.*]] = load i8, ptr [[P]], align 1
+; CHECK-NEXT:    ret i8 [[RET]]
+; CHECK:       trap:
+; CHECK-NEXT:    call void @llvm.trap() #[[ATTR5]]
+; CHECK-NEXT:    unreachable
+;
+entry:
+  br i1 %i, label %bb1, label %bb2
+
+bb1:
+  %a = alloca [32 x i8]
+  %g = getelementptr i8, ptr %a, i32 32
+  br label %bb2
+
+bb2:
+  %alloc = phi ptr [ null, %entry ], [ %g, %bb1 ]
+  %ind = phi i64 [ 0, %entry ], [ -4, %bb1 ]
+  %p = getelementptr i8, ptr %alloc, i64 %ind
+  %ret = load i8, ptr %p
+  ret i8 %ret
+}
+
+; Check that merging offsets in a phi works.
+define i8 @f15(i1 %i) {
+; CHECK-LABEL: @f15(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[A1:%.*]] = alloca [32 x i8], align 1
+; CHECK-NEXT:    [[G1:%.*]] = getelementptr i8, ptr [[A1]], i32 100
+; CHECK-NEXT:    br i1 [[I:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
+; CHECK:       bb1:
+; CHECK-NEXT:    [[A2:%.*]] = alloca [32 x i8], align 1
+; CHECK-NEXT:    [[G2:%.*]] = getelementptr i8, ptr [[A2]], i32 16
+; CHECK-NEXT:    br label [[BB2]]
+; CHECK:       bb2:
+; CHECK-NEXT:    [[TMP0:%.*]] = phi i64 [ 100, [[ENTRY:%.*]] ], [ 16, [[BB1]] ]
+; CHECK-NEXT:    [[ALLOC:%.*]] = phi ptr [ [[G1]], [[ENTRY]] ], [ [[G2]], [[BB1]] ]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i64 32, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i64 32, [[TMP0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp ult i64 [[TMP1]], 1
+; CHECK-NEXT:    [[TMP4:%.*]] = or i1 [[TMP2]], [[TMP3]]
+; CHECK-NEXT:    br i1 [[TMP4]], label [[TRAP:%.*]], label [[TMP5:%.*]]
+; CHECK:       5:
+; CHECK-NEXT:    [[RET:%.*]] = load i8, ptr [[ALLOC]], align 1
+; CHECK-NEXT:    ret i8 [[RET]]
+; CHECK:       trap:
+; CHECK-NEXT:    call void @llvm.trap() #[[ATTR5]]
+; CHECK-NEXT:    unreachable
+;
+entry:
+  %a1 = alloca [32 x i8]
+  %g1 = getelementptr i8, ptr %a1, i32 100
+  br i1 %i, label %bb1, label %bb2
+
+bb1:
+  %a2 = alloca [32 x i8]
+  %g2 = getelementptr i8, ptr %a2, i32 16
+  br label %bb2
+
+bb2:
+  %alloc = phi ptr [ %g1, %entry ], [ %g2, %bb1 ]
+  %ret = load i8, ptr %alloc
+  ret i8 %ret
+}


        


More information about the llvm-commits mailing list