[llvm-branch-commits] [llvm] 4bb006d - [LV] Convert runtime diff check test to use opaque pointers.
Tobias Hieta via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Mon Aug 29 23:09:54 PDT 2022
Author: Florian Hahn
Date: 2022-08-30T08:08:28+02:00
New Revision: 4bb006d0d3b0a5010bcfa9bd63c10450767c2e37
URL: https://github.com/llvm/llvm-project/commit/4bb006d0d3b0a5010bcfa9bd63c10450767c2e37
DIFF: https://github.com/llvm/llvm-project/commit/4bb006d0d3b0a5010bcfa9bd63c10450767c2e37.diff
LOG: [LV] Convert runtime diff check test to use opaque pointers.
Modernize the test to make it easier to extend with up-to-date IR.
(cherry picked from commit 3b135ef446d56e8542d55a1b716e9413f50da7aa)
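For context, a minimal sketch (not part of this commit, names %p/%iv hypothetical) of what the conversion looks like. In the legacy typed-pointer form every pointer carries a pointee type; in the opaque-pointer form every pointer is simply ptr and the pointee type is carried by the accessing instruction:

    ; typed-pointer (legacy) form
    %gep = getelementptr inbounds i32, i32* %p, i64 %iv
    %val = load i32, i32* %gep

    ; opaque-pointer form: the pointee type lives on the gep/load, not on the pointer
    %gep = getelementptr inbounds i32, ptr %p, i64 %iv
    %val = load i32, ptr %gep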
Added:
Modified:
llvm/test/Transforms/LoopVectorize/runtime-checks-difference.ll
Removed:
################################################################################
diff --git a/llvm/test/Transforms/LoopVectorize/runtime-checks-difference.ll b/llvm/test/Transforms/LoopVectorize/runtime-checks-difference.ll
index 314422420459a..e214f04674c4c 100644
--- a/llvm/test/Transforms/LoopVectorize/runtime-checks-difference.ll
+++ b/llvm/test/Transforms/LoopVectorize/runtime-checks-difference.ll
@@ -3,11 +3,11 @@
target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
-define void @same_step_and_size(i32* %a, i32* %b, i64 %n) {
+define void @same_step_and_size(ptr %a, ptr %b, i64 %n) {
; CHECK-LABEL: @same_step_and_size(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A2:%.*]] = ptrtoint i32* [[A:%.*]] to i64
-; CHECK-NEXT: [[B1:%.*]] = ptrtoint i32* [[B:%.*]] to i64
+; CHECK-NEXT: [[A2:%.*]] = ptrtoint ptr [[A:%.*]] to i64
+; CHECK-NEXT: [[B1:%.*]] = ptrtoint ptr [[B:%.*]] to i64
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %scalar.ph, label %vector.memcheck
; CHECK: vector.memcheck:
@@ -20,11 +20,11 @@ entry:
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
- %gep.a = getelementptr inbounds i32, i32* %a, i64 %iv
- %l = load i32, i32* %gep.a
+ %gep.a = getelementptr inbounds i32, ptr %a, i64 %iv
+ %l = load i32, ptr %gep.a
%mul = mul nsw i32 %l, 3
- %gep.b = getelementptr inbounds i32, i32* %b, i64 %iv
- store i32 %mul, i32* %gep.b
+ %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
+ store i32 %mul, ptr %gep.b
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv.next, %n
br i1 %exitcond, label %exit, label %loop
@@ -33,11 +33,11 @@ exit:
ret void
}
-define void @same_step_and_size_no_dominance_between_accesses(i32* %a, i32* %b, i64 %n, i64 %x) {
+define void @same_step_and_size_no_dominance_between_accesses(ptr %a, ptr %b, i64 %n, i64 %x) {
; CHECK-LABEL: @same_step_and_size_no_dominance_between_accesses(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[B2:%.*]] = ptrtoint i32* [[B:%.*]] to i64
-; CHECK-NEXT: [[A1:%.*]] = ptrtoint i32* [[A:%.*]] to i64
+; CHECK-NEXT: [[B2:%.*]] = ptrtoint ptr [[B:%.*]] to i64
+; CHECK-NEXT: [[A1:%.*]] = ptrtoint ptr [[A:%.*]] to i64
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %scalar.ph, label %vector.memcheck
; CHECK: vector.memcheck:
@@ -54,13 +54,13 @@ loop:
br i1 %cmp, label %then, label %else
then:
- %gep.a = getelementptr inbounds i32, i32* %a, i64 %iv
- store i32 0, i32* %gep.a
+ %gep.a = getelementptr inbounds i32, ptr %a, i64 %iv
+ store i32 0, ptr %gep.a
br label %loop.latch
else:
- %gep.b = getelementptr inbounds i32, i32* %b, i64 %iv
- store i32 10, i32* %gep.b
+ %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
+ store i32 10, ptr %gep.b
br label %loop.latch
loop.latch:
@@ -72,20 +72,18 @@ exit:
ret void
}
-define void @different_steps_and_different_access_sizes(i16* %a, i32* %b, i64 %n) {
+define void @different_steps_and_different_access_sizes(ptr %a, ptr %b, i64 %n) {
; CHECK-LABEL: @different_steps_and_different_access_sizes(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[B1:%.*]] = bitcast i32* [[B:%.*]] to i8*
-; CHECK-NEXT: [[A3:%.*]] = bitcast i16* [[A:%.*]] to i8*
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %scalar.ph, label %vector.memcheck
; CHECK: vector.memcheck:
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, i32* [[B]], i64 [[N]]
-; CHECK-NEXT: [[SCEVGEP2:%.*]] = bitcast i32* [[SCEVGEP]] to i8*
-; CHECK-NEXT: [[SCEVGEP4:%.*]] = getelementptr i16, i16* [[A]], i64 [[N]]
-; CHECK-NEXT: [[SCEVGEP45:%.*]] = bitcast i16* [[SCEVGEP4]] to i8*
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult i8* [[B1]], [[SCEVGEP45]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult i8* [[A3]], [[SCEVGEP2]]
+; CHECK-NEXT: [[N_SHL_2:%.]] = shl i64 %n, 2
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr %b, i64 [[N_SHL_2]]
+; CHECK-NEXT: [[N_SHL_1:%.]] = shl i64 %n, 1
+; CHECK-NEXT: [[SCEVGEP4:%.*]] = getelementptr i8, ptr %a, i64 [[N_SHL_1]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr %b, [[SCEVGEP4]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr %a, [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %scalar.ph, label %vector.ph
;
@@ -94,12 +92,12 @@ entry:
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
- %gep.a = getelementptr inbounds i16, i16* %a, i64 %iv
- %l = load i16, i16* %gep.a
+ %gep.a = getelementptr inbounds i16, ptr %a, i64 %iv
+ %l = load i16, ptr %gep.a
%l.ext = sext i16 %l to i32
%mul = mul nsw i32 %l.ext, 3
- %gep.b = getelementptr inbounds i32, i32* %b, i64 %iv
- store i32 %mul, i32* %gep.b
+ %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
+ store i32 %mul, ptr %gep.b
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv.next, %n
br i1 %exitcond, label %exit, label %loop
@@ -108,11 +106,11 @@ exit:
ret void
}
-define void @steps_match_but_different_access_sizes_1([2 x i16]* %a, i32* %b, i64 %n) {
+define void @steps_match_but_different_access_sizes_1(ptr %a, ptr %b, i64 %n) {
; CHECK-LABEL: @steps_match_but_different_access_sizes_1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A2:%.*]] = ptrtoint [2 x i16]* [[A:%.*]] to i64
-; CHECK-NEXT: [[B1:%.*]] = ptrtoint i32* [[B:%.*]] to i64
+; CHECK-NEXT: [[A2:%.*]] = ptrtoint ptr [[A:%.*]] to i64
+; CHECK-NEXT: [[B1:%.*]] = ptrtoint ptr [[B:%.*]] to i64
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %scalar.ph, label %vector.memcheck
; CHECK: vector.memcheck:
@@ -126,12 +124,12 @@ entry:
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
- %gep.a = getelementptr inbounds [2 x i16], [2 x i16]* %a, i64 %iv, i64 1
- %l = load i16, i16* %gep.a
+ %gep.a = getelementptr inbounds [2 x i16], ptr %a, i64 %iv, i64 1
+ %l = load i16, ptr %gep.a
%l.ext = sext i16 %l to i32
%mul = mul nsw i32 %l.ext, 3
- %gep.b = getelementptr inbounds i32, i32* %b, i64 %iv
- store i32 %mul, i32* %gep.b
+ %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
+ store i32 %mul, ptr %gep.b
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv.next, %n
br i1 %exitcond, label %exit, label %loop
@@ -142,11 +140,11 @@ exit:
; Same as @steps_match_but_different_access_sizes_1, but with source and sink
; accesses flipped.
-define void @steps_match_but_different_access_sizes_2([2 x i16]* %a, i32* %b, i64 %n) {
+define void @steps_match_but_different_access_sizes_2(ptr %a, ptr %b, i64 %n) {
; CHECK-LABEL: @steps_match_but_different_access_sizes_2(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[B2:%.*]] = ptrtoint i32* [[B:%.*]] to i64
-; CHECK-NEXT: [[A1:%.*]] = ptrtoint [2 x i16]* [[A:%.*]] to i64
+; CHECK-NEXT: [[B2:%.*]] = ptrtoint ptr [[B:%.*]] to i64
+; CHECK-NEXT: [[A1:%.*]] = ptrtoint ptr [[A:%.*]] to i64
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %scalar.ph, label %vector.memcheck
; CHECK: vector.memcheck:
@@ -160,12 +158,12 @@ entry:
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
- %gep.b = getelementptr inbounds i32, i32* %b, i64 %iv
- %l = load i32, i32* %gep.b
+ %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
+ %l = load i32, ptr %gep.b
%mul = mul nsw i32 %l, 3
- %gep.a = getelementptr inbounds [2 x i16], [2 x i16]* %a, i64 %iv, i64 1
+ %gep.a = getelementptr inbounds [2 x i16], ptr %a, i64 %iv, i64 1
%trunc = trunc i32 %mul to i16
- store i16 %trunc, i16* %gep.a
+ store i16 %trunc, ptr %gep.a
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv.next, %n
br i1 %exitcond, label %exit, label %loop
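As the updated CHECK lines above show, with opaque pointers the memcheck bounds are emitted as i8 GEPs with an explicitly scaled byte offset instead of typed GEPs followed by bitcasts. A minimal sketch of that pattern for an i32 access range of %n elements (value names hypothetical):

    %n.bytes = shl i64 %n, 2                        ; %n * sizeof(i32) in bytes
    %end.b   = getelementptr i8, ptr %b, i64 %n.bytes
    %bound0  = icmp ult ptr %a, %end.b              ; does %a start before the end of %b's range?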