[llvm] 4e49c9d - [MemDep] Convert tests to opaque pointers (NFC)
Nikita Popov via llvm-commits
llvm-commits at lists.llvm.org
Fri Dec 16 03:11:53 PST 2022
Author: Nikita Popov
Date: 2022-12-16T12:11:31+01:00
New Revision: 4e49c9da14f75f47ca5cb0b4290d9c1c30d3d060
URL: https://github.com/llvm/llvm-project/commit/4e49c9da14f75f47ca5cb0b4290d9c1c30d3d060
DIFF: https://github.com/llvm/llvm-project/commit/4e49c9da14f75f47ca5cb0b4290d9c1c30d3d060.diff
LOG: [MemDep] Convert tests to opaque pointers (NFC)
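Opaque pointers replace every typed pointer (i8*, i32**, function-pointer
types, and so on) with the single type ptr, so the pointee type appears only
on the operations that need it. A minimal before/after sketch of the rewrite
pattern (a hypothetical function, not one of the tests below):

    ; Typed-pointer form (pre-conversion syntax):
    define i8 @example_typed(i8* %p) {
      %v = load i8, i8* %p
      ret i8 %v
    }

    ; Opaque-pointer form: 'ptr' carries no pointee type; the loaded type
    ; is stated on the load itself.
    define i8 @example_opaque(ptr %p) {
      %v = load i8, ptr %p
      ret i8 %v
    }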
Added:
Modified:
llvm/test/Analysis/MemoryDependenceAnalysis/InvariantLoad.ll
llvm/test/Analysis/MemoryDependenceAnalysis/invalidation.ll
llvm/test/Analysis/MemoryDependenceAnalysis/invariant.group-bug.ll
llvm/test/Analysis/MemoryDependenceAnalysis/memdep-block-scan-limit.ll
llvm/test/Analysis/MemoryDependenceAnalysis/memdep_requires_dominator_tree.ll
llvm/test/Analysis/MemoryDependenceAnalysis/memdep_with_tbaa.ll
llvm/test/Analysis/MemoryDependenceAnalysis/reorder-over-store-atomic.ll
llvm/test/Analysis/MemoryDependenceAnalysis/reorder-volatile.ll
Removed:
################################################################################
diff --git a/llvm/test/Analysis/MemoryDependenceAnalysis/InvariantLoad.ll b/llvm/test/Analysis/MemoryDependenceAnalysis/InvariantLoad.ll
index cb6da3cdb882..60c97b4c275a 100644
--- a/llvm/test/Analysis/MemoryDependenceAnalysis/InvariantLoad.ll
+++ b/llvm/test/Analysis/MemoryDependenceAnalysis/InvariantLoad.ll
@@ -4,27 +4,27 @@
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128-ni:1"
target triple = "x86_64-unknown-linux-gnu"
-declare void @llvm.memset.p0i8.i8(i8*, i8, i32, i1)
-declare void @foo(i8*)
+declare void @llvm.memset.p0.i8(ptr, i8, i32, i1)
+declare void @foo(ptr)
define i8 @test(i1 %cmp) {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P:%.*]] = alloca i8
-; CHECK-NEXT: store i8 5, i8* [[P]]
+; CHECK-NEXT: store i8 5, ptr [[P]]
; CHECK-NEXT: br label [[HEADER:%.*]]
; CHECK: header:
; CHECK-NEXT: [[V:%.*]] = phi i8 [ 5, [[ENTRY:%.*]] ], [ -5, [[ALIVE:%.*]] ]
; CHECK-NEXT: [[I:%.*]] = phi i8 [ 0, [[ENTRY]] ], [ [[I_INC:%.*]], [[ALIVE]] ]
; CHECK-NEXT: br i1 [[CMP:%.*]], label [[ALIVE]], label [[DEAD:%.*]]
; CHECK: dead:
-; CHECK-NEXT: call void @foo(i8* [[P]])
+; CHECK-NEXT: call void @foo(ptr [[P]])
; CHECK-NEXT: [[I_1:%.*]] = add i8 [[I]], [[V]]
; CHECK-NEXT: br label [[ALIVE]]
; CHECK: alive:
; CHECK-NEXT: [[I_2:%.*]] = phi i8 [ [[I]], [[HEADER]] ], [ [[I_1]], [[DEAD]] ]
-; CHECK-NEXT: store i8 -5, i8* [[P]]
-; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 1 [[P]], i8 0, i32 1, i1 false)
+; CHECK-NEXT: store i8 -5, ptr [[P]]
+; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 1 [[P]], i8 0, i32 1, i1 false)
; CHECK-NEXT: [[I_INC]] = add i8 [[I_2]], 1
; CHECK-NEXT: [[CMP_LOOP:%.*]] = icmp ugt i8 [[I_INC]], 100
; CHECK-NEXT: br i1 [[CMP_LOOP]], label [[EXIT:%.*]], label [[HEADER]]
@@ -34,41 +34,40 @@ define i8 @test(i1 %cmp) {
entry:
%p = alloca i8
- %addr = getelementptr inbounds i8, i8* %p, i64 0
- store i8 5, i8* %addr
+ store i8 5, ptr %p
br label %header
header:
%i = phi i8 [0, %entry], [%i.inc, %backedge]
br i1 %cmp, label %alive, label %dead
dead:
- call void @foo(i8* %p)
- %v = load i8, i8* %addr, !invariant.load !1
+ call void @foo(ptr %p)
+ %v = load i8, ptr %p, !invariant.load !1
%i.1 = add i8 %i, %v
br label %alive
alive:
%i.2 = phi i8 [%i, %header], [%i.1, %dead]
- store i8 -5, i8* %addr
+ store i8 -5, ptr %p
br label %backedge
backedge:
- call void @llvm.memset.p0i8.i8(i8 * align 1 %p, i8 0, i32 1, i1 false)
+ call void @llvm.memset.p0.i8(ptr align 1 %p, i8 0, i32 1, i1 false)
%i.inc = add i8 %i.2, 1
%cmp.loop = icmp ugt i8 %i.inc, 100
br i1 %cmp.loop, label %exit, label %header
exit:
- %res = load i8, i8* %addr
+ %res = load i8, ptr %p
ret i8 %res
}
; Check that the first two loads are not optimized out, while the one marked
; with invariant.load reuses %res1.
-define i8 @test2(i1 %cmp, i8 *%p) {
+define i8 @test2(i1 %cmp, ptr %p) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[RES1:%.*]] = load i8, i8* [[P:%.*]], align 1
-; CHECK-NEXT: call void @foo(i8* [[P]])
+; CHECK-NEXT: [[RES1:%.*]] = load i8, ptr [[P:%.*]], align 1
+; CHECK-NEXT: call void @foo(ptr [[P]])
; CHECK-NEXT: br i1 [[CMP:%.*]], label [[B2:%.*]], label [[B1:%.*]]
; CHECK: b1:
-; CHECK-NEXT: [[RES2:%.*]] = load i8, i8* [[P]]
+; CHECK-NEXT: [[RES2:%.*]] = load i8, ptr [[P]]
; CHECK-NEXT: [[RES3:%.*]] = add i8 [[RES1]], [[RES2]]
; CHECK-NEXT: br label [[ALIVE:%.*]]
; CHECK: b2:
@@ -80,15 +79,15 @@ define i8 @test2(i1 %cmp, i8 *%p) {
;
entry:
- %res1 = load i8, i8* %p
- call void @foo(i8 *%p)
+ %res1 = load i8, ptr %p
+ call void @foo(ptr %p)
br i1 %cmp, label %b2, label %b1
b1:
- %res2 = load i8, i8* %p
+ %res2 = load i8, ptr %p
%res3 = add i8 %res1, %res2
br label %alive
b2:
- %v = load i8, i8* %p, !invariant.load !1
+ %v = load i8, ptr %p, !invariant.load !1
%res.dead = add i8 %v, %res1
br label %alive
alive:
@@ -99,14 +98,14 @@ alive:
; This is essentially the same test case as the one above, but with %b1 and %b2
; swapped in the "br i1 %cmp, label %b1, label %b2" instruction. That helps
; ensure that the results don't depend on visiting order.
-define i8 @test3(i1 %cmp, i8 *%p) {
+define i8 @test3(i1 %cmp, ptr %p) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[RES1:%.*]] = load i8, i8* [[P:%.*]], align 1
-; CHECK-NEXT: call void @foo(i8* [[P]])
+; CHECK-NEXT: [[RES1:%.*]] = load i8, ptr [[P:%.*]], align 1
+; CHECK-NEXT: call void @foo(ptr [[P]])
; CHECK-NEXT: br i1 [[CMP:%.*]], label [[B1:%.*]], label [[B2:%.*]]
; CHECK: b1:
-; CHECK-NEXT: [[RES2:%.*]] = load i8, i8* [[P]]
+; CHECK-NEXT: [[RES2:%.*]] = load i8, ptr [[P]]
; CHECK-NEXT: [[RES3:%.*]] = add i8 [[RES1]], [[RES2]]
; CHECK-NEXT: br label [[ALIVE:%.*]]
; CHECK: b2:
@@ -117,15 +116,15 @@ define i8 @test3(i1 %cmp, i8 *%p) {
; CHECK-NEXT: ret i8 [[RES_PHI]]
;
entry:
- %res1 = load i8, i8* %p
- call void @foo(i8 *%p)
+ %res1 = load i8, ptr %p
+ call void @foo(ptr %p)
br i1 %cmp, label %b1, label %b2
b1:
- %res2 = load i8, i8* %p
+ %res2 = load i8, ptr %p
%res3 = add i8 %res1, %res2
br label %alive
b2:
- %v = load i8, i8* %p, !invariant.load !1
+ %v = load i8, ptr %p, !invariant.load !1
%res.dead = add i8 %v, %res1
br label %alive
alive:
@@ -139,32 +138,32 @@ alive:
define void @test4() {
; CHECK-LABEL: @test4(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load float, float* inttoptr (i64 8 to float*), align 4
+; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr inttoptr (i64 8 to ptr), align 4
; CHECK-NEXT: [[TMP1:%.*]] = fmul float [[TMP0]], [[TMP0]]
; CHECK-NEXT: br label [[FUSION_LOOP_HEADER_DIM_1_PREHEADER:%.*]]
; CHECK: fusion.loop_header.dim.1.preheader:
; CHECK-NEXT: [[TMP2:%.*]] = phi float [ [[TMP0]], [[ENTRY:%.*]] ], [ [[DOTPRE:%.*]], [[FUSION_LOOP_HEADER_DIM_1_PREHEADER]] ]
; CHECK-NEXT: [[FUSION_INVAR_ADDRESS_DIM_0_03:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INVAR_INC3:%.*]], [[FUSION_LOOP_HEADER_DIM_1_PREHEADER]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [2 x [1 x [4 x float]]], [2 x [1 x [4 x float]]]* null, i64 0, i64 [[FUSION_INVAR_ADDRESS_DIM_0_03]], i64 0, i64 2
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [2 x [1 x [4 x float]]], ptr null, i64 0, i64 [[FUSION_INVAR_ADDRESS_DIM_0_03]], i64 0, i64 2
; CHECK-NEXT: [[TMP4:%.*]] = fmul float [[TMP2]], [[TMP2]]
; CHECK-NEXT: [[INVAR_INC3]] = add nuw nsw i64 [[FUSION_INVAR_ADDRESS_DIM_0_03]], 1
-; CHECK-NEXT: [[DOTPHI_TRANS_INSERT:%.*]] = getelementptr inbounds [2 x [1 x [4 x float]]], [2 x [1 x [4 x float]]]* null, i64 0, i64 [[INVAR_INC3]], i64 0, i64 2
-; CHECK-NEXT: [[DOTPRE]] = load float, float* [[DOTPHI_TRANS_INSERT]], align 4, !invariant.load !0
+; CHECK-NEXT: [[DOTPHI_TRANS_INSERT:%.*]] = getelementptr inbounds [2 x [1 x [4 x float]]], ptr null, i64 0, i64 [[INVAR_INC3]], i64 0, i64 2
+; CHECK-NEXT: [[DOTPRE]] = load float, ptr [[DOTPHI_TRANS_INSERT]], align 4, !invariant.load !0
; CHECK-NEXT: br label [[FUSION_LOOP_HEADER_DIM_1_PREHEADER]]
;
entry:
- %0 = getelementptr inbounds [2 x [1 x [4 x float]]], [2 x [1 x [4 x float]]]* null, i64 0, i64 0, i64 0, i64 2
- %1 = load float, float* %0, align 4
+ %0 = getelementptr inbounds [2 x [1 x [4 x float]]], ptr null, i64 0, i64 0, i64 0, i64 2
+ %1 = load float, ptr %0, align 4
%2 = fmul float %1, %1
br label %fusion.loop_header.dim.1.preheader
fusion.loop_header.dim.1.preheader: ; preds = %fusion.loop_header.dim.1.preheader, %entry
%fusion.invar_address.dim.0.03 = phi i64 [ 0, %entry ], [ %invar.inc3, %fusion.loop_header.dim.1.preheader ]
- %3 = getelementptr inbounds [2 x [1 x [4 x float]]], [2 x [1 x [4 x float]]]* null, i64 0, i64 %fusion.invar_address.dim.0.03, i64 0, i64 2
- %4 = load float, float* %3, align 4, !invariant.load !1
+ %3 = getelementptr inbounds [2 x [1 x [4 x float]]], ptr null, i64 0, i64 %fusion.invar_address.dim.0.03, i64 0, i64 2
+ %4 = load float, ptr %3, align 4, !invariant.load !1
%5 = fmul float %4, %4
- %6 = getelementptr inbounds [2 x [1 x [4 x float]]], [2 x [1 x [4 x float]]]* null, i64 0, i64 %fusion.invar_address.dim.0.03, i64 0, i64 2
- %7 = load float, float* %6, align 4, !invariant.load !1
+ %6 = getelementptr inbounds [2 x [1 x [4 x float]]], ptr null, i64 0, i64 %fusion.invar_address.dim.0.03, i64 0, i64 2
+ %7 = load float, ptr %6, align 4, !invariant.load !1
%8 = fmul float %7, %7
%invar.inc3 = add nuw nsw i64 %fusion.invar_address.dim.0.03, 1
br label %fusion.loop_header.dim.1.preheader
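The test file above leans on !invariant.load metadata, which asserts that the
location's value never changes at any point where it is known dereferenceable,
so GVN may forward a loaded value across calls and memsets that would
otherwise clobber it. A minimal sketch of the effect under that assumption
(hypothetical function, in the opaque-pointer syntax the tests now use):

    declare void @clobber(ptr)

    define i8 @forward_invariant(ptr %p) {
      %v1 = load i8, ptr %p, !invariant.load !0
      call void @clobber(ptr %p)
      ; The metadata lets GVN reuse %v1 here despite the intervening call.
      %v2 = load i8, ptr %p, !invariant.load !0
      %s = add i8 %v1, %v2
      ret i8 %s
    }

    !0 = !{}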
diff --git a/llvm/test/Analysis/MemoryDependenceAnalysis/invalidation.ll b/llvm/test/Analysis/MemoryDependenceAnalysis/invalidation.ll
index 16d17161ad21..894e60d319f7 100644
--- a/llvm/test/Analysis/MemoryDependenceAnalysis/invalidation.ll
+++ b/llvm/test/Analysis/MemoryDependenceAnalysis/invalidation.ll
@@ -25,7 +25,7 @@
; CHECK-DT-INVALIDATE: Running analysis: MemoryDependenceAnalysis
;
-define void @test_use_domtree(i32* nocapture %bufUInt, i32* nocapture %pattern) nounwind {
+define void @test_use_domtree(ptr nocapture %bufUInt, ptr nocapture %pattern) nounwind {
entry:
br label %for.body
@@ -34,28 +34,27 @@ for.exit: ; preds = %for.body
for.body: ; preds = %for.body, %entry
%i.01 = phi i32 [ 0, %entry ], [ %tmp8.7, %for.body ]
- %arrayidx = getelementptr i32, i32* %bufUInt, i32 %i.01
- %arrayidx5 = getelementptr i32, i32* %pattern, i32 %i.01
- %tmp6 = load i32, i32* %arrayidx5, align 4
- store i32 %tmp6, i32* %arrayidx, align 4
+ %arrayidx = getelementptr i32, ptr %bufUInt, i32 %i.01
+ %arrayidx5 = getelementptr i32, ptr %pattern, i32 %i.01
+ %tmp6 = load i32, ptr %arrayidx5, align 4
+ store i32 %tmp6, ptr %arrayidx, align 4
%tmp8.7 = add i32 %i.01, 8
%cmp.7 = icmp ult i32 %tmp8.7, 1024
br i1 %cmp.7, label %for.body, label %for.exit
}
%t = type { i32 }
-declare void @foo(i8*)
+declare void @foo(ptr)
-define void @test_use_aa(%t* noalias %stuff ) {
+define void @test_use_aa(ptr noalias %stuff ) {
entry:
- %p = getelementptr inbounds %t, %t* %stuff, i32 0, i32 0
- %before = load i32, i32* %p
+ %before = load i32, ptr %stuff
- call void @foo(i8* null)
+ call void @foo(ptr null)
- %after = load i32, i32* %p
+ %after = load i32, ptr %stuff
%sum = add i32 %before, %after
- store i32 %sum, i32* %p
+ store i32 %sum, ptr %stuff
ret void
}
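Beyond the type rewrite, the conversion of @test_use_aa above also drops its
leading getelementptr: an all-zero-index GEP such as
getelementptr inbounds %t, %t* %stuff, i32 0, i32 0 yields the same address
as %stuff itself, so under opaque pointers the load can use %stuff directly.
A sketch of the simplified shape (hypothetical function):

    %t = type { i32 }

    define i32 @first_field(ptr %stuff) {
      ; The first field of %t is at offset 0, so no GEP is required.
      %v = load i32, ptr %stuff
      ret i32 %v
    }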
diff --git a/llvm/test/Analysis/MemoryDependenceAnalysis/invariant.group-bug.ll b/llvm/test/Analysis/MemoryDependenceAnalysis/invariant.group-bug.ll
index f2068327969a..ac5bd9b852c5 100644
--- a/llvm/test/Analysis/MemoryDependenceAnalysis/invariant.group-bug.ll
+++ b/llvm/test/Analysis/MemoryDependenceAnalysis/invariant.group-bug.ll
@@ -10,33 +10,29 @@
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-grtev4-linux-gnu"
-%0 = type { i32 (...)**, %1 }
+%0 = type { ptr, %1 }
%1 = type { %2 }
%2 = type { %3 }
%3 = type { %4, i64, %5 }
-%4 = type { i8* }
+%4 = type { ptr }
%5 = type { i64, [8 x i8] }
-define void @fail(i1* noalias sret(i1) %arg, %0* %arg1, %1* %arg2, i8* %arg3) local_unnamed_addr #0 {
+define void @fail(ptr noalias sret(i1) %arg, ptr %arg1, ptr %arg2, ptr %arg3) local_unnamed_addr #0 {
; CHECK-LABEL: @fail(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[I:%.*]] = bitcast %0* [[ARG1:%.*]] to i64 (%0*)***
-; CHECK-NEXT: [[I4:%.*]] = load i64 (%0*)**, i64 (%0*)*** [[I]], align 8, !invariant.group !6
-; CHECK-NEXT: [[I5:%.*]] = getelementptr inbounds i64 (%0*)*, i64 (%0*)** [[I4]], i64 6
-; CHECK-NEXT: [[I6:%.*]] = load i64 (%0*)*, i64 (%0*)** [[I5]], align 8, !invariant.load !6
-; CHECK-NEXT: [[I7:%.*]] = tail call i64 [[I6]](%0* [[ARG1]]) #[[ATTR1:[0-9]+]]
-; CHECK-NEXT: [[I8:%.*]] = getelementptr inbounds [[TMP1:%.*]], %1* [[ARG2:%.*]], i64 0, i32 0, i32 0, i32 0, i32 0
-; CHECK-NEXT: [[I9:%.*]] = load i8*, i8** [[I8]], align 8
-; CHECK-NEXT: store i8 0, i8* [[I9]], align 1
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i64 (%0*)** [[I4]] to i64 (%0*, i8*, i64)**
+; CHECK-NEXT: [[I4:%.*]] = load ptr, ptr [[ARG1:%.*]], align 8, !invariant.group !6
+; CHECK-NEXT: [[I5:%.*]] = getelementptr inbounds ptr, ptr [[I4]], i64 6
+; CHECK-NEXT: [[I6:%.*]] = load ptr, ptr [[I5]], align 8, !invariant.load !6
+; CHECK-NEXT: [[I7:%.*]] = tail call i64 [[I6]](ptr [[ARG1]]) #[[ATTR1:[0-9]+]]
+; CHECK-NEXT: [[I9:%.*]] = load ptr, ptr [[ARG2:%.*]], align 8
+; CHECK-NEXT: store i8 0, ptr [[I9]], align 1
; CHECK-NEXT: br i1 undef, label [[BB10:%.*]], label [[BB29:%.*]]
; CHECK: bb10:
-; CHECK-NEXT: [[I11:%.*]] = bitcast %0* [[ARG1]] to i64 (%0*, i8*, i64)***
-; CHECK-NEXT: [[I14_PHI_TRANS_INSERT:%.*]] = getelementptr inbounds i64 (%0*, i8*, i64)*, i64 (%0*, i8*, i64)** [[TMP0]], i64 22
-; CHECK-NEXT: [[I15_PRE:%.*]] = load i64 (%0*, i8*, i64)*, i64 (%0*, i8*, i64)** [[I14_PHI_TRANS_INSERT]], align 8, !invariant.load !6
+; CHECK-NEXT: [[I14_PHI_TRANS_INSERT:%.*]] = getelementptr inbounds ptr, ptr [[I4]], i64 22
+; CHECK-NEXT: [[I15_PRE:%.*]] = load ptr, ptr [[I14_PHI_TRANS_INSERT]], align 8, !invariant.load !6
; CHECK-NEXT: br label [[BB12:%.*]]
; CHECK: bb12:
-; CHECK-NEXT: [[I16:%.*]] = call i64 [[I15_PRE]](%0* nonnull [[ARG1]], i8* null, i64 0) #[[ATTR1]]
+; CHECK-NEXT: [[I16:%.*]] = call i64 [[I15_PRE]](ptr nonnull [[ARG1]], ptr null, i64 0) #[[ATTR1]]
; CHECK-NEXT: br i1 undef, label [[BB28:%.*]], label [[BB17:%.*]]
; CHECK: bb17:
; CHECK-NEXT: br i1 undef, label [[BB18:%.*]], label [[BB21:%.*]]
@@ -54,25 +50,22 @@ define void @fail(i1* noalias sret(i1) %arg, %0* %arg1, %1* %arg2, i8* %arg3) lo
; CHECK-NEXT: ret void
;
bb:
- %i = bitcast %0* %arg1 to i64 (%0*)***
- %i4 = load i64 (%0*)**, i64 (%0*)*** %i, align 8, !invariant.group !6
- %i5 = getelementptr inbounds i64 (%0*)*, i64 (%0*)** %i4, i64 6
- %i6 = load i64 (%0*)*, i64 (%0*)** %i5, align 8, !invariant.load !6
- %i7 = tail call i64 %i6(%0* %arg1) #1
- %i8 = getelementptr inbounds %1, %1* %arg2, i64 0, i32 0, i32 0, i32 0, i32 0
- %i9 = load i8*, i8** %i8, align 8
- store i8 0, i8* %i9, align 1
+ %i4 = load ptr, ptr %arg1, align 8, !invariant.group !6
+ %i5 = getelementptr inbounds ptr, ptr %i4, i64 6
+ %i6 = load ptr, ptr %i5, align 8, !invariant.load !6
+ %i7 = tail call i64 %i6(ptr %arg1) #1
+ %i9 = load ptr, ptr %arg2, align 8
+ store i8 0, ptr %i9, align 1
br i1 undef, label %bb10, label %bb29
bb10: ; preds = %bb
- %i11 = bitcast %0* %arg1 to i64 (%0*, i8*, i64)***
br label %bb12
bb12: ; preds = %bb28, %bb10
- %i13 = load i64 (%0*, i8*, i64)**, i64 (%0*, i8*, i64)*** %i11, align 8, !invariant.group !6
- %i14 = getelementptr inbounds i64 (%0*, i8*, i64)*, i64 (%0*, i8*, i64)** %i13, i64 22
- %i15 = load i64 (%0*, i8*, i64)*, i64 (%0*, i8*, i64)** %i14, align 8, !invariant.load !6
- %i16 = call i64 %i15(%0* nonnull %arg1, i8* null, i64 0) #1
+ %i13 = load ptr, ptr %arg1, align 8, !invariant.group !6
+ %i14 = getelementptr inbounds ptr, ptr %i13, i64 22
+ %i15 = load ptr, ptr %i14, align 8, !invariant.load !6
+ %i16 = call i64 %i15(ptr nonnull %arg1, ptr null, i64 0) #1
br i1 undef, label %bb28, label %bb17
bb17: ; preds = %bb12
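The biggest simplification in invariant.group-bug.ll is the disappearance of
bitcasts: with a single pointer type there is nothing left to cast between, so
the vtable-style chains collapse into plain loads of ptr. A condensed sketch
of the pattern in @fail (hypothetical function and names, not the test
itself):

    define i64 @call_virtual(ptr %obj) {
      ; was: bitcast %0* %obj to i64 (%0*)*** followed by typed loads
      %vtable = load ptr, ptr %obj, !invariant.group !0
      %slot = getelementptr inbounds ptr, ptr %vtable, i64 6
      %fn = load ptr, ptr %slot, !invariant.load !0
      %r = tail call i64 %fn(ptr %obj)
      ret i64 %r
    }

    !0 = !{}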
diff --git a/llvm/test/Analysis/MemoryDependenceAnalysis/memdep-block-scan-limit.ll b/llvm/test/Analysis/MemoryDependenceAnalysis/memdep-block-scan-limit.ll
index daf737883ade..3f7d74062740 100644
--- a/llvm/test/Analysis/MemoryDependenceAnalysis/memdep-block-scan-limit.ll
+++ b/llvm/test/Analysis/MemoryDependenceAnalysis/memdep-block-scan-limit.ll
@@ -6,10 +6,10 @@
; WITH-LIMIT-LABEL: @test(
; WITH-LIMIT-CHECK: load
; WITH-LIMIT-CHECK: load
-define i32 @test(i32* %p) {
- %1 = load i32, i32* %p
+define i32 @test(ptr %p) {
+ %1 = load i32, ptr %p
%2 = add i32 %1, 3
- %3 = load i32, i32* %p
+ %3 = load i32, ptr %p
%4 = add i32 %2, %3
ret i32 %4
}
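The RUN lines of this test (outside the hunk shown) exercise GVN with and
without MemoryDependenceAnalysis' per-block instruction scan limit: when the
limit is in force, the backwards walk from the second load gives up before
reaching the first, so both loads survive. A hypothetical pair of invocations,
assuming the option is the usual memdep-block-scan-limit cl::opt:

    ; RUN: opt -S -passes=gvn < %s | FileCheck %s
    ; RUN: opt -S -passes=gvn -memdep-block-scan-limit=1 < %s \
    ; RUN:   | FileCheck %s --check-prefix=WITH-LIMIT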
diff --git a/llvm/test/Analysis/MemoryDependenceAnalysis/memdep_requires_dominator_tree.ll b/llvm/test/Analysis/MemoryDependenceAnalysis/memdep_requires_dominator_tree.ll
index 80dd18a576df..3fa606a5f8e9 100644
--- a/llvm/test/Analysis/MemoryDependenceAnalysis/memdep_requires_dominator_tree.ll
+++ b/llvm/test/Analysis/MemoryDependenceAnalysis/memdep_requires_dominator_tree.ll
@@ -1,6 +1,6 @@
; RUN: opt -passes=gvn < %s
-define void @__memdep_requires_dominator_tree(i32* nocapture %bufUInt, i32* nocapture %pattern) nounwind {
+define void @__memdep_requires_dominator_tree(ptr nocapture %bufUInt, ptr nocapture %pattern) nounwind {
entry:
br label %for.body
@@ -9,10 +9,10 @@ for.exit: ; preds = %for.body
for.body: ; preds = %for.body, %entry
%i.01 = phi i32 [ 0, %entry ], [ %tmp8.7, %for.body ]
- %arrayidx = getelementptr i32, i32* %bufUInt, i32 %i.01
- %arrayidx5 = getelementptr i32, i32* %pattern, i32 %i.01
- %tmp6 = load i32, i32* %arrayidx5, align 4
- store i32 %tmp6, i32* %arrayidx, align 4
+ %arrayidx = getelementptr i32, ptr %bufUInt, i32 %i.01
+ %arrayidx5 = getelementptr i32, ptr %pattern, i32 %i.01
+ %tmp6 = load i32, ptr %arrayidx5, align 4
+ store i32 %tmp6, ptr %arrayidx, align 4
%tmp8.7 = add i32 %i.01, 8
%cmp.7 = icmp ult i32 %tmp8.7, 1024
br i1 %cmp.7, label %for.body, label %for.exit
diff --git a/llvm/test/Analysis/MemoryDependenceAnalysis/memdep_with_tbaa.ll b/llvm/test/Analysis/MemoryDependenceAnalysis/memdep_with_tbaa.ll
index f1427ff53291..7462159c1f2a 100644
--- a/llvm/test/Analysis/MemoryDependenceAnalysis/memdep_with_tbaa.ll
+++ b/llvm/test/Analysis/MemoryDependenceAnalysis/memdep_with_tbaa.ll
@@ -2,29 +2,29 @@
; RUN: opt -aa-pipeline=basic-aa -passes=gvn -S < %s | FileCheck %s
; This test catches an issue in the MemoryDependenceAnalysis caching mechanism in the presence of TBAA.
-define i64 @foo(i64 addrspace(1)** %arg, i1 %arg1, i1 %arg2, i1 %arg3, i32 %arg4) {
+define i64 @foo(ptr %arg, i1 %arg1, i1 %arg2, i1 %arg3, i32 %arg4) {
; CHECK-LABEL: @foo(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[TMP:%.*]] = load atomic i64 addrspace(1)*, i64 addrspace(1)** [[ARG:%.*]] unordered, align 8
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, i64 addrspace(1)* [[TMP]], i64 8
-; CHECK-NEXT: store atomic i64 0, i64 addrspace(1)* [[TMP5]] unordered, align 8
+; CHECK-NEXT: [[TMP:%.*]] = load atomic ptr addrspace(1), ptr [[ARG:%.*]] unordered, align 8
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr addrspace(1) [[TMP]], i64 8
+; CHECK-NEXT: store atomic i64 0, ptr addrspace(1) [[TMP5]] unordered, align 8
; CHECK-NEXT: br label [[BB6:%.*]]
; CHECK: bb6:
; CHECK-NEXT: [[TMP7:%.*]] = phi i64 [ 0, [[BB:%.*]] ], [ [[TMP22:%.*]], [[BB19:%.*]] ]
; CHECK-NEXT: br i1 [[ARG1:%.*]], label [[BB19]], label [[BB8:%.*]]
; CHECK: bb8:
-; CHECK-NEXT: [[TMP9:%.*]] = load atomic i64 addrspace(1)*, i64 addrspace(1)** [[ARG]] unordered, align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load atomic ptr addrspace(1), ptr [[ARG]] unordered, align 8
; CHECK-NEXT: br i1 [[ARG2:%.*]], label [[BB11:%.*]], label [[BB10:%.*]]
; CHECK: bb10:
; CHECK-NEXT: br label [[BB15:%.*]]
; CHECK: bb11:
; CHECK-NEXT: br i1 [[ARG3:%.*]], label [[BB12:%.*]], label [[BB18:%.*]]
; CHECK: bb12:
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i64, i64 addrspace(1)* [[TMP9]], i64 8
-; CHECK-NEXT: store atomic i64 1, i64 addrspace(1)* [[TMP14]] unordered, align 8
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i64, ptr addrspace(1) [[TMP9]], i64 8
+; CHECK-NEXT: store atomic i64 1, ptr addrspace(1) [[TMP14]] unordered, align 8
; CHECK-NEXT: ret i64 0
; CHECK: bb15:
-; CHECK-NEXT: [[TMP16:%.*]] = phi i64 addrspace(1)* [ [[TMP9]], [[BB10]] ], [ [[TMP27:%.*]], [[BB26:%.*]] ]
+; CHECK-NEXT: [[TMP16:%.*]] = phi ptr addrspace(1) [ [[TMP9]], [[BB10]] ], [ [[TMP27:%.*]], [[BB26:%.*]] ]
; CHECK-NEXT: [[TMP17:%.*]] = phi i64 [ [[TMP7]], [[BB10]] ], [ 0, [[BB26]] ]
; CHECK-NEXT: switch i32 [[ARG4:%.*]], label [[BB19]] [
; CHECK-NEXT: i32 0, label [[BB26]]
@@ -33,37 +33,37 @@ define i64 @foo(i64 addrspace(1)** %arg, i1 %arg1, i1 %arg2, i1 %arg3, i32 %arg4
; CHECK: bb18:
; CHECK-NEXT: br label [[BB19]]
; CHECK: bb19:
-; CHECK-NEXT: [[TMP20:%.*]] = phi i64 addrspace(1)* [ [[TMP16]], [[BB15]] ], [ inttoptr (i64 1 to i64 addrspace(1)*), [[BB6]] ], [ [[TMP9]], [[BB18]] ]
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i64, i64 addrspace(1)* [[TMP20]], i64 8
-; CHECK-NEXT: [[TMP22]] = load atomic i64, i64 addrspace(1)* [[TMP21]] unordered, align 8, !tbaa !0
+; CHECK-NEXT: [[TMP20:%.*]] = phi ptr addrspace(1) [ [[TMP16]], [[BB15]] ], [ inttoptr (i64 1 to ptr addrspace(1)), [[BB6]] ], [ [[TMP9]], [[BB18]] ]
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i64, ptr addrspace(1) [[TMP20]], i64 8
+; CHECK-NEXT: [[TMP22]] = load atomic i64, ptr addrspace(1) [[TMP21]] unordered, align 8, !tbaa !0
; CHECK-NEXT: br label [[BB6]]
; CHECK: bb23:
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds i64, i64 addrspace(1)* [[TMP16]], i64 8
-; CHECK-NEXT: [[TMP25:%.*]] = load atomic i64, i64 addrspace(1)* [[TMP24]] unordered, align 8
+; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds i64, ptr addrspace(1) [[TMP16]], i64 8
+; CHECK-NEXT: [[TMP25:%.*]] = load atomic i64, ptr addrspace(1) [[TMP24]] unordered, align 8
; CHECK-NEXT: call void @baz(i64 [[TMP25]]) #0
; CHECK-NEXT: ret i64 0
; CHECK: bb26:
; CHECK-NEXT: call void @bar()
-; CHECK-NEXT: [[TMP27]] = load atomic i64 addrspace(1)*, i64 addrspace(1)** [[ARG]] unordered, align 8
-; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i64, i64 addrspace(1)* [[TMP27]], i64 8
-; CHECK-NEXT: [[TMP29:%.*]] = load atomic i64, i64 addrspace(1)* [[TMP28]] unordered, align 8
-; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i64, i64 addrspace(1)* [[TMP27]], i64 40
-; CHECK-NEXT: store atomic i64 [[TMP29]], i64 addrspace(1)* [[TMP30]] unordered, align 4
+; CHECK-NEXT: [[TMP27]] = load atomic ptr addrspace(1), ptr [[ARG]] unordered, align 8
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i64, ptr addrspace(1) [[TMP27]], i64 8
+; CHECK-NEXT: [[TMP29:%.*]] = load atomic i64, ptr addrspace(1) [[TMP28]] unordered, align 8
+; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i64, ptr addrspace(1) [[TMP27]], i64 40
+; CHECK-NEXT: store atomic i64 [[TMP29]], ptr addrspace(1) [[TMP30]] unordered, align 4
; CHECK-NEXT: br label [[BB15]]
;
bb:
- %tmp = load atomic i64 addrspace(1)*, i64 addrspace(1)** %arg unordered, align 8
- %tmp5 = getelementptr inbounds i64, i64 addrspace(1)* %tmp, i64 8
- store atomic i64 0, i64 addrspace(1)* %tmp5 unordered, align 8
+ %tmp = load atomic ptr addrspace(1), ptr %arg unordered, align 8
+ %tmp5 = getelementptr inbounds i64, ptr addrspace(1) %tmp, i64 8
+ store atomic i64 0, ptr addrspace(1) %tmp5 unordered, align 8
br label %bb6
bb6: ; preds = %bb19, %bb
%tmp7 = phi i64 [ 0, %bb ], [ %tmp22, %bb19 ]
- %tmp111 = inttoptr i64 1 to i64 addrspace(1)*
+ %tmp111 = inttoptr i64 1 to ptr addrspace(1)
br i1 %arg1, label %bb19, label %bb8
bb8: ; preds = %bb6
- %tmp9 = load atomic i64 addrspace(1)*, i64 addrspace(1)** %arg unordered, align 8
+ %tmp9 = load atomic ptr addrspace(1), ptr %arg unordered, align 8
br i1 %arg2, label %bb11, label %bb10
bb10: ; preds = %bb8
@@ -73,13 +73,13 @@ bb11: ; preds = %bb8
br i1 %arg3, label %bb12, label %bb18
bb12: ; preds = %bb11
- %tmp13 = phi i64 addrspace(1)* [ %tmp9, %bb11 ]
- %tmp14 = getelementptr inbounds i64, i64 addrspace(1)* %tmp13, i64 8
- store atomic i64 1, i64 addrspace(1)* %tmp14 unordered, align 8
+ %tmp13 = phi ptr addrspace(1) [ %tmp9, %bb11 ]
+ %tmp14 = getelementptr inbounds i64, ptr addrspace(1) %tmp13, i64 8
+ store atomic i64 1, ptr addrspace(1) %tmp14 unordered, align 8
ret i64 0
bb15: ; preds = %bb26, %bb10
- %tmp16 = phi i64 addrspace(1)* [ %tmp9, %bb10 ], [ %tmp27, %bb26 ]
+ %tmp16 = phi ptr addrspace(1) [ %tmp9, %bb10 ], [ %tmp27, %bb26 ]
%tmp17 = phi i64 [ %tmp7, %bb10 ], [ 0, %bb26 ]
switch i32 %arg4, label %bb19 [
i32 0, label %bb26
@@ -90,24 +90,24 @@ bb18: ; preds = %bb11
br label %bb19
bb19: ; preds = %bb18, %bb15, %bb6
- %tmp20 = phi i64 addrspace(1)* [ %tmp16, %bb15 ], [ %tmp111, %bb6 ], [ %tmp9, %bb18 ]
- %tmp21 = getelementptr inbounds i64, i64 addrspace(1)* %tmp20, i64 8
- %tmp22 = load atomic i64, i64 addrspace(1)* %tmp21 unordered, align 8, !tbaa !0
+ %tmp20 = phi ptr addrspace(1) [ %tmp16, %bb15 ], [ %tmp111, %bb6 ], [ %tmp9, %bb18 ]
+ %tmp21 = getelementptr inbounds i64, ptr addrspace(1) %tmp20, i64 8
+ %tmp22 = load atomic i64, ptr addrspace(1) %tmp21 unordered, align 8, !tbaa !0
br label %bb6
bb23: ; preds = %bb15
- %tmp24 = getelementptr inbounds i64, i64 addrspace(1)* %tmp16, i64 8
- %tmp25 = load atomic i64, i64 addrspace(1)* %tmp24 unordered, align 8
+ %tmp24 = getelementptr inbounds i64, ptr addrspace(1) %tmp16, i64 8
+ %tmp25 = load atomic i64, ptr addrspace(1) %tmp24 unordered, align 8
call void @baz(i64 %tmp25) #0
ret i64 0
bb26: ; preds = %bb15
call void @bar()
- %tmp27 = load atomic i64 addrspace(1)*, i64 addrspace(1)** %arg unordered, align 8
- %tmp28 = getelementptr inbounds i64, i64 addrspace(1)* %tmp27, i64 8
- %tmp29 = load atomic i64, i64 addrspace(1)* %tmp28 unordered, align 8
- %tmp30 = getelementptr inbounds i64, i64 addrspace(1)* %tmp27, i64 40
- store atomic i64 %tmp29, i64 addrspace(1)* %tmp30 unordered, align 4
+ %tmp27 = load atomic ptr addrspace(1), ptr %arg unordered, align 8
+ %tmp28 = getelementptr inbounds i64, ptr addrspace(1) %tmp27, i64 8
+ %tmp29 = load atomic i64, ptr addrspace(1) %tmp28 unordered, align 8
+ %tmp30 = getelementptr inbounds i64, ptr addrspace(1) %tmp27, i64 40
+ store atomic i64 %tmp29, ptr addrspace(1) %tmp30 unordered, align 4
br label %bb15
}
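Note that the conversion erases only the pointee type, never the address
space: i64 addrspace(1)* becomes ptr addrspace(1), and the two distinct
pointer types in this test stay distinct. A minimal sketch (hypothetical
function):

    define i64 @load_via_as1(ptr %arg) {
      %obj = load atomic ptr addrspace(1), ptr %arg unordered, align 8
      %slot = getelementptr inbounds i64, ptr addrspace(1) %obj, i64 8
      %v = load atomic i64, ptr addrspace(1) %slot unordered, align 8
      ret i64 %v
    }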
diff --git a/llvm/test/Analysis/MemoryDependenceAnalysis/reorder-over-store-atomic.ll b/llvm/test/Analysis/MemoryDependenceAnalysis/reorder-over-store-atomic.ll
index aab094359bb7..e6a388bdd022 100644
--- a/llvm/test/Analysis/MemoryDependenceAnalysis/reorder-over-store-atomic.ll
+++ b/llvm/test/Analysis/MemoryDependenceAnalysis/reorder-over-store-atomic.ll
@@ -6,16 +6,16 @@
define i32 @test_load_seq_cst_unordered() {
; CHECK-LABEL: @test_load_seq_cst_unordered(
-; CHECK-NEXT: [[L1:%.*]] = load atomic i32, i32* @w unordered, align 4
-; CHECK-NEXT: [[LV:%.*]] = load atomic i32, i32* @u seq_cst, align 4
-; CHECK-NEXT: [[L2:%.*]] = load atomic i32, i32* @w unordered, align 4
+; CHECK-NEXT: [[L1:%.*]] = load atomic i32, ptr @w unordered, align 4
+; CHECK-NEXT: [[LV:%.*]] = load atomic i32, ptr @u seq_cst, align 4
+; CHECK-NEXT: [[L2:%.*]] = load atomic i32, ptr @w unordered, align 4
; CHECK-NEXT: [[RES_1:%.*]] = sub i32 [[L1]], [[L2]]
; CHECK-NEXT: [[RES:%.*]] = add i32 [[RES_1]], [[LV]]
; CHECK-NEXT: ret i32 [[RES]]
;
- %l1 = load atomic i32, i32* @w unordered, align 4
- %lv = load atomic i32, i32* @u seq_cst, align 4
- %l2 = load atomic i32, i32* @w unordered, align 4
+ %l1 = load atomic i32, ptr @w unordered, align 4
+ %lv = load atomic i32, ptr @u seq_cst, align 4
+ %l2 = load atomic i32, ptr @w unordered, align 4
%res.1 = sub i32 %l1, %l2
%res = add i32 %res.1, %lv
ret i32 %res
@@ -23,16 +23,16 @@ define i32 @test_load_seq_cst_unordered() {
define i32 @test_load_acquire_unordered() {
; CHECK-LABEL: @test_load_acquire_unordered(
-; CHECK-NEXT: [[L1:%.*]] = load atomic i32, i32* @w unordered, align 4
-; CHECK-NEXT: [[LV:%.*]] = load atomic i32, i32* @u acquire, align 4
-; CHECK-NEXT: [[L2:%.*]] = load atomic i32, i32* @w unordered, align 4
+; CHECK-NEXT: [[L1:%.*]] = load atomic i32, ptr @w unordered, align 4
+; CHECK-NEXT: [[LV:%.*]] = load atomic i32, ptr @u acquire, align 4
+; CHECK-NEXT: [[L2:%.*]] = load atomic i32, ptr @w unordered, align 4
; CHECK-NEXT: [[RES_1:%.*]] = sub i32 [[L1]], [[L2]]
; CHECK-NEXT: [[RES:%.*]] = add i32 [[RES_1]], [[LV]]
; CHECK-NEXT: ret i32 [[RES]]
;
- %l1 = load atomic i32, i32* @w unordered, align 4
- %lv = load atomic i32, i32* @u acquire, align 4
- %l2 = load atomic i32, i32* @w unordered, align 4
+ %l1 = load atomic i32, ptr @w unordered, align 4
+ %lv = load atomic i32, ptr @u acquire, align 4
+ %l2 = load atomic i32, ptr @w unordered, align 4
%res.1 = sub i32 %l1, %l2
%res = add i32 %res.1, %lv
ret i32 %res
@@ -40,51 +40,51 @@ define i32 @test_load_acquire_unordered() {
define i32 @test_store_cst_unordered(i32 %x) {
; CHECK-LABEL: @test_store_cst_unordered(
-; CHECK-NEXT: store atomic i32 [[X:%.*]], i32* @u seq_cst, align 4
+; CHECK-NEXT: store atomic i32 [[X:%.*]], ptr @u seq_cst, align 4
; CHECK-NEXT: ret i32 0
;
- %l1 = load atomic i32, i32* @w unordered, align 4
- store atomic i32 %x, i32* @u seq_cst, align 4
- %l2 = load atomic i32, i32* @w unordered, align 4
+ %l1 = load atomic i32, ptr @w unordered, align 4
+ store atomic i32 %x, ptr @u seq_cst, align 4
+ %l2 = load atomic i32, ptr @w unordered, align 4
%res = sub i32 %l1, %l2
ret i32 %res
}
define i32 @test_store_release_unordered(i32 %x) {
; CHECK-LABEL: @test_store_release_unordered(
-; CHECK-NEXT: store atomic i32 [[X:%.*]], i32* @u release, align 4
+; CHECK-NEXT: store atomic i32 [[X:%.*]], ptr @u release, align 4
; CHECK-NEXT: ret i32 0
;
- %l1 = load atomic i32, i32* @w unordered, align 4
- store atomic i32 %x, i32* @u release, align 4
- %l2 = load atomic i32, i32* @w unordered, align 4
+ %l1 = load atomic i32, ptr @w unordered, align 4
+ store atomic i32 %x, ptr @u release, align 4
+ %l2 = load atomic i32, ptr @w unordered, align 4
%res = sub i32 %l1, %l2
ret i32 %res
}
define i32 @test_stores_seq_cst_unordered(i32 %x) {
; CHECK-LABEL: @test_stores_seq_cst_unordered(
-; CHECK-NEXT: store atomic i32 [[X:%.*]], i32* @w unordered, align 4
-; CHECK-NEXT: store atomic i32 [[X]], i32* @u seq_cst, align 4
-; CHECK-NEXT: store atomic i32 0, i32* @w unordered, align 4
+; CHECK-NEXT: store atomic i32 [[X:%.*]], ptr @w unordered, align 4
+; CHECK-NEXT: store atomic i32 [[X]], ptr @u seq_cst, align 4
+; CHECK-NEXT: store atomic i32 0, ptr @w unordered, align 4
; CHECK-NEXT: ret i32 0
;
- store atomic i32 %x, i32* @w unordered, align 4
- store atomic i32 %x, i32* @u seq_cst, align 4
- store atomic i32 0, i32* @w unordered, align 4
+ store atomic i32 %x, ptr @w unordered, align 4
+ store atomic i32 %x, ptr @u seq_cst, align 4
+ store atomic i32 0, ptr @w unordered, align 4
ret i32 0
}
define i32 @test_stores_release_unordered(i32 %x) {
; CHECK-LABEL: @test_stores_release_unordered(
-; CHECK-NEXT: store atomic i32 [[X:%.*]], i32* @w unordered, align 4
-; CHECK-NEXT: store atomic i32 [[X]], i32* @u release, align 4
-; CHECK-NEXT: store atomic i32 0, i32* @w unordered, align 4
+; CHECK-NEXT: store atomic i32 [[X:%.*]], ptr @w unordered, align 4
+; CHECK-NEXT: store atomic i32 [[X]], ptr @u release, align 4
+; CHECK-NEXT: store atomic i32 0, ptr @w unordered, align 4
; CHECK-NEXT: ret i32 0
;
- store atomic i32 %x, i32* @w unordered, align 4
- store atomic i32 %x, i32* @u release, align 4
- store atomic i32 0, i32* @w unordered, align 4
+ store atomic i32 %x, ptr @w unordered, align 4
+ store atomic i32 %x, ptr @u release, align 4
+ store atomic i32 0, ptr @w unordered, align 4
ret i32 0
}
@@ -92,16 +92,16 @@ define i32 @test_stores_release_unordered(i32 %x) {
; Must respect total order for seq_cst even for unrelated addresses
define i32 @neg_load_seq_cst() {
; CHECK-LABEL: @neg_load_seq_cst(
-; CHECK-NEXT: [[L1:%.*]] = load atomic i32, i32* @w seq_cst, align 4
-; CHECK-NEXT: [[LV:%.*]] = load atomic i32, i32* @u seq_cst, align 4
-; CHECK-NEXT: [[L2:%.*]] = load atomic i32, i32* @w seq_cst, align 4
+; CHECK-NEXT: [[L1:%.*]] = load atomic i32, ptr @w seq_cst, align 4
+; CHECK-NEXT: [[LV:%.*]] = load atomic i32, ptr @u seq_cst, align 4
+; CHECK-NEXT: [[L2:%.*]] = load atomic i32, ptr @w seq_cst, align 4
; CHECK-NEXT: [[RES_1:%.*]] = sub i32 [[L1]], [[L2]]
; CHECK-NEXT: [[RES:%.*]] = add i32 [[RES_1]], [[LV]]
; CHECK-NEXT: ret i32 [[RES]]
;
- %l1 = load atomic i32, i32* @w seq_cst, align 4
- %lv = load atomic i32, i32* @u seq_cst, align 4
- %l2 = load atomic i32, i32* @w seq_cst, align 4
+ %l1 = load atomic i32, ptr @w seq_cst, align 4
+ %lv = load atomic i32, ptr @u seq_cst, align 4
+ %l2 = load atomic i32, ptr @w seq_cst, align 4
%res.1 = sub i32 %l1, %l2
%res = add i32 %res.1, %lv
ret i32 %res
@@ -109,28 +109,28 @@ define i32 @neg_load_seq_cst() {
define i32 @neg_store_seq_cst(i32 %x) {
; CHECK-LABEL: @neg_store_seq_cst(
-; CHECK-NEXT: [[L1:%.*]] = load atomic i32, i32* @w seq_cst, align 4
-; CHECK-NEXT: store atomic i32 [[X:%.*]], i32* @u seq_cst, align 4
-; CHECK-NEXT: [[L2:%.*]] = load atomic i32, i32* @w seq_cst, align 4
+; CHECK-NEXT: [[L1:%.*]] = load atomic i32, ptr @w seq_cst, align 4
+; CHECK-NEXT: store atomic i32 [[X:%.*]], ptr @u seq_cst, align 4
+; CHECK-NEXT: [[L2:%.*]] = load atomic i32, ptr @w seq_cst, align 4
; CHECK-NEXT: [[RES:%.*]] = sub i32 [[L1]], [[L2]]
; CHECK-NEXT: ret i32 [[RES]]
;
- %l1 = load atomic i32, i32* @w seq_cst, align 4
- store atomic i32 %x, i32* @u seq_cst, align 4
- %l2 = load atomic i32, i32* @w seq_cst, align 4
+ %l1 = load atomic i32, ptr @w seq_cst, align 4
+ store atomic i32 %x, ptr @u seq_cst, align 4
+ %l2 = load atomic i32, ptr @w seq_cst, align 4
%res = sub i32 %l1, %l2
ret i32 %res
}
define i32 @neg_stores_seq_cst(i32 %x) {
; CHECK-LABEL: @neg_stores_seq_cst(
-; CHECK-NEXT: store atomic i32 [[X:%.*]], i32* @w seq_cst, align 4
-; CHECK-NEXT: store atomic i32 [[X]], i32* @u seq_cst, align 4
-; CHECK-NEXT: store atomic i32 0, i32* @w seq_cst, align 4
+; CHECK-NEXT: store atomic i32 [[X:%.*]], ptr @w seq_cst, align 4
+; CHECK-NEXT: store atomic i32 [[X]], ptr @u seq_cst, align 4
+; CHECK-NEXT: store atomic i32 0, ptr @w seq_cst, align 4
; CHECK-NEXT: ret i32 0
;
- store atomic i32 %x, i32* @w seq_cst, align 4
- store atomic i32 %x, i32* @u seq_cst, align 4
- store atomic i32 0, i32* @w seq_cst, align 4
+ store atomic i32 %x, ptr @w seq_cst, align 4
+ store atomic i32 %x, ptr @u seq_cst, align 4
+ store atomic i32 0, ptr @w seq_cst, align 4
ret i32 0
}
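The contrast these tests draw: unordered atomic accesses to one location may
be folded across a seq_cst store to an unrelated location (the test_store_*
CHECK lines reduce to ret i32 0), while seq_cst accesses participate in a
single total order and must stay (the neg_* cases). A self-contained sketch of
the foldable case (hypothetical globals):

    @a = global i32 0
    @b = global i32 0

    define i32 @foldable(i32 %x) {
      %l1 = load atomic i32, ptr @a unordered, align 4
      store atomic i32 %x, ptr @b seq_cst, align 4
      ; unordered imposes no ordering of its own, so GVN may
      ; treat this load as equal to %l1.
      %l2 = load atomic i32, ptr @a unordered, align 4
      %res = sub i32 %l1, %l2
      ret i32 %res
    }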
diff --git a/llvm/test/Analysis/MemoryDependenceAnalysis/reorder-volatile.ll b/llvm/test/Analysis/MemoryDependenceAnalysis/reorder-volatile.ll
index 6659d12db7b1..b7464f64271c 100644
--- a/llvm/test/Analysis/MemoryDependenceAnalysis/reorder-volatile.ll
+++ b/llvm/test/Analysis/MemoryDependenceAnalysis/reorder-volatile.ll
@@ -6,12 +6,12 @@
define i32 @test_load() {
; CHECK-LABEL: @test_load(
-; CHECK-NEXT: [[LV:%.*]] = load volatile i32, i32* @u, align 4
+; CHECK-NEXT: [[LV:%.*]] = load volatile i32, ptr @u, align 4
; CHECK-NEXT: ret i32 [[LV]]
;
- %l1 = load atomic i32, i32* @w unordered, align 4
- %lv = load volatile i32, i32* @u, align 4
- %l2 = load atomic i32, i32* @w unordered, align 4
+ %l1 = load atomic i32, ptr @w unordered, align 4
+ %lv = load volatile i32, ptr @u, align 4
+ %l2 = load atomic i32, ptr @w unordered, align 4
%res.1 = sub i32 %l1, %l2
%res = add i32 %res.1, %lv
ret i32 %res
@@ -19,16 +19,16 @@ define i32 @test_load() {
define i32 @test_load_with_acquire_load() {
; CHECK-LABEL: @test_load_with_acquire_load(
-; CHECK-NEXT: [[L1:%.*]] = load atomic i32, i32* @w acquire, align 4
-; CHECK-NEXT: [[LV:%.*]] = load volatile i32, i32* @u, align 4
-; CHECK-NEXT: [[L2:%.*]] = load atomic i32, i32* @w acquire, align 4
+; CHECK-NEXT: [[L1:%.*]] = load atomic i32, ptr @w acquire, align 4
+; CHECK-NEXT: [[LV:%.*]] = load volatile i32, ptr @u, align 4
+; CHECK-NEXT: [[L2:%.*]] = load atomic i32, ptr @w acquire, align 4
; CHECK-NEXT: [[RES_1:%.*]] = sub i32 [[L1]], [[L2]]
; CHECK-NEXT: [[RES:%.*]] = add i32 [[RES_1]], [[LV]]
; CHECK-NEXT: ret i32 [[RES]]
;
- %l1 = load atomic i32, i32* @w acquire, align 4
- %lv = load volatile i32, i32* @u, align 4
- %l2 = load atomic i32, i32* @w acquire, align 4
+ %l1 = load atomic i32, ptr @w acquire, align 4
+ %lv = load volatile i32, ptr @u, align 4
+ %l2 = load atomic i32, ptr @w acquire, align 4
%res.1 = sub i32 %l1, %l2
%res = add i32 %res.1, %lv
ret i32 %res
@@ -36,16 +36,16 @@ define i32 @test_load_with_acquire_load() {
define i32 @test_load_with_seq_cst_load() {
; CHECK-LABEL: @test_load_with_seq_cst_load(
-; CHECK-NEXT: [[L1:%.*]] = load atomic i32, i32* @w seq_cst, align 4
-; CHECK-NEXT: [[LV:%.*]] = load volatile i32, i32* @u, align 4
-; CHECK-NEXT: [[L2:%.*]] = load atomic i32, i32* @w seq_cst, align 4
+; CHECK-NEXT: [[L1:%.*]] = load atomic i32, ptr @w seq_cst, align 4
+; CHECK-NEXT: [[LV:%.*]] = load volatile i32, ptr @u, align 4
+; CHECK-NEXT: [[L2:%.*]] = load atomic i32, ptr @w seq_cst, align 4
; CHECK-NEXT: [[RES_1:%.*]] = sub i32 [[L1]], [[L2]]
; CHECK-NEXT: [[RES:%.*]] = add i32 [[RES_1]], [[LV]]
; CHECK-NEXT: ret i32 [[RES]]
;
- %l1 = load atomic i32, i32* @w seq_cst, align 4
- %lv = load volatile i32, i32* @u, align 4
- %l2 = load atomic i32, i32* @w seq_cst, align 4
+ %l1 = load atomic i32, ptr @w seq_cst, align 4
+ %lv = load volatile i32, ptr @u, align 4
+ %l2 = load atomic i32, ptr @w seq_cst, align 4
%res.1 = sub i32 %l1, %l2
%res = add i32 %res.1, %lv
ret i32 %res
@@ -53,42 +53,42 @@ define i32 @test_load_with_seq_cst_load() {
define i32 @test_store(i32 %x) {
; CHECK-LABEL: @test_store(
-; CHECK-NEXT: store volatile i32 [[X:%.*]], i32* @u, align 4
+; CHECK-NEXT: store volatile i32 [[X:%.*]], ptr @u, align 4
; CHECK-NEXT: ret i32 0
;
- %l1 = load atomic i32, i32* @w unordered, align 4
- store volatile i32 %x, i32* @u, align 4
- %l2 = load atomic i32, i32* @w unordered, align 4
+ %l1 = load atomic i32, ptr @w unordered, align 4
+ store volatile i32 %x, ptr @u, align 4
+ %l2 = load atomic i32, ptr @w unordered, align 4
%res = sub i32 %l1, %l2
ret i32 %res
}
define i32 @test_store_with_acquire_load(i32 %x) {
; CHECK-LABEL: @test_store_with_acquire_load(
-; CHECK-NEXT: [[L1:%.*]] = load atomic i32, i32* @w acquire, align 4
-; CHECK-NEXT: store volatile i32 [[X:%.*]], i32* @u, align 4
-; CHECK-NEXT: [[L2:%.*]] = load atomic i32, i32* @w acquire, align 4
+; CHECK-NEXT: [[L1:%.*]] = load atomic i32, ptr @w acquire, align 4
+; CHECK-NEXT: store volatile i32 [[X:%.*]], ptr @u, align 4
+; CHECK-NEXT: [[L2:%.*]] = load atomic i32, ptr @w acquire, align 4
; CHECK-NEXT: [[RES:%.*]] = sub i32 [[L1]], [[L2]]
; CHECK-NEXT: ret i32 [[RES]]
;
- %l1 = load atomic i32, i32* @w acquire, align 4
- store volatile i32 %x, i32* @u, align 4
- %l2 = load atomic i32, i32* @w acquire, align 4
+ %l1 = load atomic i32, ptr @w acquire, align 4
+ store volatile i32 %x, ptr @u, align 4
+ %l2 = load atomic i32, ptr @w acquire, align 4
%res = sub i32 %l1, %l2
ret i32 %res
}
define i32 @test_store_with_seq_cst_load(i32 %x) {
; CHECK-LABEL: @test_store_with_seq_cst_load(
-; CHECK-NEXT: [[L1:%.*]] = load atomic i32, i32* @w seq_cst, align 4
-; CHECK-NEXT: store volatile i32 [[X:%.*]], i32* @u, align 4
-; CHECK-NEXT: [[L2:%.*]] = load atomic i32, i32* @w seq_cst, align 4
+; CHECK-NEXT: [[L1:%.*]] = load atomic i32, ptr @w seq_cst, align 4
+; CHECK-NEXT: store volatile i32 [[X:%.*]], ptr @u, align 4
+; CHECK-NEXT: [[L2:%.*]] = load atomic i32, ptr @w seq_cst, align 4
; CHECK-NEXT: [[RES:%.*]] = sub i32 [[L1]], [[L2]]
; CHECK-NEXT: ret i32 [[RES]]
;
- %l1 = load atomic i32, i32* @w seq_cst, align 4
- store volatile i32 %x, i32* @u, align 4
- %l2 = load atomic i32, i32* @w seq_cst, align 4
+ %l1 = load atomic i32, ptr @w seq_cst, align 4
+ store volatile i32 %x, ptr @u, align 4
+ %l2 = load atomic i32, ptr @w seq_cst, align 4
%res = sub i32 %l1, %l2
ret i32 %res
}
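Volatile plays the role here that the seq_cst store played in the previous
file: a volatile access pins only its own location, so in @test_load the
unordered loads of @w fold away around the volatile load of @u, while the
acquire and seq_cst variants block the transform. A self-contained form of the
first case (the @u/@w globals are declared outside the hunks shown):

    @u = global i32 0
    @w = global i32 0

    define i32 @fold_around_volatile() {
      %l1 = load atomic i32, ptr @w unordered, align 4
      %lv = load volatile i32, ptr @u, align 4
      ; The volatile load does not order @w, so this folds into %l1.
      %l2 = load atomic i32, ptr @w unordered, align 4
      %res.1 = sub i32 %l1, %l2
      %res = add i32 %res.1, %lv
      ret i32 %res
    }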