[llvm] 60147c6 - [EarlyCSE] Regenerate test checks (NFC)

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 20 05:49:46 PST 2022


Author: Nikita Popov
Date: 2022-01-20T14:49:26+01:00
New Revision: 60147c6034e32f687ef82bafe3f0d7fdf451072a

URL: https://github.com/llvm/llvm-project/commit/60147c6034e32f687ef82bafe3f0d7fdf451072a
DIFF: https://github.com/llvm/llvm-project/commit/60147c6034e32f687ef82bafe3f0d7fdf451072a.diff

LOG: [EarlyCSE] Regenerate test checks (NFC)

Added: 
    

Modified: 
    llvm/test/Transforms/EarlyCSE/atomics.ll
    llvm/test/Transforms/EarlyCSE/basic.ll
    llvm/test/Transforms/EarlyCSE/const-speculation.ll
    llvm/test/Transforms/EarlyCSE/floatingpoint.ll
    llvm/test/Transforms/EarlyCSE/memoryssa.ll
    llvm/test/Transforms/EarlyCSE/pr33406.ll
    llvm/test/Transforms/EarlyCSE/readnone-mayunwind.ll
    llvm/test/Transforms/EarlyCSE/writeonly.ll

Removed: 
    


################################################################################
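The regenerated assertions below come from utils/update_test_checks.py. As a
rough sketch, such checks are typically refreshed by pointing the script at
the in-tree opt binary and the affected tests (the "build" directory name
here is only an assumption about the local build layout):

    llvm/utils/update_test_checks.py --opt-binary=build/bin/opt \
        llvm/test/Transforms/EarlyCSE/atomics.ll

The script runs the opt portion of each RUN line and rewrites the
CHECK/CHECK-NEXT lines in place for every function in the test.
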
diff --git a/llvm/test/Transforms/EarlyCSE/atomics.ll b/llvm/test/Transforms/EarlyCSE/atomics.ll
index 4a4b76666344a..4d67858237bc9 100644
--- a/llvm/test/Transforms/EarlyCSE/atomics.ll
+++ b/llvm/test/Transforms/EarlyCSE/atomics.ll
@@ -1,71 +1,86 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -S -early-cse -earlycse-debug-hash | FileCheck %s
 ; RUN: opt < %s -S -basic-aa -early-cse-memssa | FileCheck %s
 
-; CHECK-LABEL: @test12(
 define i32 @test12(i1 %B, i32* %P1, i32* %P2) {
+; CHECK-LABEL: @test12(
+; CHECK-NEXT:    [[LOAD0:%.*]] = load i32, i32* [[P1:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i32, i32* [[P2:%.*]] seq_cst, align 4
+; CHECK-NEXT:    [[LOAD1:%.*]] = load i32, i32* [[P1]], align 4
+; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[B:%.*]], i32 [[LOAD0]], i32 [[LOAD1]]
+; CHECK-NEXT:    ret i32 [[SEL]]
+;
   %load0 = load i32, i32* %P1
   %1 = load atomic i32, i32* %P2 seq_cst, align 4
   %load1 = load i32, i32* %P1
   %sel = select i1 %B, i32 %load0, i32 %load1
   ret i32 %sel
-  ; CHECK: load i32, i32* %P1
-  ; CHECK: load i32, i32* %P1
 }
 
-; CHECK-LABEL: @test13(
 ; atomic to non-atomic forwarding is legal
 define i32 @test13(i1 %B, i32* %P1) {
+; CHECK-LABEL: @test13(
+; CHECK-NEXT:    [[A:%.*]] = load atomic i32, i32* [[P1:%.*]] seq_cst, align 4
+; CHECK-NEXT:    ret i32 0
+;
   %a = load atomic i32, i32* %P1 seq_cst, align 4
   %b = load i32, i32* %P1
   %res = sub i32 %a, %b
   ret i32 %res
-  ; CHECK: load atomic i32, i32* %P1
-  ; CHECK: ret i32 0
 }
 
-; CHECK-LABEL: @test14(
 ; atomic to unordered atomic forwarding is legal
 define i32 @test14(i1 %B, i32* %P1) {
+; CHECK-LABEL: @test14(
+; CHECK-NEXT:    [[A:%.*]] = load atomic i32, i32* [[P1:%.*]] seq_cst, align 4
+; CHECK-NEXT:    ret i32 0
+;
   %a = load atomic i32, i32* %P1 seq_cst, align 4
   %b = load atomic i32, i32* %P1 unordered, align 4
   %res = sub i32 %a, %b
   ret i32 %res
-  ; CHECK: load atomic i32, i32* %P1 seq_cst
-  ; CHECK-NEXT: ret i32 0
 }
 
-; CHECK-LABEL: @test15(
 ; implementation restriction: can't forward to stronger
 ; than unordered
 define i32 @test15(i1 %B, i32* %P1, i32* %P2) {
+; CHECK-LABEL: @test15(
+; CHECK-NEXT:    [[A:%.*]] = load atomic i32, i32* [[P1:%.*]] seq_cst, align 4
+; CHECK-NEXT:    [[B:%.*]] = load atomic i32, i32* [[P1]] seq_cst, align 4
+; CHECK-NEXT:    [[RES:%.*]] = sub i32 [[A]], [[B]]
+; CHECK-NEXT:    ret i32 [[RES]]
+;
   %a = load atomic i32, i32* %P1 seq_cst, align 4
   %b = load atomic i32, i32* %P1 seq_cst, align 4
   %res = sub i32 %a, %b
   ret i32 %res
-  ; CHECK: load atomic i32, i32* %P1
-  ; CHECK: load atomic i32, i32* %P1
 }
 
-; CHECK-LABEL: @test16(
 ; forwarding non-atomic to atomic is wrong! (However,
 ; it would be legal to use the later value in place of the
 ; former in this particular example.  We just don't
 ; do that right now.)
 define i32 @test16(i1 %B, i32* %P1, i32* %P2) {
+; CHECK-LABEL: @test16(
+; CHECK-NEXT:    [[A:%.*]] = load i32, i32* [[P1:%.*]], align 4
+; CHECK-NEXT:    [[B:%.*]] = load atomic i32, i32* [[P1]] unordered, align 4
+; CHECK-NEXT:    [[RES:%.*]] = sub i32 [[A]], [[B]]
+; CHECK-NEXT:    ret i32 [[RES]]
+;
   %a = load i32, i32* %P1, align 4
   %b = load atomic i32, i32* %P1 unordered, align 4
   %res = sub i32 %a, %b
   ret i32 %res
-  ; CHECK: load i32, i32* %P1
-  ; CHECK: load atomic i32, i32* %P1
 }
 
 ; Can't DSE across a full fence
 define void @fence_seq_cst_store(i1 %B, i32* %P1, i32* %P2) {
-; CHECK-LABEL: @fence_seq_cst_store
-; CHECK: store
-; CHECK: store atomic
-; CHECK: store
+; CHECK-LABEL: @fence_seq_cst_store(
+; CHECK-NEXT:    store i32 0, i32* [[P1:%.*]], align 4
+; CHECK-NEXT:    store atomic i32 0, i32* [[P2:%.*]] seq_cst, align 4
+; CHECK-NEXT:    store i32 0, i32* [[P1]], align 4
+; CHECK-NEXT:    ret void
+;
   store i32 0, i32* %P1, align 4
   store atomic i32 0, i32* %P2 seq_cst, align 4
   store i32 0, i32* %P1, align 4
@@ -74,10 +89,12 @@ define void @fence_seq_cst_store(i1 %B, i32* %P1, i32* %P2) {
 
 ; Can't DSE across a full fence
 define void @fence_seq_cst(i1 %B, i32* %P1, i32* %P2) {
-; CHECK-LABEL: @fence_seq_cst
-; CHECK: store
-; CHECK: fence seq_cst
-; CHECK: store
+; CHECK-LABEL: @fence_seq_cst(
+; CHECK-NEXT:    store i32 0, i32* [[P1:%.*]], align 4
+; CHECK-NEXT:    fence seq_cst
+; CHECK-NEXT:    store i32 0, i32* [[P1]], align 4
+; CHECK-NEXT:    ret void
+;
   store i32 0, i32* %P1, align 4
   fence seq_cst
   store i32 0, i32* %P1, align 4
@@ -86,10 +103,12 @@ define void @fence_seq_cst(i1 %B, i32* %P1, i32* %P2) {
 
 ; Can't DSE across a full fence
 define void @fence_asm_sideeffect(i1 %B, i32* %P1, i32* %P2) {
-; CHECK-LABEL: @fence_asm_sideeffect
-; CHECK: store
-; CHECK: call void asm sideeffect
-; CHECK: store
+; CHECK-LABEL: @fence_asm_sideeffect(
+; CHECK-NEXT:    store i32 0, i32* [[P1:%.*]], align 4
+; CHECK-NEXT:    call void asm sideeffect "", ""()
+; CHECK-NEXT:    store i32 0, i32* [[P1]], align 4
+; CHECK-NEXT:    ret void
+;
   store i32 0, i32* %P1, align 4
   call void asm sideeffect "", ""()
   store i32 0, i32* %P1, align 4
@@ -98,10 +117,12 @@ define void @fence_asm_sideeffect(i1 %B, i32* %P1, i32* %P2) {
 
 ; Can't DSE across a full fence
 define void @fence_asm_memory(i1 %B, i32* %P1, i32* %P2) {
-; CHECK-LABEL: @fence_asm_memory
-; CHECK: store
-; CHECK: call void asm
-; CHECK: store
+; CHECK-LABEL: @fence_asm_memory(
+; CHECK-NEXT:    store i32 0, i32* [[P1:%.*]], align 4
+; CHECK-NEXT:    call void asm "", "~{memory}"()
+; CHECK-NEXT:    store i32 0, i32* [[P1]], align 4
+; CHECK-NEXT:    ret void
+;
   store i32 0, i32* %P1, align 4
   call void asm "", "~{memory}"()
   store i32 0, i32* %P1, align 4
@@ -110,32 +131,39 @@ define void @fence_asm_memory(i1 %B, i32* %P1, i32* %P2) {
 
 ; Can't remove a volatile load
 define i32 @volatile_load(i1 %B, i32* %P1, i32* %P2) {
+; CHECK-LABEL: @volatile_load(
+; CHECK-NEXT:    [[A:%.*]] = load i32, i32* [[P1:%.*]], align 4
+; CHECK-NEXT:    [[B:%.*]] = load volatile i32, i32* [[P1]], align 4
+; CHECK-NEXT:    [[RES:%.*]] = sub i32 [[A]], [[B]]
+; CHECK-NEXT:    ret i32 [[RES]]
+;
   %a = load i32, i32* %P1, align 4
   %b = load volatile i32, i32* %P1, align 4
   %res = sub i32 %a, %b
   ret i32 %res
-  ; CHECK-LABEL: @volatile_load
-  ; CHECK: load i32, i32* %P1
-  ; CHECK: load volatile i32, i32* %P1
 }
 
 ; Can't remove redundant volatile loads
 define i32 @redundant_volatile_load(i1 %B, i32* %P1, i32* %P2) {
+; CHECK-LABEL: @redundant_volatile_load(
+; CHECK-NEXT:    [[A:%.*]] = load volatile i32, i32* [[P1:%.*]], align 4
+; CHECK-NEXT:    [[B:%.*]] = load volatile i32, i32* [[P1]], align 4
+; CHECK-NEXT:    [[RES:%.*]] = sub i32 [[A]], [[B]]
+; CHECK-NEXT:    ret i32 [[RES]]
+;
   %a = load volatile i32, i32* %P1, align 4
   %b = load volatile i32, i32* %P1, align 4
   %res = sub i32 %a, %b
   ret i32 %res
-  ; CHECK-LABEL: @redundant_volatile_load
-  ; CHECK: load volatile i32, i32* %P1
-  ; CHECK: load volatile i32, i32* %P1
-  ; CHECK: sub
 }
 
 ; Can't DSE a volatile store
 define void @volatile_store(i1 %B, i32* %P1, i32* %P2) {
-; CHECK-LABEL: @volatile_store
-; CHECK: store volatile
-; CHECK: store
+; CHECK-LABEL: @volatile_store(
+; CHECK-NEXT:    store volatile i32 0, i32* [[P1:%.*]], align 4
+; CHECK-NEXT:    store i32 3, i32* [[P1]], align 4
+; CHECK-NEXT:    ret void
+;
   store volatile i32 0, i32* %P1, align 4
   store i32 3, i32* %P1, align 4
   ret void
@@ -143,9 +171,11 @@ define void @volatile_store(i1 %B, i32* %P1, i32* %P2) {
 
 ; Can't DSE a redundant volatile store
 define void @redundant_volatile_store(i1 %B, i32* %P1, i32* %P2) {
-; CHECK-LABEL: @redundant_volatile_store
-; CHECK: store volatile
-; CHECK: store volatile
+; CHECK-LABEL: @redundant_volatile_store(
+; CHECK-NEXT:    store volatile i32 0, i32* [[P1:%.*]], align 4
+; CHECK-NEXT:    store volatile i32 0, i32* [[P1]], align 4
+; CHECK-NEXT:    ret void
+;
   store volatile i32 0, i32* %P1, align 4
   store volatile i32 0, i32* %P1, align 4
   ret void
@@ -153,21 +183,24 @@ define void @redundant_volatile_store(i1 %B, i32* %P1, i32* %P2) {
 
 ; Can value forward from volatiles
 define i32 @test20(i1 %B, i32* %P1, i32* %P2) {
+; CHECK-LABEL: @test20(
+; CHECK-NEXT:    [[A:%.*]] = load volatile i32, i32* [[P1:%.*]], align 4
+; CHECK-NEXT:    ret i32 0
+;
   %a = load volatile i32, i32* %P1, align 4
   %b = load i32, i32* %P1, align 4
   %res = sub i32 %a, %b
   ret i32 %res
-  ; CHECK-LABEL: @test20
-  ; CHECK: load volatile i32, i32* %P1
-  ; CHECK: ret i32 0
 }
 
 ; Can DSE a non-volatile store in favor of a volatile one
 ; currently a missed optimization
 define void @test21(i1 %B, i32* %P1, i32* %P2) {
-; CHECK-LABEL: @test21
-; CHECK: store 
-; CHECK: store volatile
+; CHECK-LABEL: @test21(
+; CHECK-NEXT:    store i32 0, i32* [[P1:%.*]], align 4
+; CHECK-NEXT:    store volatile i32 3, i32* [[P1]], align 4
+; CHECK-NEXT:    ret void
+;
   store i32 0, i32* %P1, align 4
   store volatile i32 3, i32* %P1, align 4
   ret void
@@ -175,8 +208,10 @@ define void @test21(i1 %B, i32* %P1, i32* %P2) {
 
 ; Can DSE a normal store in favor of an unordered one
 define void @test22(i1 %B, i32* %P1, i32* %P2) {
-; CHECK-LABEL: @test22
-; CHECK-NEXT: store atomic
+; CHECK-LABEL: @test22(
+; CHECK-NEXT:    store atomic i32 3, i32* [[P1:%.*]] unordered, align 4
+; CHECK-NEXT:    ret void
+;
   store i32 0, i32* %P1, align 4
   store atomic i32 3, i32* %P1 unordered, align 4
   ret void
@@ -184,8 +219,10 @@ define void @test22(i1 %B, i32* %P1, i32* %P2) {
 
 ; Can also DSE an unordered store in favor of a normal one
 define void @test23(i1 %B, i32* %P1, i32* %P2) {
-; CHECK-LABEL: @test23
-; CHECK-NEXT: store i32 0
+; CHECK-LABEL: @test23(
+; CHECK-NEXT:    store i32 0, i32* [[P1:%.*]], align 4
+; CHECK-NEXT:    ret void
+;
   store atomic i32 3, i32* %P1 unordered, align 4
   store i32 0, i32* %P1, align 4
   ret void
@@ -195,9 +232,11 @@ define void @test23(i1 %B, i32* %P1, i32* %P2) {
 ; Note that we could remove the earlier store if we could
 ; represent the required ordering.
 define void @test24(i1 %B, i32* %P1, i32* %P2) {
-; CHECK-LABEL: @test24
-; CHECK-NEXT: store atomic
-; CHECK-NEXT: store i32 0
+; CHECK-LABEL: @test24(
+; CHECK-NEXT:    store atomic i32 3, i32* [[P1:%.*]] release, align 4
+; CHECK-NEXT:    store i32 0, i32* [[P1]], align 4
+; CHECK-NEXT:    ret void
+;
   store atomic i32 3, i32* %P1 release, align 4
   store i32 0, i32* %P1, align 4
   ret void
@@ -206,9 +245,11 @@ define void @test24(i1 %B, i32* %P1, i32* %P2) {
 ; Can't remove volatile stores - each is independently observable and
 ; the count of such stores is an observable program side effect.
 define void @test25(i1 %B, i32* %P1, i32* %P2) {
-; CHECK-LABEL: @test25
-; CHECK-NEXT: store volatile
-; CHECK-NEXT: store volatile
+; CHECK-LABEL: @test25(
+; CHECK-NEXT:    store volatile i32 3, i32* [[P1:%.*]], align 4
+; CHECK-NEXT:    store volatile i32 0, i32* [[P1]], align 4
+; CHECK-NEXT:    ret void
+;
   store volatile i32 3, i32* %P1, align 4
   store volatile i32 0, i32* %P1, align 4
   ret void
@@ -216,9 +257,10 @@ define void @test25(i1 %B, i32* %P1, i32* %P2) {
 
 ; Can DSE an unordered store in favor of an unordered one
 define void @test26(i1 %B, i32* %P1, i32* %P2) {
-; CHECK-LABEL: @test26
-; CHECK-NEXT: store atomic i32 3, i32* %P1 unordered, align 4
-; CHECK-NEXT: ret
+; CHECK-LABEL: @test26(
+; CHECK-NEXT:    store atomic i32 3, i32* [[P1:%.*]] unordered, align 4
+; CHECK-NEXT:    ret void
+;
   store atomic i32 0, i32* %P1 unordered, align 4
   store atomic i32 3, i32* %P1 unordered, align 4
   ret void
@@ -227,10 +269,11 @@ define void @test26(i1 %B, i32* %P1, i32* %P2) {
 ; Can DSE an unordered store in favor of an ordered one,
 ; but currently don't due to implementation limits
 define void @test27(i1 %B, i32* %P1, i32* %P2) {
-; CHECK-LABEL: @test27
-; CHECK-NEXT: store atomic i32 0, i32* %P1 unordered, align 4
-; CHECK-NEXT: store atomic i32 3, i32* %P1 release, align 4
-; CHECK-NEXT: ret
+; CHECK-LABEL: @test27(
+; CHECK-NEXT:    store atomic i32 0, i32* [[P1:%.*]] unordered, align 4
+; CHECK-NEXT:    store atomic i32 3, i32* [[P1]] release, align 4
+; CHECK-NEXT:    ret void
+;
   store atomic i32 0, i32* %P1 unordered, align 4
   store atomic i32 3, i32* %P1 release, align 4
   ret void
@@ -239,10 +282,11 @@ define void @test27(i1 %B, i32* %P1, i32* %P2) {
 ; Can DSE an unordered atomic store in favor of an
 ; ordered one, but currently don't due to implementation limits
 define void @test28(i1 %B, i32* %P1, i32* %P2) {
-; CHECK-LABEL: @test28
-; CHECK-NEXT: store atomic i32 0, i32* %P1 unordered, align 4
-; CHECK-NEXT: store atomic i32 3, i32* %P1 release, align 4
-; CHECK-NEXT: ret
+; CHECK-LABEL: @test28(
+; CHECK-NEXT:    store atomic i32 0, i32* [[P1:%.*]] unordered, align 4
+; CHECK-NEXT:    store atomic i32 3, i32* [[P1]] release, align 4
+; CHECK-NEXT:    ret void
+;
   store atomic i32 0, i32* %P1 unordered, align 4
   store atomic i32 3, i32* %P1 release, align 4
   ret void
@@ -251,9 +295,11 @@ define void @test28(i1 %B, i32* %P1, i32* %P2) {
 ; As an implementation limitation, can't remove ordered stores
 ; see also: @test24
 define void @test29(i1 %B, i32* %P1, i32* %P2) {
-; CHECK-LABEL: @test29
-; CHECK-NEXT: store atomic
-; CHECK-NEXT: store atomic
+; CHECK-LABEL: @test29(
+; CHECK-NEXT:    store atomic i32 3, i32* [[P1:%.*]] release, align 4
+; CHECK-NEXT:    store atomic i32 0, i32* [[P1]] unordered, align 4
+; CHECK-NEXT:    ret void
+;
   store atomic i32 3, i32* %P1 release, align 4
   store atomic i32 0, i32* %P1 unordered, align 4
   ret void

diff --git a/llvm/test/Transforms/EarlyCSE/basic.ll b/llvm/test/Transforms/EarlyCSE/basic.ll
index 5178e5a89e205..df4c5c6c13ac3 100644
--- a/llvm/test/Transforms/EarlyCSE/basic.ll
+++ b/llvm/test/Transforms/EarlyCSE/basic.ll
@@ -1,62 +1,78 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -S -early-cse -earlycse-debug-hash | FileCheck %s
 ; RUN: opt < %s -S -basic-aa -early-cse-memssa | FileCheck %s
 ; RUN: opt < %s -S -passes=early-cse | FileCheck %s
 
 declare void @llvm.assume(i1) nounwind
 
-; CHECK-LABEL: @test1(
 define void @test1(i8 %V, i32 *%P) {
+; CHECK-LABEL: @test1(
+; CHECK-NEXT:    store i32 23, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[C:%.*]] = zext i8 [[V:%.*]] to i32
+; CHECK-NEXT:    store volatile i32 [[C]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[C]], i32* [[P]], align 4
+; CHECK-NEXT:    [[E:%.*]] = add i32 [[C]], [[C]]
+; CHECK-NEXT:    store volatile i32 [[E]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[E]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[E]], i32* [[P]], align 4
+; CHECK-NEXT:    ret void
+;
   %A = bitcast i64 42 to double  ;; dead
   %B = add i32 4, 19             ;; constant folds
   store i32 %B, i32* %P
-  ; CHECK-NEXT: store i32 23, i32* %P
-  
+
   %C = zext i8 %V to i32
   %D = zext i8 %V to i32  ;; CSE
   store volatile i32 %C, i32* %P
   store volatile i32 %D, i32* %P
-  ; CHECK-NEXT: %C = zext i8 %V to i32
-  ; CHECK-NEXT: store volatile i32 %C
-  ; CHECK-NEXT: store volatile i32 %C
-  
+
   %E = add i32 %C, %C
   %F = add i32 %C, %C
   store volatile i32 %E, i32* %P
   store volatile i32 %F, i32* %P
-  ; CHECK-NEXT: %E = add i32 %C, %C
-  ; CHECK-NEXT: store volatile i32 %E
-  ; CHECK-NEXT: store volatile i32 %E
 
   %G = add nuw i32 %C, %C
   store volatile i32 %G, i32* %P
-  ; CHECK-NEXT: store volatile i32 %E
   ret void
 }
 
 
 ;; Simple load value numbering.
-; CHECK-LABEL: @test2(
 define i32 @test2(i32 *%P) {
+; CHECK-LABEL: @test2(
+; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    ret i32 0
+;
   %V1 = load i32, i32* %P
   %V2 = load i32, i32* %P
   %Diff = sub i32 %V1, %V2
   ret i32 %Diff
-  ; CHECK: ret i32 0
 }
 
-; CHECK-LABEL: @test2a(
 define i32 @test2a(i32 *%P, i1 %b) {
+; CHECK-LABEL: @test2a(
+; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    tail call void @llvm.assume(i1 [[B:%.*]])
+; CHECK-NEXT:    ret i32 0
+;
   %V1 = load i32, i32* %P
   tail call void @llvm.assume(i1 %b)
   %V2 = load i32, i32* %P
   %Diff = sub i32 %V1, %V2
   ret i32 %Diff
-  ; CHECK: ret i32 0
 }
 
 ;; Cross block load value numbering.
-; CHECK-LABEL: @test3(
 define i32 @test3(i32 *%P, i1 %Cond) {
+; CHECK-LABEL: @test3(
+; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]]
+; CHECK:       T:
+; CHECK-NEXT:    store i32 4, i32* [[P]], align 4
+; CHECK-NEXT:    ret i32 42
+; CHECK:       F:
+; CHECK-NEXT:    ret i32 0
+;
   %V1 = load i32, i32* %P
   br i1 %Cond, label %T, label %F
 T:
@@ -66,12 +82,19 @@ F:
   %V2 = load i32, i32* %P
   %Diff = sub i32 %V1, %V2
   ret i32 %Diff
-  ; CHECK: F:
-  ; CHECK: ret i32 0
 }
 
-; CHECK-LABEL: @test3a(
 define i32 @test3a(i32 *%P, i1 %Cond, i1 %b) {
+; CHECK-LABEL: @test3a(
+; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]]
+; CHECK:       T:
+; CHECK-NEXT:    store i32 4, i32* [[P]], align 4
+; CHECK-NEXT:    ret i32 42
+; CHECK:       F:
+; CHECK-NEXT:    tail call void @llvm.assume(i1 [[B:%.*]])
+; CHECK-NEXT:    ret i32 0
+;
   %V1 = load i32, i32* %P
   br i1 %Cond, label %T, label %F
 T:
@@ -82,13 +105,20 @@ F:
   %V2 = load i32, i32* %P
   %Diff = sub i32 %V1, %V2
   ret i32 %Diff
-  ; CHECK: F:
-  ; CHECK: ret i32 0
 }
 
 ;; Cross block load value numbering stops when stores happen.
-; CHECK-LABEL: @test4(
 define i32 @test4(i32 *%P, i1 %Cond) {
+; CHECK-LABEL: @test4(
+; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]]
+; CHECK:       T:
+; CHECK-NEXT:    ret i32 42
+; CHECK:       F:
+; CHECK-NEXT:    store i32 42, i32* [[P]], align 4
+; CHECK-NEXT:    [[DIFF:%.*]] = sub i32 [[V1]], 42
+; CHECK-NEXT:    ret i32 [[DIFF]]
+;
   %V1 = load i32, i32* %P
   br i1 %Cond, label %T, label %F
 T:
@@ -96,142 +126,166 @@ T:
 F:
   ; Clobbers V1
   store i32 42, i32* %P
-  
+
   %V2 = load i32, i32* %P
   %Diff = sub i32 %V1, %V2
   ret i32 %Diff
-  ; CHECK: F:
-  ; CHECK: ret i32 %Diff
 }
 
 declare i32 @func(i32 *%P) readonly
 
 ;; Simple call CSE'ing.
-; CHECK-LABEL: @test5(
 define i32 @test5(i32 *%P) {
+; CHECK-LABEL: @test5(
+; CHECK-NEXT:    [[V1:%.*]] = call i32 @func(i32* [[P:%.*]])
+; CHECK-NEXT:    ret i32 0
+;
   %V1 = call i32 @func(i32* %P)
   %V2 = call i32 @func(i32* %P)
   %Diff = sub i32 %V1, %V2
   ret i32 %Diff
-  ; CHECK: ret i32 0
 }
 
 ;; Trivial Store->load forwarding
-; CHECK-LABEL: @test6(
 define i32 @test6(i32 *%P) {
+; CHECK-LABEL: @test6(
+; CHECK-NEXT:    store i32 42, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    ret i32 42
+;
   store i32 42, i32* %P
   %V1 = load i32, i32* %P
   ret i32 %V1
-  ; CHECK: ret i32 42
 }
 
-; CHECK-LABEL: @test6a(
 define i32 @test6a(i32 *%P, i1 %b) {
+; CHECK-LABEL: @test6a(
+; CHECK-NEXT:    store i32 42, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    tail call void @llvm.assume(i1 [[B:%.*]])
+; CHECK-NEXT:    ret i32 42
+;
   store i32 42, i32* %P
   tail call void @llvm.assume(i1 %b)
   %V1 = load i32, i32* %P
   ret i32 %V1
-  ; CHECK: ret i32 42
 }
 
 ;; Trivial dead store elimination.
-; CHECK-LABEL: @test7(
 define void @test7(i32 *%P) {
+; CHECK-LABEL: @test7(
+; CHECK-NEXT:    store i32 45, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    ret void
+;
   store i32 42, i32* %P
   store i32 45, i32* %P
   ret void
-  ; CHECK-NEXT: store i32 45
-  ; CHECK-NEXT: ret void
 }
 
 ;; Readnone functions aren't invalidated by stores.
-; CHECK-LABEL: @test8(
 define i32 @test8(i32 *%P) {
+; CHECK-LABEL: @test8(
+; CHECK-NEXT:    [[V1:%.*]] = call i32 @func(i32* [[P:%.*]]) #[[ATTR2:[0-9]+]]
+; CHECK-NEXT:    store i32 4, i32* [[P]], align 4
+; CHECK-NEXT:    ret i32 0
+;
   %V1 = call i32 @func(i32* %P) readnone
   store i32 4, i32* %P
   %V2 = call i32 @func(i32* %P) readnone
   %Diff = sub i32 %V1, %V2
   ret i32 %Diff
-  ; CHECK: ret i32 0
 }
 
 ;; Trivial DSE can't be performed across a readonly call.  The call
 ;; can observe the earlier write.
-; CHECK-LABEL: @test9(
 define i32 @test9(i32 *%P) {
+; CHECK-LABEL: @test9(
+; CHECK-NEXT:    store i32 4, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[V1:%.*]] = call i32 @func(i32* [[P]]) #[[ATTR1:[0-9]+]]
+; CHECK-NEXT:    store i32 5, i32* [[P]], align 4
+; CHECK-NEXT:    ret i32 [[V1]]
+;
   store i32 4, i32* %P
   %V1 = call i32 @func(i32* %P) readonly
-  store i32 5, i32* %P        
+  store i32 5, i32* %P
   ret i32 %V1
-  ; CHECK: store i32 4, i32* %P        
-  ; CHECK-NEXT: %V1 = call i32 @func(i32* %P)
-  ; CHECK-NEXT: store i32 5, i32* %P        
-  ; CHECK-NEXT: ret i32 %V1
 }
 
 ;; Trivial DSE can be performed across a readnone call.
-; CHECK-LABEL: @test10
 define i32 @test10(i32 *%P) {
+; CHECK-LABEL: @test10(
+; CHECK-NEXT:    [[V1:%.*]] = call i32 @func(i32* [[P:%.*]]) #[[ATTR2]]
+; CHECK-NEXT:    store i32 5, i32* [[P]], align 4
+; CHECK-NEXT:    ret i32 [[V1]]
+;
   store i32 4, i32* %P
   %V1 = call i32 @func(i32* %P) readnone
-  store i32 5, i32* %P        
+  store i32 5, i32* %P
   ret i32 %V1
-  ; CHECK-NEXT: %V1 = call i32 @func(i32* %P)
-  ; CHECK-NEXT: store i32 5, i32* %P        
-  ; CHECK-NEXT: ret i32 %V1
 }
 
 ;; Trivial dead store elimination - should work for an entire series of dead stores too.
-; CHECK-LABEL: @test11(
 define void @test11(i32 *%P) {
+; CHECK-LABEL: @test11(
+; CHECK-NEXT:    store i32 45, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    ret void
+;
   store i32 42, i32* %P
   store i32 43, i32* %P
   store i32 44, i32* %P
   store i32 45, i32* %P
   ret void
-  ; CHECK-NEXT: store i32 45
-  ; CHECK-NEXT: ret void
 }
 
-; CHECK-LABEL: @test12(
 define i32 @test12(i1 %B, i32* %P1, i32* %P2) {
+; CHECK-LABEL: @test12(
+; CHECK-NEXT:    [[LOAD0:%.*]] = load i32, i32* [[P1:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i32, i32* [[P2:%.*]] seq_cst, align 4
+; CHECK-NEXT:    [[LOAD1:%.*]] = load i32, i32* [[P1]], align 4
+; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[B:%.*]], i32 [[LOAD0]], i32 [[LOAD1]]
+; CHECK-NEXT:    ret i32 [[SEL]]
+;
   %load0 = load i32, i32* %P1
   %1 = load atomic i32, i32* %P2 seq_cst, align 4
   %load1 = load i32, i32* %P1
   %sel = select i1 %B, i32 %load0, i32 %load1
   ret i32 %sel
-  ; CHECK: load i32, i32* %P1
-  ; CHECK: load i32, i32* %P1
 }
 
 define void @dse1(i32 *%P) {
-; CHECK-LABEL: @dse1
-; CHECK-NOT: store
+; CHECK-LABEL: @dse1(
+; CHECK-NEXT:    [[V:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    ret void
+;
   %v = load i32, i32* %P
   store i32 %v, i32* %P
   ret void
 }
 
 define void @dse2(i32 *%P) {
-; CHECK-LABEL: @dse2
-; CHECK-NOT: store
+; CHECK-LABEL: @dse2(
+; CHECK-NEXT:    [[V:%.*]] = load atomic i32, i32* [[P:%.*]] seq_cst, align 4
+; CHECK-NEXT:    ret void
+;
   %v = load atomic i32, i32* %P seq_cst, align 4
   store i32 %v, i32* %P
   ret void
 }
 
 define void @dse3(i32 *%P) {
-; CHECK-LABEL: @dse3
-; CHECK-NOT: store
+; CHECK-LABEL: @dse3(
+; CHECK-NEXT:    [[V:%.*]] = load atomic i32, i32* [[P:%.*]] seq_cst, align 4
+; CHECK-NEXT:    ret void
+;
   %v = load atomic i32, i32* %P seq_cst, align 4
   store atomic i32 %v, i32* %P unordered, align 4
   ret void
 }
 
 define i32 @dse4(i32 *%P, i32 *%Q) {
-; CHECK-LABEL: @dse4
-; CHECK-NOT: store
-; CHECK: ret i32 0
+; CHECK-LABEL: @dse4(
+; CHECK-NEXT:    [[A:%.*]] = load i32, i32* [[Q:%.*]], align 4
+; CHECK-NEXT:    [[V:%.*]] = load atomic i32, i32* [[P:%.*]] unordered, align 4
+; CHECK-NEXT:    ret i32 0
+;
   %a = load i32, i32* %Q
   %v = load atomic i32, i32* %P unordered, align 4
   store atomic i32 %v, i32* %P unordered, align 4
@@ -242,14 +296,16 @@ define i32 @dse4(i32 *%P, i32 *%Q) {
 
 ; Note that in this example, %P and %Q could in fact be the same
 ; pointer.  %v could be different than the value observed for %a
-; and that's okay because we're using relaxed memory ordering.  
-; The only guarantee we have to provide is that each of the loads 
-; has to observe some value written to that location.  We  do 
-; not have to respect the order in which those writes were done.  
+; and that's okay because we're using relaxed memory ordering.
+; The only guarantee we have to provide is that each of the loads
+; has to observe some value written to that location.  We  do
+; not have to respect the order in which those writes were done.
 define i32 @dse5(i32 *%P, i32 *%Q) {
-; CHECK-LABEL: @dse5
-; CHECK-NOT: store
-; CHECK: ret i32 0
+; CHECK-LABEL: @dse5(
+; CHECK-NEXT:    [[V:%.*]] = load atomic i32, i32* [[P:%.*]] unordered, align 4
+; CHECK-NEXT:    [[A:%.*]] = load atomic i32, i32* [[Q:%.*]] unordered, align 4
+; CHECK-NEXT:    ret i32 0
+;
   %v = load atomic i32, i32* %P unordered, align 4
   %a = load atomic i32, i32* %Q unordered, align 4
   store atomic i32 %v, i32* %P unordered, align 4
@@ -260,8 +316,10 @@ define i32 @dse5(i32 *%P, i32 *%Q) {
 
 
 define void @dse_neg1(i32 *%P) {
-; CHECK-LABEL: @dse_neg1
-; CHECK: store
+; CHECK-LABEL: @dse_neg1(
+; CHECK-NEXT:    store i32 5, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    ret void
+;
   %v = load i32, i32* %P
   store i32 5, i32* %P
   ret void
@@ -270,8 +328,11 @@ define void @dse_neg1(i32 *%P) {
 ; Could remove the store, but only if ordering was somehow
 ; encoded.
 define void @dse_neg2(i32 *%P) {
-; CHECK-LABEL: @dse_neg2
-; CHECK: store
+; CHECK-LABEL: @dse_neg2(
+; CHECK-NEXT:    [[V:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store atomic i32 [[V]], i32* [[P]] seq_cst, align 4
+; CHECK-NEXT:    ret void
+;
   %v = load i32, i32* %P
   store atomic i32 %v, i32* %P seq_cst, align 4
   ret void
@@ -280,11 +341,14 @@ define void @dse_neg2(i32 *%P) {
 @c = external global i32, align 4
 declare i32 @reads_c(i32 returned)
 define void @pr28763() {
-entry:
 ; CHECK-LABEL: @pr28763(
-; CHECK: store i32 0, i32* @c, align 4
-; CHECK: call i32 @reads_c(i32 0)
-; CHECK: store i32 2, i32* @c, align 4
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    store i32 0, i32* @c, align 4
+; CHECK-NEXT:    [[CALL:%.*]] = call i32 @reads_c(i32 0)
+; CHECK-NEXT:    store i32 2, i32* @c, align 4
+; CHECK-NEXT:    ret void
+;
+entry:
   %load = load i32, i32* @c, align 4
   store i32 0, i32* @c, align 4
   %call = call i32 @reads_c(i32 0)
@@ -293,10 +357,12 @@ entry:
 }
 
 define i1 @cse_freeze(i1 %a) {
-entry:
 ; CHECK-LABEL: @cse_freeze(
-; CHECK: %b = freeze i1 %a
-; CHECK: ret i1 %b
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[B:%.*]] = freeze i1 [[A:%.*]]
+; CHECK-NEXT:    ret i1 [[B]]
+;
+entry:
   %b = freeze i1 %a
   %c = freeze i1 %a
   %and = and i1 %b, %c

diff --git a/llvm/test/Transforms/EarlyCSE/const-speculation.ll b/llvm/test/Transforms/EarlyCSE/const-speculation.ll
index a531c14da770c..bf4469ca37331 100644
--- a/llvm/test/Transforms/EarlyCSE/const-speculation.ll
+++ b/llvm/test/Transforms/EarlyCSE/const-speculation.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -early-cse -earlycse-debug-hash -S %s | FileCheck %s
 
 %mystruct = type { i32 }
@@ -15,15 +16,20 @@
 ; crash.
 
 define i1 @test_constant_speculation() {
-; CHECK-LABEL: define i1 @test_constant_speculation
+; CHECK-LABEL: @test_constant_speculation(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br i1 undef, label [[END:%.*]], label [[SELECT:%.*]]
+; CHECK:       select:
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       end:
+; CHECK-NEXT:    [[TMP:%.*]] = phi i32* [ null, [[ENTRY:%.*]] ], [ getelementptr inbounds ([[MYSTRUCT:%.*]], %mystruct* @var, i64 0, i32 0), [[SELECT]] ]
+; CHECK-NEXT:    [[RES:%.*]] = icmp eq i32* [[TMP]], null
+; CHECK-NEXT:    ret i1 [[RES]]
+;
 entry:
   br i1 undef, label %end, label %select
 
 select:
-; CHECK: select:
-; CHECK-NOT: icmp
-; CHECK-NOT: getelementptr
-; CHECK-NOT: select
 
   %tst = icmp eq i32 1, 0
   %elt = getelementptr %mystruct, %mystruct* @var, i64 0, i32 0
@@ -31,8 +37,6 @@ select:
   br label %end
 
 end:
-; CHECK: end:
-; CHECK: %tmp = phi i32* [ null, %entry ], [ getelementptr inbounds (%mystruct, %mystruct* @var, i64 0, i32 0), %select ]
   %tmp = phi i32* [null, %entry], [%sel, %select]
   %res = icmp eq i32* %tmp, null
   ret i1 %res

diff --git a/llvm/test/Transforms/EarlyCSE/floatingpoint.ll b/llvm/test/Transforms/EarlyCSE/floatingpoint.ll
index a4293f5eed9c1..c7579adfdd3cd 100644
--- a/llvm/test/Transforms/EarlyCSE/floatingpoint.ll
+++ b/llvm/test/Transforms/EarlyCSE/floatingpoint.ll
@@ -1,27 +1,36 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -S -early-cse -earlycse-debug-hash | FileCheck %s
 ; RUN: opt < %s -S -basic-aa -early-cse-memssa | FileCheck %s
 
 ; Ensure we don't simplify away additions of vectors of +0.0's (same as scalars).
 define <4 x float> @fV( <4 x float> %a) {
-       ; CHECK: %b = fadd <4 x float> %a, zeroinitializer
-       %b = fadd  <4 x float> %a, <float 0.0,float 0.0,float 0.0,float 0.0>
-       ret <4 x float> %b
+; CHECK-LABEL: @fV(
+; CHECK-NEXT:    [[B:%.*]] = fadd <4 x float> [[A:%.*]], zeroinitializer
+; CHECK-NEXT:    ret <4 x float> [[B]]
+;
+  %b = fadd  <4 x float> %a, <float 0.0,float 0.0,float 0.0,float 0.0>
+  ret <4 x float> %b
 }
 
 define <4 x float> @fW( <4 x float> %a) {
-       ; CHECK: ret <4 x float> %a
-       %b = fadd  <4 x float> %a, <float -0.0,float -0.0,float -0.0,float -0.0>
-       ret <4 x float> %b
+; CHECK-LABEL: @fW(
+; CHECK-NEXT:    ret <4 x float> [[A:%.*]]
+;
+  %b = fadd  <4 x float> %a, <float -0.0,float -0.0,float -0.0,float -0.0>
+  ret <4 x float> %b
 }
 
 ; CSE unary fnegs.
 define void @fX(<4 x float> *%p, <4 x float> %a) {
-       ; CHECK: %x = fneg <4 x float> %a
-       ; CHECK-NEXT: store volatile <4 x float> %x, <4 x float>* %p
-       ; CHECK-NEXT: store volatile <4 x float> %x, <4 x float>* %p
-       %x = fneg <4 x float> %a
-       %y = fneg <4 x float> %a
-       store volatile <4 x float> %x, <4 x float>* %p
-       store volatile <4 x float> %y, <4 x float>* %p
-       ret void
+; CHECK-LABEL: @fX(
+; CHECK-NEXT:    [[X:%.*]] = fneg <4 x float> [[A:%.*]]
+; CHECK-NEXT:    store volatile <4 x float> [[X]], <4 x float>* [[P:%.*]], align 16
+; CHECK-NEXT:    store volatile <4 x float> [[X]], <4 x float>* [[P]], align 16
+; CHECK-NEXT:    ret void
+;
+  %x = fneg <4 x float> %a
+  %y = fneg <4 x float> %a
+  store volatile <4 x float> %x, <4 x float>* %p
+  store volatile <4 x float> %y, <4 x float>* %p
+  ret void
 }

diff --git a/llvm/test/Transforms/EarlyCSE/memoryssa.ll b/llvm/test/Transforms/EarlyCSE/memoryssa.ll
index 23c7137ca8c63..730e8104452dc 100644
--- a/llvm/test/Transforms/EarlyCSE/memoryssa.ll
+++ b/llvm/test/Transforms/EarlyCSE/memoryssa.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -S -early-cse -earlycse-debug-hash | FileCheck %s --check-prefix=CHECK-NOMEMSSA
 ; RUN: opt < %s -S -basic-aa -early-cse-memssa | FileCheck %s
 ; RUN: opt < %s -S -passes='early-cse' | FileCheck %s --check-prefix=CHECK-NOMEMSSA
@@ -8,61 +9,86 @@
 @G3 = global i32 zeroinitializer
 
 ;; Simple load value numbering across non-clobbering store.
-; CHECK-LABEL: @test1(
-; CHECK-NOMEMSSA-LABEL: @test1(
 define i32 @test1() {
+; CHECK-NOMEMSSA-LABEL: @test1(
+; CHECK-NOMEMSSA-NEXT:    [[V1:%.*]] = load i32, i32* @G1, align 4
+; CHECK-NOMEMSSA-NEXT:    store i32 0, i32* @G2, align 4
+; CHECK-NOMEMSSA-NEXT:    [[V2:%.*]] = load i32, i32* @G1, align 4
+; CHECK-NOMEMSSA-NEXT:    [[DIFF:%.*]] = sub i32 [[V1]], [[V2]]
+; CHECK-NOMEMSSA-NEXT:    ret i32 [[DIFF]]
+;
+; CHECK-LABEL: @test1(
+; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* @G1, align 4
+; CHECK-NEXT:    store i32 0, i32* @G2, align 4
+; CHECK-NEXT:    ret i32 0
+;
   %V1 = load i32, i32* @G1
   store i32 0, i32* @G2
   %V2 = load i32, i32* @G1
-  ; CHECK-NOMEMSSA: sub i32 %V1, %V2
   %Diff = sub i32 %V1, %V2
   ret i32 %Diff
-  ; CHECK: ret i32 0
 }
 
 ;; Simple dead store elimination across non-clobbering store.
-; CHECK-LABEL: @test2(
-; CHECK-NOMEMSSA-LABEL: @test2(
 define void @test2() {
+; CHECK-NOMEMSSA-LABEL: @test2(
+; CHECK-NOMEMSSA-NEXT:  entry:
+; CHECK-NOMEMSSA-NEXT:    [[V1:%.*]] = load i32, i32* @G1, align 4
+; CHECK-NOMEMSSA-NEXT:    store i32 0, i32* @G2, align 4
+; CHECK-NOMEMSSA-NEXT:    store i32 [[V1]], i32* @G1, align 4
+; CHECK-NOMEMSSA-NEXT:    ret void
+;
+; CHECK-LABEL: @test2(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* @G1, align 4
+; CHECK-NEXT:    store i32 0, i32* @G2, align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   %V1 = load i32, i32* @G1
-  ; CHECK: store i32 0, i32* @G2
   store i32 0, i32* @G2
-  ; CHECK-NOT: store
-  ; CHECK-NOMEMSSA: store i32 %V1, i32* @G1
   store i32 %V1, i32* @G1
   ret void
 }
 
 ;; Check that memoryphi optimization happens during EarlyCSE, enabling
 ;; more load CSE opportunities.
-; CHECK-LABEL: @test_memphiopt(
-; CHECK-NOMEMSSA-LABEL: @test_memphiopt(
 define void @test_memphiopt(i1 %c, i32* %p) {
-; CHECK-LABEL: entry:
-; CHECK-NOMEMSSA-LABEL: entry:
+; CHECK-NOMEMSSA-LABEL: @test_memphiopt(
+; CHECK-NOMEMSSA-NEXT:  entry:
+; CHECK-NOMEMSSA-NEXT:    [[V1:%.*]] = load i32, i32* @G1, align 4
+; CHECK-NOMEMSSA-NEXT:    br i1 [[C:%.*]], label [[THEN:%.*]], label [[END:%.*]]
+; CHECK-NOMEMSSA:       then:
+; CHECK-NOMEMSSA-NEXT:    [[PV:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NOMEMSSA-NEXT:    br label [[END]]
+; CHECK-NOMEMSSA:       end:
+; CHECK-NOMEMSSA-NEXT:    [[V2:%.*]] = load i32, i32* @G1, align 4
+; CHECK-NOMEMSSA-NEXT:    [[SUM:%.*]] = add i32 [[V1]], [[V2]]
+; CHECK-NOMEMSSA-NEXT:    store i32 [[SUM]], i32* @G2, align 4
+; CHECK-NOMEMSSA-NEXT:    ret void
+;
+; CHECK-LABEL: @test_memphiopt(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* @G1, align 4
+; CHECK-NEXT:    br i1 [[C:%.*]], label [[THEN:%.*]], label [[END:%.*]]
+; CHECK:       then:
+; CHECK-NEXT:    [[PV:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       end:
+; CHECK-NEXT:    [[SUM:%.*]] = add i32 [[V1]], [[V1]]
+; CHECK-NEXT:    store i32 [[SUM]], i32* @G2, align 4
+; CHECK-NEXT:    ret void
+;
 entry:
-; CHECK: load
-; CHECK-NOMEMSSA: load
   %v1 = load i32, i32* @G1
   br i1 %c, label %then, label %end
 
-; CHECK-LABEL: then:
-; CHECK-NOMEMSSA-LABEL: then:
 then:
-; CHECK: load
-; CHECK-NOMEMSSA: load
   %pv = load i32, i32* %p
-; CHECK-NOT: store
-; CHECK-NOMEMSSA-NOT: store
   store i32 %pv, i32* %p
   br label %end
 
-; CHECK-LABEL: end:
-; CHECK-NOMEMSSA-LABEL: end:
 end:
-; CHECK-NOT: load
-; CHECK-NOMEMSSA: load
   %v2 = load i32, i32* @G1
   %sum = add i32 %v1, %v2
   store i32 %sum, i32* @G2
@@ -72,36 +98,43 @@ end:
 
 ;; Check that MemoryPhi optimization and MemoryUse re-optimization
 ;; happens during EarlyCSE, enabling more load CSE opportunities.
-; CHECK-LABEL: @test_memphiopt2(
-; CHECK-NOMEMSSA-LABEL: @test_memphiopt2(
 define void @test_memphiopt2(i1 %c, i32* %p) {
-; CHECK-LABEL: entry:
-; CHECK-NOMEMSSA-LABEL: entry:
+; CHECK-NOMEMSSA-LABEL: @test_memphiopt2(
+; CHECK-NOMEMSSA-NEXT:  entry:
+; CHECK-NOMEMSSA-NEXT:    [[V1:%.*]] = load i32, i32* @G1, align 4
+; CHECK-NOMEMSSA-NEXT:    store i32 [[V1]], i32* @G2, align 4
+; CHECK-NOMEMSSA-NEXT:    br i1 [[C:%.*]], label [[THEN:%.*]], label [[END:%.*]]
+; CHECK-NOMEMSSA:       then:
+; CHECK-NOMEMSSA-NEXT:    [[PV:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NOMEMSSA-NEXT:    br label [[END]]
+; CHECK-NOMEMSSA:       end:
+; CHECK-NOMEMSSA-NEXT:    [[V2:%.*]] = load i32, i32* @G1, align 4
+; CHECK-NOMEMSSA-NEXT:    store i32 [[V2]], i32* @G3, align 4
+; CHECK-NOMEMSSA-NEXT:    ret void
+;
+; CHECK-LABEL: @test_memphiopt2(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* @G1, align 4
+; CHECK-NEXT:    store i32 [[V1]], i32* @G2, align 4
+; CHECK-NEXT:    br i1 [[C:%.*]], label [[THEN:%.*]], label [[END:%.*]]
+; CHECK:       then:
+; CHECK-NEXT:    [[PV:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       end:
+; CHECK-NEXT:    store i32 [[V1]], i32* @G3, align 4
+; CHECK-NEXT:    ret void
+;
 entry:
-; CHECK: load
-; CHECK-NOMEMSSA: load
   %v1 = load i32, i32* @G1
-; CHECK: store
-; CHECK-NOMEMSSA: store
   store i32 %v1, i32* @G2
   br i1 %c, label %then, label %end
 
-; CHECK-LABEL: then:
-; CHECK-NOMEMSSA-LABEL: then:
 then:
-; CHECK: load
-; CHECK-NOMEMSSA: load
   %pv = load i32, i32* %p
-; CHECK-NOT: store
-; CHECK-NOMEMSSA-NOT: store
   store i32 %pv, i32* %p
   br label %end
 
-; CHECK-LABEL: end:
-; CHECK-NOMEMSSA-LABEL: end:
 end:
-; CHECK-NOT: load
-; CHECK-NOMEMSSA: load
   %v2 = load i32, i32* @G1
   store i32 %v2, i32* @G3
   ret void
@@ -109,39 +142,69 @@ end:
 
 ;; Check that we respect lifetime.start/lifetime.end intrinsics when deleting
 ;; stores that, without the lifetime calls, would be writebacks.
-; CHECK-LABEL: @test_writeback_lifetimes(
-; CHECK-NOMEMSSA-LABEL: @test_writeback_lifetimes(
 define void @test_writeback_lifetimes(i32* %p) {
+; CHECK-NOMEMSSA-LABEL: @test_writeback_lifetimes(
+; CHECK-NOMEMSSA-NEXT:  entry:
+; CHECK-NOMEMSSA-NEXT:    [[Q:%.*]] = getelementptr i32, i32* [[P:%.*]], i64 1
+; CHECK-NOMEMSSA-NEXT:    [[PV:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NOMEMSSA-NEXT:    [[QV:%.*]] = load i32, i32* [[Q]], align 4
+; CHECK-NOMEMSSA-NEXT:    call void @llvm.lifetime.end.p0i32(i64 8, i32* [[P]])
+; CHECK-NOMEMSSA-NEXT:    call void @llvm.lifetime.start.p0i32(i64 8, i32* [[P]])
+; CHECK-NOMEMSSA-NEXT:    store i32 [[PV]], i32* [[P]], align 4
+; CHECK-NOMEMSSA-NEXT:    store i32 [[QV]], i32* [[Q]], align 4
+; CHECK-NOMEMSSA-NEXT:    ret void
+;
+; CHECK-LABEL: @test_writeback_lifetimes(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[Q:%.*]] = getelementptr i32, i32* [[P:%.*]], i64 1
+; CHECK-NEXT:    [[PV:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT:    [[QV:%.*]] = load i32, i32* [[Q]], align 4
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0i32(i64 8, i32* [[P]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0i32(i64 8, i32* [[P]])
+; CHECK-NEXT:    store i32 [[PV]], i32* [[P]], align 4
+; CHECK-NEXT:    store i32 [[QV]], i32* [[Q]], align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   %q = getelementptr i32, i32* %p, i64 1
   %pv = load i32, i32* %p
   %qv = load i32, i32* %q
   call void @llvm.lifetime.end.p0i8(i64 8, i32* %p)
   call void @llvm.lifetime.start.p0i8(i64 8, i32* %p)
-  ; CHECK: store i32 %pv
-  ; CHECK-NOMEMSSA-LABEL: store i32 %pv
   store i32 %pv, i32* %p
-  ; CHECK: store i32 %qv, i32* %q
-  ; CHECK-NOMEMSSA-LABEL: store i32 %qv, i32* %q
   store i32 %qv, i32* %q
   ret void
 }
 
 ;; Check that we respect lifetime.start/lifetime.end intrinsics when deleting
 ;; stores that, without the lifetime calls, would be writebacks.
-; CHECK-LABEL: @test_writeback_lifetimes_multi_arg(
-; CHECK-NOMEMSSA-LABEL: @test_writeback_lifetimes_multi_arg(
 define void @test_writeback_lifetimes_multi_arg(i32* %p, i32* %q) {
+; CHECK-NOMEMSSA-LABEL: @test_writeback_lifetimes_multi_arg(
+; CHECK-NOMEMSSA-NEXT:  entry:
+; CHECK-NOMEMSSA-NEXT:    [[PV:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NOMEMSSA-NEXT:    [[QV:%.*]] = load i32, i32* [[Q:%.*]], align 4
+; CHECK-NOMEMSSA-NEXT:    call void @llvm.lifetime.end.p0i32(i64 8, i32* [[P]])
+; CHECK-NOMEMSSA-NEXT:    call void @llvm.lifetime.start.p0i32(i64 8, i32* [[P]])
+; CHECK-NOMEMSSA-NEXT:    store i32 [[PV]], i32* [[P]], align 4
+; CHECK-NOMEMSSA-NEXT:    store i32 [[QV]], i32* [[Q]], align 4
+; CHECK-NOMEMSSA-NEXT:    ret void
+;
+; CHECK-LABEL: @test_writeback_lifetimes_multi_arg(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[PV:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[QV:%.*]] = load i32, i32* [[Q:%.*]], align 4
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0i32(i64 8, i32* [[P]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0i32(i64 8, i32* [[P]])
+; CHECK-NEXT:    store i32 [[PV]], i32* [[P]], align 4
+; CHECK-NEXT:    store i32 [[QV]], i32* [[Q]], align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   %pv = load i32, i32* %p
   %qv = load i32, i32* %q
   call void @llvm.lifetime.end.p0i8(i64 8, i32* %p)
   call void @llvm.lifetime.start.p0i8(i64 8, i32* %p)
-  ; CHECK: store i32 %pv
-  ; CHECK-NOMEMSSA-LABEL: store i32 %pv
   store i32 %pv, i32* %p
-  ; CHECK: store i32 %qv, i32* %q
-  ; CHECK-NOMEMSSA-LABEL: store i32 %qv, i32* %q
   store i32 %qv, i32* %q
   ret void
 }

diff --git a/llvm/test/Transforms/EarlyCSE/pr33406.ll b/llvm/test/Transforms/EarlyCSE/pr33406.ll
index 903b8bc9f2ace..e0d2cccb48ac1 100644
--- a/llvm/test/Transforms/EarlyCSE/pr33406.ll
+++ b/llvm/test/Transforms/EarlyCSE/pr33406.ll
@@ -1,18 +1,18 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -early-cse-memssa -earlycse-debug-hash -S %s | FileCheck %s
 
-; CHECK: define void @patatino() {
-; CHECK:  for.cond:
-; CHECK-NEXT:  br i1 true, label %if.end, label %for.inc
-; CHECK:  if.end:
-; CHECK-NEXT:  %tinkywinky = load i32, i32* @b
-; CHECK-NEXT:  br i1 true, label %for.inc, label %for.inc
-; CHECK:  for.inc:
-; CHECK-NEXT:  ret void
-
-
 @b = external global i32
 
 define void @patatino() {
+; CHECK-LABEL: @patatino(
+; CHECK-NEXT:  for.cond:
+; CHECK-NEXT:    br i1 true, label [[IF_END:%.*]], label [[FOR_INC:%.*]]
+; CHECK:       if.end:
+; CHECK-NEXT:    [[TINKYWINKY:%.*]] = load i32, i32* @b, align 4
+; CHECK-NEXT:    br i1 true, label [[FOR_INC]], label [[FOR_INC]]
+; CHECK:       for.inc:
+; CHECK-NEXT:    ret void
+;
 for.cond:
   br i1 true, label %if.end, label %for.inc
 

diff --git a/llvm/test/Transforms/EarlyCSE/readnone-mayunwind.ll b/llvm/test/Transforms/EarlyCSE/readnone-mayunwind.ll
index d83a42780c647..baa050a433d80 100644
--- a/llvm/test/Transforms/EarlyCSE/readnone-mayunwind.ll
+++ b/llvm/test/Transforms/EarlyCSE/readnone-mayunwind.ll
@@ -1,12 +1,15 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -early-cse -earlycse-debug-hash < %s | FileCheck %s
 
 declare void @readnone_may_unwind() readnone
 
 define void @f(i32* %ptr) {
 ; CHECK-LABEL: @f(
-; CHECK: store i32 100, i32* %ptr
-; CHECK: call void @readnone_may_unwind()
-; CHECK: store i32 200, i32* %ptr
+; CHECK-NEXT:    store i32 100, i32* [[PTR:%.*]], align 4
+; CHECK-NEXT:    call void @readnone_may_unwind()
+; CHECK-NEXT:    store i32 200, i32* [[PTR]], align 4
+; CHECK-NEXT:    ret void
+;
 
   store i32 100, i32* %ptr
   call void @readnone_may_unwind()

diff --git a/llvm/test/Transforms/EarlyCSE/writeonly.ll b/llvm/test/Transforms/EarlyCSE/writeonly.ll
index b28af8535083c..3c95efb012a86 100644
--- a/llvm/test/Transforms/EarlyCSE/writeonly.ll
+++ b/llvm/test/Transforms/EarlyCSE/writeonly.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -early-cse -earlycse-debug-hash < %s | FileCheck %s
 
 @var = global i32 undef
@@ -5,11 +6,12 @@ declare void @foo() nounwind
 
 define void @test() {
 ; CHECK-LABEL: @test(
-; CHECK-NOT: store
+; CHECK-NEXT:    call void @foo() #[[ATTR1:[0-9]+]]
+; CHECK-NEXT:    store i32 2, i32* @var, align 4
+; CHECK-NEXT:    ret void
+;
   store i32 1, i32* @var
-; CHECK: call void @foo()
   call void @foo() writeonly
-; CHECK: store i32 2, i32* @var
   store i32 2, i32* @var
   ret void
 }
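
The regenerated assertions can be spot-checked locally by replaying a test's
RUN line by hand, for example for atomics.ll (a sketch; it assumes opt and
FileCheck from the local build are on PATH):

    opt < llvm/test/Transforms/EarlyCSE/atomics.ll -S -early-cse -earlycse-debug-hash \
        | FileCheck llvm/test/Transforms/EarlyCSE/atomics.ll

or by running the whole directory through lit, e.g.
build/bin/llvm-lit llvm/test/Transforms/EarlyCSE (again assuming a build
tree named "build").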



