[llvm] 6e56cda - [GVN] Regenerate test checks (NFC)

Nikita Popov via llvm-commits <llvm-commits at lists.llvm.org>
Wed Apr 12 07:16:37 PDT 2023


Author: Nikita Popov
Date: 2023-04-12T16:16:29+02:00
New Revision: 6e56cdac306f3bd0e25377bf30488141c5980ca1

URL: https://github.com/llvm/llvm-project/commit/6e56cdac306f3bd0e25377bf30488141c5980ca1
DIFF: https://github.com/llvm/llvm-project/commit/6e56cdac306f3bd0e25377bf30488141c5980ca1.diff

LOG: [GVN] Regenerate test checks (NFC)
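
For reference, assertions like the ones below are produced by LLVM's
check-generation script. A sketch of the typical invocation (assuming a
built 'opt' is on PATH and the llvm-project root is the working directory;
exact flags may differ):

    python3 llvm/utils/update_test_checks.py --version 2 \
        llvm/test/Transforms/GVN/PRE/atomic.ll

The script runs the test's RUN line through opt, rewrites the CHECK lines
in place, and records its arguments in the UTC_ARGS note at the top of the
file.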

Added: 
    

Modified: 
    llvm/test/Transforms/GVN/PRE/atomic.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/GVN/PRE/atomic.ll b/llvm/test/Transforms/GVN/PRE/atomic.ll
index 185e96747a09f..e8bf25548ba89 100644
--- a/llvm/test/Transforms/GVN/PRE/atomic.ll
+++ b/llvm/test/Transforms/GVN/PRE/atomic.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
 ; RUN: opt -passes=gvn -S < %s | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
@@ -8,8 +9,14 @@ target triple = "x86_64-apple-macosx10.7.0"
 
 ; GVN across unordered store (allowed)
 define i32 @test1() nounwind uwtable ssp {
-; CHECK-LABEL: test1
-; CHECK: add i32 %x, %x
+; CHECK-LABEL: define i32 @test1
+; CHECK-SAME: () #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[X:%.*]] = load i32, ptr @y, align 4
+; CHECK-NEXT:    store atomic i32 [[X]], ptr @x unordered, align 4
+; CHECK-NEXT:    [[Z:%.*]] = add i32 [[X]], [[X]]
+; CHECK-NEXT:    ret i32 [[Z]]
+;
 entry:
   %x = load i32, ptr @y
   store atomic i32 %x, ptr @x unordered, align 4
@@ -20,8 +27,15 @@ entry:
 
 ; GVN across unordered load (allowed)
 define i32 @test3() nounwind uwtable ssp {
-; CHECK-LABEL: test3
-; CHECK: add i32 %x, %x
+; CHECK-LABEL: define i32 @test3
+; CHECK-SAME: () #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[X:%.*]] = load i32, ptr @y, align 4
+; CHECK-NEXT:    [[Y:%.*]] = load atomic i32, ptr @x unordered, align 4
+; CHECK-NEXT:    [[A:%.*]] = add i32 [[X]], [[X]]
+; CHECK-NEXT:    [[B:%.*]] = add i32 [[Y]], [[A]]
+; CHECK-NEXT:    ret i32 [[B]]
+;
 entry:
   %x = load i32, ptr @y
   %y = load atomic i32, ptr @x unordered, align 4
@@ -33,8 +47,13 @@ entry:
 
 ; GVN load to unordered load (allowed)
 define i32 @test5() nounwind uwtable ssp {
-; CHECK-LABEL: test5
-; CHECK: add i32 %x, %x
+; CHECK-LABEL: define i32 @test5
+; CHECK-SAME: () #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[X:%.*]] = load atomic i32, ptr @x unordered, align 4
+; CHECK-NEXT:    [[Z:%.*]] = add i32 [[X]], [[X]]
+; CHECK-NEXT:    ret i32 [[Z]]
+;
 entry:
   %x = load atomic i32, ptr @x unordered, align 4
   %y = load i32, ptr @x
@@ -44,8 +63,14 @@ entry:
 
 ; GVN unordered load to load (unordered load must not be removed)
 define i32 @test6() nounwind uwtable ssp {
-; CHECK-LABEL: test6
-; CHECK: load atomic i32, ptr @x unordered
+; CHECK-LABEL: define i32 @test6
+; CHECK-SAME: () #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[X:%.*]] = load i32, ptr @x, align 4
+; CHECK-NEXT:    [[X2:%.*]] = load atomic i32, ptr @x unordered, align 4
+; CHECK-NEXT:    [[X3:%.*]] = add i32 [[X]], [[X2]]
+; CHECK-NEXT:    ret i32 [[X3]]
+;
 entry:
   %x = load i32, ptr @x
   %x2 = load atomic i32, ptr @x unordered, align 4
@@ -55,8 +80,16 @@ entry:
 
 ; GVN across release-acquire pair (forbidden)
 define i32 @test7() nounwind uwtable ssp {
-; CHECK-LABEL: test7
-; CHECK: add i32 %x, %y
+; CHECK-LABEL: define i32 @test7
+; CHECK-SAME: () #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[X:%.*]] = load i32, ptr @y, align 4
+; CHECK-NEXT:    store atomic i32 [[X]], ptr @x release, align 4
+; CHECK-NEXT:    [[W:%.*]] = load atomic i32, ptr @x acquire, align 4
+; CHECK-NEXT:    [[Y:%.*]] = load i32, ptr @y, align 4
+; CHECK-NEXT:    [[Z:%.*]] = add i32 [[X]], [[Y]]
+; CHECK-NEXT:    ret i32 [[Z]]
+;
 entry:
   %x = load i32, ptr @y
   store atomic i32 %x, ptr @x release, align 4
@@ -68,8 +101,14 @@ entry:
 
 ; GVN across monotonic store (allowed)
 define i32 @test9() nounwind uwtable ssp {
-; CHECK-LABEL: test9
-; CHECK: add i32 %x, %x
+; CHECK-LABEL: define i32 @test9
+; CHECK-SAME: () #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[X:%.*]] = load i32, ptr @y, align 4
+; CHECK-NEXT:    store atomic i32 [[X]], ptr @x monotonic, align 4
+; CHECK-NEXT:    [[Z:%.*]] = add i32 [[X]], [[X]]
+; CHECK-NEXT:    ret i32 [[Z]]
+;
 entry:
   %x = load i32, ptr @y
   store atomic i32 %x, ptr @x monotonic, align 4
@@ -80,8 +119,15 @@ entry:
 
 ; GVN of an unordered across monotonic load (not allowed)
 define i32 @test10() nounwind uwtable ssp {
-; CHECK-LABEL: test10
-; CHECK: add i32 %x, %y
+; CHECK-LABEL: define i32 @test10
+; CHECK-SAME: () #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[X:%.*]] = load atomic i32, ptr @y unordered, align 4
+; CHECK-NEXT:    [[CLOBBER:%.*]] = load atomic i32, ptr @x monotonic, align 4
+; CHECK-NEXT:    [[Y:%.*]] = load atomic i32, ptr @y monotonic, align 4
+; CHECK-NEXT:    [[Z:%.*]] = add i32 [[X]], [[Y]]
+; CHECK-NEXT:    ret i32 [[Z]]
+;
 entry:
   %x = load atomic i32, ptr @y unordered, align 4
   %clobber = load atomic i32, ptr @x monotonic, align 4
@@ -91,105 +137,140 @@ entry:
 }
 
 define i32 @PR22708(i1 %flag) {
-; CHECK-LABEL: PR22708
+; CHECK-LABEL: define i32 @PR22708
+; CHECK-SAME: (i1 [[FLAG:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br i1 [[FLAG]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
+; CHECK:       if.then:
+; CHECK-NEXT:    store i32 43, ptr @y, align 4
+; CHECK-NEXT:    br label [[IF_END]]
+; CHECK:       if.end:
+; CHECK-NEXT:    [[TMP0:%.*]] = load atomic i32, ptr @x acquire, align 4
+; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr @y, align 4
+; CHECK-NEXT:    ret i32 [[LOAD]]
+;
 entry:
   br i1 %flag, label %if.then, label %if.end
 
 if.then:
   store i32 43, ptr @y, align 4
-; CHECK: store i32 43, ptr @y, align 4
   br label %if.end
 
 if.end:
   load atomic i32, ptr @x acquire, align 4
   %load = load i32, ptr @y, align 4
-; CHECK: load atomic i32, ptr @x acquire, align 4
-; CHECK: load i32, ptr @y, align 4
   ret i32 %load
 }
 
-; CHECK-LABEL: @test12(
 ; Can't remove a load over a ordering barrier
 define i32 @test12(i1 %B, ptr %P1, ptr %P2) {
+; CHECK-LABEL: define i32 @test12
+; CHECK-SAME: (i1 [[B:%.*]], ptr [[P1:%.*]], ptr [[P2:%.*]]) {
+; CHECK-NEXT:    [[LOAD0:%.*]] = load i32, ptr [[P1]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i32, ptr [[P2]] seq_cst, align 4
+; CHECK-NEXT:    [[LOAD1:%.*]] = load i32, ptr [[P1]], align 4
+; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[B]], i32 [[LOAD0]], i32 [[LOAD1]]
+; CHECK-NEXT:    ret i32 [[SEL]]
+;
   %load0 = load i32, ptr %P1
   %1 = load atomic i32, ptr %P2 seq_cst, align 4
   %load1 = load i32, ptr %P1
   %sel = select i1 %B, i32 %load0, i32 %load1
   ret i32 %sel
-  ; CHECK: load i32, ptr %P1
-  ; CHECK: load i32, ptr %P1
 }
 
-; CHECK-LABEL: @test13(
 ; atomic to non-atomic forwarding is legal
 define i32 @test13(ptr %P1) {
+; CHECK-LABEL: define i32 @test13
+; CHECK-SAME: (ptr [[P1:%.*]]) {
+; CHECK-NEXT:    [[A:%.*]] = load atomic i32, ptr [[P1]] seq_cst, align 4
+; CHECK-NEXT:    ret i32 0
+;
   %a = load atomic i32, ptr %P1 seq_cst, align 4
   %b = load i32, ptr %P1
   %res = sub i32 %a, %b
   ret i32 %res
-  ; CHECK: load atomic i32, ptr %P1
-  ; CHECK: ret i32 0
 }
 
-; CHECK-LABEL: @test13b(
 define i32 @test13b(ptr %P1) {
+; CHECK-LABEL: define i32 @test13b
+; CHECK-SAME: (ptr [[P1:%.*]]) {
+; CHECK-NEXT:    store atomic i32 0, ptr [[P1]] unordered, align 4
+; CHECK-NEXT:    ret i32 0
+;
   store  atomic i32 0, ptr %P1 unordered, align 4
   %b = load i32, ptr %P1
   ret i32 %b
-  ; CHECK: ret i32 0
 }
 
-; CHECK-LABEL: @test14(
 ; atomic to unordered atomic forwarding is legal
 define i32 @test14(ptr %P1) {
+; CHECK-LABEL: define i32 @test14
+; CHECK-SAME: (ptr [[P1:%.*]]) {
+; CHECK-NEXT:    [[A:%.*]] = load atomic i32, ptr [[P1]] seq_cst, align 4
+; CHECK-NEXT:    ret i32 0
+;
   %a = load atomic i32, ptr %P1 seq_cst, align 4
   %b = load atomic i32, ptr %P1 unordered, align 4
   %res = sub i32 %a, %b
   ret i32 %res
-  ; CHECK: load atomic i32, ptr %P1 seq_cst
-  ; CHECK-NEXT: ret i32 0
 }
 
-; CHECK-LABEL: @test15(
 ; implementation restriction: can't forward to stonger
 ; than unordered
 define i32 @test15(ptr %P1, ptr %P2) {
+; CHECK-LABEL: define i32 @test15
+; CHECK-SAME: (ptr [[P1:%.*]], ptr [[P2:%.*]]) {
+; CHECK-NEXT:    [[A:%.*]] = load atomic i32, ptr [[P1]] seq_cst, align 4
+; CHECK-NEXT:    [[B:%.*]] = load atomic i32, ptr [[P1]] seq_cst, align 4
+; CHECK-NEXT:    [[RES:%.*]] = sub i32 [[A]], [[B]]
+; CHECK-NEXT:    ret i32 [[RES]]
+;
   %a = load atomic i32, ptr %P1 seq_cst, align 4
   %b = load atomic i32, ptr %P1 seq_cst, align 4
   %res = sub i32 %a, %b
   ret i32 %res
-  ; CHECK: load atomic i32, ptr %P1
-  ; CHECK: load atomic i32, ptr %P1
 }
 
-; CHECK-LABEL: @test16(
 ; forwarding non-atomic to atomic is wrong! (However,
 ; it would be legal to use the later value in place of the
 ; former in this particular example.  We just don't
 ; do that right now.)
 define i32 @test16(ptr %P1, ptr %P2) {
+; CHECK-LABEL: define i32 @test16
+; CHECK-SAME: (ptr [[P1:%.*]], ptr [[P2:%.*]]) {
+; CHECK-NEXT:    [[A:%.*]] = load i32, ptr [[P1]], align 4
+; CHECK-NEXT:    [[B:%.*]] = load atomic i32, ptr [[P1]] unordered, align 4
+; CHECK-NEXT:    [[RES:%.*]] = sub i32 [[A]], [[B]]
+; CHECK-NEXT:    ret i32 [[RES]]
+;
   %a = load i32, ptr %P1, align 4
   %b = load atomic i32, ptr %P1 unordered, align 4
   %res = sub i32 %a, %b
   ret i32 %res
-  ; CHECK: load i32, ptr %P1
-  ; CHECK: load atomic i32, ptr %P1
 }
 
-; CHECK-LABEL: @test16b(
 define i32 @test16b(ptr %P1) {
+; CHECK-LABEL: define i32 @test16b
+; CHECK-SAME: (ptr [[P1:%.*]]) {
+; CHECK-NEXT:    store i32 0, ptr [[P1]], align 4
+; CHECK-NEXT:    [[B:%.*]] = load atomic i32, ptr [[P1]] unordered, align 4
+; CHECK-NEXT:    ret i32 [[B]]
+;
   store i32 0, ptr %P1
   %b = load atomic i32, ptr %P1 unordered, align 4
   ret i32 %b
-  ; CHECK: load atomic i32, ptr %P1
 }
 
 ; Can't DSE across a full fence
 define void @fence_seq_cst_store(ptr %P1, ptr %P2) {
-; CHECK-LABEL: @fence_seq_cst_store(
-; CHECK: store
-; CHECK: store atomic
-; CHECK: store
+; CHECK-LABEL: define void @fence_seq_cst_store
+; CHECK-SAME: (ptr [[P1:%.*]], ptr [[P2:%.*]]) {
+; CHECK-NEXT:    store i32 0, ptr [[P1]], align 4
+; CHECK-NEXT:    store atomic i32 0, ptr [[P2]] seq_cst, align 4
+; CHECK-NEXT:    store i32 0, ptr [[P1]], align 4
+; CHECK-NEXT:    ret void
+;
   store i32 0, ptr %P1, align 4
   store atomic i32 0, ptr %P2 seq_cst, align 4
   store i32 0, ptr %P1, align 4
@@ -198,10 +279,13 @@ define void @fence_seq_cst_store(ptr %P1, ptr %P2) {
 
 ; Can't DSE across a full fence
 define void @fence_seq_cst(ptr %P1, ptr %P2) {
-; CHECK-LABEL: @fence_seq_cst(
-; CHECK: store
-; CHECK: fence seq_cst
-; CHECK: store
+; CHECK-LABEL: define void @fence_seq_cst
+; CHECK-SAME: (ptr [[P1:%.*]], ptr [[P2:%.*]]) {
+; CHECK-NEXT:    store i32 0, ptr [[P1]], align 4
+; CHECK-NEXT:    fence seq_cst
+; CHECK-NEXT:    store i32 0, ptr [[P1]], align 4
+; CHECK-NEXT:    ret void
+;
   store i32 0, ptr %P1, align 4
   fence seq_cst
   store i32 0, ptr %P1, align 4
@@ -210,10 +294,13 @@ define void @fence_seq_cst(ptr %P1, ptr %P2) {
 
 ; Can't DSE across a full syncscope("singlethread") fence
 define void @fence_seq_cst_st(ptr %P1, ptr %P2) {
-; CHECK-LABEL: @fence_seq_cst_st(
-; CHECK: store
-; CHECK: fence syncscope("singlethread") seq_cst
-; CHECK: store
+; CHECK-LABEL: define void @fence_seq_cst_st
+; CHECK-SAME: (ptr [[P1:%.*]], ptr [[P2:%.*]]) {
+; CHECK-NEXT:    store i32 0, ptr [[P1]], align 4
+; CHECK-NEXT:    fence syncscope("singlethread") seq_cst
+; CHECK-NEXT:    store i32 0, ptr [[P1]], align 4
+; CHECK-NEXT:    ret void
+;
   store i32 0, ptr %P1, align 4
   fence syncscope("singlethread") seq_cst
   store i32 0, ptr %P1, align 4
@@ -222,10 +309,13 @@ define void @fence_seq_cst_st(ptr %P1, ptr %P2) {
 
 ; Can't DSE across a full fence
 define void @fence_asm_sideeffect(ptr %P1, ptr %P2) {
-; CHECK-LABEL: @fence_asm_sideeffect(
-; CHECK: store
-; CHECK: call void asm sideeffect
-; CHECK: store
+; CHECK-LABEL: define void @fence_asm_sideeffect
+; CHECK-SAME: (ptr [[P1:%.*]], ptr [[P2:%.*]]) {
+; CHECK-NEXT:    store i32 0, ptr [[P1]], align 4
+; CHECK-NEXT:    call void asm sideeffect "", ""()
+; CHECK-NEXT:    store i32 0, ptr [[P1]], align 4
+; CHECK-NEXT:    ret void
+;
   store i32 0, ptr %P1, align 4
   call void asm sideeffect "", ""()
   store i32 0, ptr %P1, align 4
@@ -234,10 +324,13 @@ define void @fence_asm_sideeffect(ptr %P1, ptr %P2) {
 
 ; Can't DSE across a full fence
 define void @fence_asm_memory(ptr %P1, ptr %P2) {
-; CHECK-LABEL: @fence_asm_memory(
-; CHECK: store
-; CHECK: call void asm
-; CHECK: store
+; CHECK-LABEL: define void @fence_asm_memory
+; CHECK-SAME: (ptr [[P1:%.*]], ptr [[P2:%.*]]) {
+; CHECK-NEXT:    store i32 0, ptr [[P1]], align 4
+; CHECK-NEXT:    call void asm "", "~{memory}"()
+; CHECK-NEXT:    store i32 0, ptr [[P1]], align 4
+; CHECK-NEXT:    ret void
+;
   store i32 0, ptr %P1, align 4
   call void asm "", "~{memory}"()
   store i32 0, ptr %P1, align 4
@@ -246,32 +339,42 @@ define void @fence_asm_memory(ptr %P1, ptr %P2) {
 
 ; Can't remove a volatile load
 define i32 @volatile_load(ptr %P1, ptr %P2) {
+; CHECK-LABEL: define i32 @volatile_load
+; CHECK-SAME: (ptr [[P1:%.*]], ptr [[P2:%.*]]) {
+; CHECK-NEXT:    [[A:%.*]] = load i32, ptr [[P1]], align 4
+; CHECK-NEXT:    [[B:%.*]] = load volatile i32, ptr [[P1]], align 4
+; CHECK-NEXT:    [[RES:%.*]] = sub i32 [[A]], [[B]]
+; CHECK-NEXT:    ret i32 [[RES]]
+;
   %a = load i32, ptr %P1, align 4
   %b = load volatile i32, ptr %P1, align 4
   %res = sub i32 %a, %b
   ret i32 %res
-  ; CHECK-LABEL: @volatile_load(
-  ; CHECK: load i32, ptr %P1
-  ; CHECK: load volatile i32, ptr %P1
 }
 
 ; Can't remove redundant volatile loads
 define i32 @redundant_volatile_load(ptr %P1, ptr %P2) {
+; CHECK-LABEL: define i32 @redundant_volatile_load
+; CHECK-SAME: (ptr [[P1:%.*]], ptr [[P2:%.*]]) {
+; CHECK-NEXT:    [[A:%.*]] = load volatile i32, ptr [[P1]], align 4
+; CHECK-NEXT:    [[B:%.*]] = load volatile i32, ptr [[P1]], align 4
+; CHECK-NEXT:    [[RES:%.*]] = sub i32 [[A]], [[B]]
+; CHECK-NEXT:    ret i32 [[RES]]
+;
   %a = load volatile i32, ptr %P1, align 4
   %b = load volatile i32, ptr %P1, align 4
   %res = sub i32 %a, %b
   ret i32 %res
-  ; CHECK-LABEL: @redundant_volatile_load(
-  ; CHECK: load volatile i32, ptr %P1
-  ; CHECK: load volatile i32, ptr %P1
-  ; CHECK: sub
 }
 
 ; Can't DSE a volatile store
 define void @volatile_store(ptr %P1, ptr %P2) {
-; CHECK-LABEL: @volatile_store(
-; CHECK: store volatile
-; CHECK: store
+; CHECK-LABEL: define void @volatile_store
+; CHECK-SAME: (ptr [[P1:%.*]], ptr [[P2:%.*]]) {
+; CHECK-NEXT:    store volatile i32 0, ptr [[P1]], align 4
+; CHECK-NEXT:    store i32 3, ptr [[P1]], align 4
+; CHECK-NEXT:    ret void
+;
   store volatile i32 0, ptr %P1, align 4
   store i32 3, ptr %P1, align 4
   ret void
@@ -279,9 +382,12 @@ define void @volatile_store(ptr %P1, ptr %P2) {
 
 ; Can't DSE a redundant volatile store
 define void @redundant_volatile_store(ptr %P1, ptr %P2) {
-; CHECK-LABEL: @redundant_volatile_store(
-; CHECK: store volatile
-; CHECK: store volatile
+; CHECK-LABEL: define void @redundant_volatile_store
+; CHECK-SAME: (ptr [[P1:%.*]], ptr [[P2:%.*]]) {
+; CHECK-NEXT:    store volatile i32 0, ptr [[P1]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P1]], align 4
+; CHECK-NEXT:    ret void
+;
   store volatile i32 0, ptr %P1, align 4
   store volatile i32 0, ptr %P1, align 4
   ret void
@@ -289,20 +395,27 @@ define void @redundant_volatile_store(ptr %P1, ptr %P2) {
 
 ; Can value forward from volatiles
 define i32 @test20(ptr %P1, ptr %P2) {
+; CHECK-LABEL: define i32 @test20
+; CHECK-SAME: (ptr [[P1:%.*]], ptr [[P2:%.*]]) {
+; CHECK-NEXT:    [[A:%.*]] = load volatile i32, ptr [[P1]], align 4
+; CHECK-NEXT:    ret i32 0
+;
   %a = load volatile i32, ptr %P1, align 4
   %b = load i32, ptr %P1, align 4
   %res = sub i32 %a, %b
   ret i32 %res
-  ; CHECK-LABEL: @test20(
-  ; CHECK: load volatile i32, ptr %P1
-  ; CHECK: ret i32 0
 }
 
 ; We're currently conservative about widening
 define i64 @widen1(ptr %P1) {
-  ; CHECK-LABEL: @widen1(
-  ; CHECK: load atomic i32, ptr %P1
-  ; CHECK: load atomic i64, ptr %P1
+; CHECK-LABEL: define i64 @widen1
+; CHECK-SAME: (ptr [[P1:%.*]]) {
+; CHECK-NEXT:    [[A:%.*]] = load atomic i32, ptr [[P1]] unordered, align 4
+; CHECK-NEXT:    [[B:%.*]] = load atomic i64, ptr [[P1]] unordered, align 4
+; CHECK-NEXT:    [[A64:%.*]] = sext i32 [[A]] to i64
+; CHECK-NEXT:    [[RES:%.*]] = sub i64 [[A64]], [[B]]
+; CHECK-NEXT:    ret i64 [[RES]]
+;
   %a = load atomic i32, ptr %P1 unordered, align 4
   %b = load atomic i64, ptr %P1 unordered, align 4
   %a64 = sext i32 %a to i64
@@ -312,9 +425,14 @@ define i64 @widen1(ptr %P1) {
 
 ; narrowing does work
 define i64 @narrow(ptr %P1) {
-  ; CHECK-LABEL: @narrow(
-  ; CHECK: load atomic i64, ptr %P1
-  ; CHECK-NOT: load atomic i32, ptr %P1
+; CHECK-LABEL: define i64 @narrow
+; CHECK-SAME: (ptr [[P1:%.*]]) {
+; CHECK-NEXT:    [[A64:%.*]] = load atomic i64, ptr [[P1]] unordered, align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[A64]] to i32
+; CHECK-NEXT:    [[B64:%.*]] = sext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[RES:%.*]] = sub i64 [[A64]], [[B64]]
+; CHECK-NEXT:    ret i64 [[RES]]
+;
   %a64 = load atomic i64, ptr %P1 unordered, align 4
   %b = load atomic i32, ptr %P1 unordered, align 4
   %b64 = sext i32 %b to i64
@@ -324,9 +442,14 @@ define i64 @narrow(ptr %P1) {
 
 ; Missed optimization, we don't yet optimize ordered loads
 define i64 @narrow2(ptr %P1) {
-  ; CHECK-LABEL: @narrow2(
-  ; CHECK: load atomic i64, ptr %P1
-  ; CHECK: load atomic i32, ptr %P1
+; CHECK-LABEL: define i64 @narrow2
+; CHECK-SAME: (ptr [[P1:%.*]]) {
+; CHECK-NEXT:    [[A64:%.*]] = load atomic i64, ptr [[P1]] acquire, align 4
+; CHECK-NEXT:    [[B:%.*]] = load atomic i32, ptr [[P1]] acquire, align 4
+; CHECK-NEXT:    [[B64:%.*]] = sext i32 [[B]] to i64
+; CHECK-NEXT:    [[RES:%.*]] = sub i64 [[A64]], [[B64]]
+; CHECK-NEXT:    ret i64 [[RES]]
+;
   %a64 = load atomic i64, ptr %P1 acquire, align 4
   %b = load atomic i32, ptr %P1 acquire, align 4
   %b64 = sext i32 %b to i64
@@ -340,10 +463,16 @@ define i64 @narrow2(ptr %P1) {
 
 ; unordered atomic to unordered atomic
 define i32 @non_local_fre(ptr %P1) {
-; CHECK-LABEL: @non_local_fre(
-; CHECK: load atomic i32, ptr %P1
-; CHECK: ret i32 0
-; CHECK: ret i32 0
+; CHECK-LABEL: define i32 @non_local_fre
+; CHECK-SAME: (ptr [[P1:%.*]]) {
+; CHECK-NEXT:    [[A:%.*]] = load atomic i32, ptr [[P1]] unordered, align 4
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A]], 0
+; CHECK-NEXT:    br i1 [[CMP]], label [[EARLY:%.*]], label [[NEXT:%.*]]
+; CHECK:       early:
+; CHECK-NEXT:    ret i32 0
+; CHECK:       next:
+; CHECK-NEXT:    ret i32 0
+;
   %a = load atomic i32, ptr %P1 unordered, align 4
   %cmp = icmp eq i32 %a, 0
   br i1 %cmp, label %early, label %next
@@ -357,10 +486,16 @@ next:
 
 ; unordered atomic to non-atomic
 define i32 @non_local_fre2(ptr %P1) {
-; CHECK-LABEL: @non_local_fre2(
-; CHECK: load atomic i32, ptr %P1
-; CHECK: ret i32 0
-; CHECK: ret i32 0
+; CHECK-LABEL: define i32 @non_local_fre2
+; CHECK-SAME: (ptr [[P1:%.*]]) {
+; CHECK-NEXT:    [[A:%.*]] = load atomic i32, ptr [[P1]] unordered, align 4
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A]], 0
+; CHECK-NEXT:    br i1 [[CMP]], label [[EARLY:%.*]], label [[NEXT:%.*]]
+; CHECK:       early:
+; CHECK-NEXT:    ret i32 0
+; CHECK:       next:
+; CHECK-NEXT:    ret i32 0
+;
   %a = load atomic i32, ptr %P1 unordered, align 4
   %cmp = icmp eq i32 %a, 0
   br i1 %cmp, label %early, label %next
@@ -374,11 +509,18 @@ next:
 
 ; Can't forward ordered atomics.
 define i32 @non_local_fre3(ptr %P1) {
-; CHECK-LABEL: @non_local_fre3(
-; CHECK: load atomic i32, ptr %P1 acquire
-; CHECK: ret i32 0
-; CHECK: load atomic i32, ptr %P1 acquire
-; CHECK: ret i32 %res
+; CHECK-LABEL: define i32 @non_local_fre3
+; CHECK-SAME: (ptr [[P1:%.*]]) {
+; CHECK-NEXT:    [[A:%.*]] = load atomic i32, ptr [[P1]] acquire, align 4
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A]], 0
+; CHECK-NEXT:    br i1 [[CMP]], label [[EARLY:%.*]], label [[NEXT:%.*]]
+; CHECK:       early:
+; CHECK-NEXT:    ret i32 0
+; CHECK:       next:
+; CHECK-NEXT:    [[B:%.*]] = load atomic i32, ptr [[P1]] acquire, align 4
+; CHECK-NEXT:    [[RES:%.*]] = sub i32 [[A]], [[B]]
+; CHECK-NEXT:    ret i32 [[RES]]
+;
   %a = load atomic i32, ptr %P1 acquire, align 4
   %cmp = icmp eq i32 %a, 0
   br i1 %cmp, label %early, label %next
@@ -394,11 +536,19 @@ declare void @clobber()
 
 ; unordered atomic to unordered atomic
 define i32 @non_local_pre(ptr %P1) {
-; CHECK-LABEL: @non_local_pre(
-; CHECK: load atomic i32, ptr %P1 unordered
-; CHECK: load atomic i32, ptr %P1 unordered
-; CHECK: %b = phi i32 [ %b.pre, %early ], [ %a, %0 ]
-; CHECK: ret i32 %b
+; CHECK-LABEL: define i32 @non_local_pre
+; CHECK-SAME: (ptr [[P1:%.*]]) {
+; CHECK-NEXT:    [[A:%.*]] = load atomic i32, ptr [[P1]] unordered, align 4
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A]], 0
+; CHECK-NEXT:    br i1 [[CMP]], label [[EARLY:%.*]], label [[NEXT:%.*]]
+; CHECK:       early:
+; CHECK-NEXT:    call void @clobber()
+; CHECK-NEXT:    [[B_PRE:%.*]] = load atomic i32, ptr [[P1]] unordered, align 4
+; CHECK-NEXT:    br label [[NEXT]]
+; CHECK:       next:
+; CHECK-NEXT:    [[B:%.*]] = phi i32 [ [[B_PRE]], [[EARLY]] ], [ [[A]], [[TMP0:%.*]] ]
+; CHECK-NEXT:    ret i32 [[B]]
+;
   %a = load atomic i32, ptr %P1 unordered, align 4
   %cmp = icmp eq i32 %a, 0
   br i1 %cmp, label %early, label %next
@@ -412,11 +562,19 @@ next:
 
 ; unordered atomic to non-atomic
 define i32 @non_local_pre2(ptr %P1) {
-; CHECK-LABEL: @non_local_pre2(
-; CHECK: load atomic i32, ptr %P1 unordered
-; CHECK: load i32, ptr %P1
-; CHECK: %b = phi i32 [ %b.pre, %early ], [ %a, %0 ]
-; CHECK: ret i32 %b
+; CHECK-LABEL: define i32 @non_local_pre2
+; CHECK-SAME: (ptr [[P1:%.*]]) {
+; CHECK-NEXT:    [[A:%.*]] = load atomic i32, ptr [[P1]] unordered, align 4
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A]], 0
+; CHECK-NEXT:    br i1 [[CMP]], label [[EARLY:%.*]], label [[NEXT:%.*]]
+; CHECK:       early:
+; CHECK-NEXT:    call void @clobber()
+; CHECK-NEXT:    [[B_PRE:%.*]] = load i32, ptr [[P1]], align 4
+; CHECK-NEXT:    br label [[NEXT]]
+; CHECK:       next:
+; CHECK-NEXT:    [[B:%.*]] = phi i32 [ [[B_PRE]], [[EARLY]] ], [ [[A]], [[TMP0:%.*]] ]
+; CHECK-NEXT:    ret i32 [[B]]
+;
   %a = load atomic i32, ptr %P1 unordered, align 4
   %cmp = icmp eq i32 %a, 0
   br i1 %cmp, label %early, label %next
@@ -430,10 +588,18 @@ next:
 
 ; non-atomic to unordered atomic - can't forward!
 define i32 @non_local_pre3(ptr %P1) {
-; CHECK-LABEL: @non_local_pre3(
-; CHECK: %a = load i32, ptr %P1
-; CHECK: %b = load atomic i32, ptr %P1 unordered
-; CHECK: ret i32 %b
+; CHECK-LABEL: define i32 @non_local_pre3
+; CHECK-SAME: (ptr [[P1:%.*]]) {
+; CHECK-NEXT:    [[A:%.*]] = load i32, ptr [[P1]], align 4
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A]], 0
+; CHECK-NEXT:    br i1 [[CMP]], label [[EARLY:%.*]], label [[NEXT:%.*]]
+; CHECK:       early:
+; CHECK-NEXT:    call void @clobber()
+; CHECK-NEXT:    br label [[NEXT]]
+; CHECK:       next:
+; CHECK-NEXT:    [[B:%.*]] = load atomic i32, ptr [[P1]] unordered, align 4
+; CHECK-NEXT:    ret i32 [[B]]
+;
   %a = load i32, ptr %P1
   %cmp = icmp eq i32 %a, 0
   br i1 %cmp, label %early, label %next
@@ -447,10 +613,18 @@ next:
 
 ; ordered atomic to ordered atomic - can't forward
 define i32 @non_local_pre4(ptr %P1) {
-; CHECK-LABEL: @non_local_pre4(
-; CHECK: %a = load atomic i32, ptr %P1 seq_cst
-; CHECK: %b = load atomic i32, ptr %P1 seq_cst
-; CHECK: ret i32 %b
+; CHECK-LABEL: define i32 @non_local_pre4
+; CHECK-SAME: (ptr [[P1:%.*]]) {
+; CHECK-NEXT:    [[A:%.*]] = load atomic i32, ptr [[P1]] seq_cst, align 4
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A]], 0
+; CHECK-NEXT:    br i1 [[CMP]], label [[EARLY:%.*]], label [[NEXT:%.*]]
+; CHECK:       early:
+; CHECK-NEXT:    call void @clobber()
+; CHECK-NEXT:    br label [[NEXT]]
+; CHECK:       next:
+; CHECK-NEXT:    [[B:%.*]] = load atomic i32, ptr [[P1]] seq_cst, align 4
+; CHECK-NEXT:    ret i32 [[B]]
+;
   %a = load atomic i32, ptr %P1 seq_cst, align 4
   %cmp = icmp eq i32 %a, 0
   br i1 %cmp, label %early, label %next
@@ -464,10 +638,18 @@ next:
 
 ; can't remove volatile on any path
 define i32 @non_local_pre5(ptr %P1) {
-; CHECK-LABEL: @non_local_pre5(
-; CHECK: %a = load atomic i32, ptr %P1 seq_cst
-; CHECK: %b = load volatile i32, ptr %P1
-; CHECK: ret i32 %b
+; CHECK-LABEL: define i32 @non_local_pre5
+; CHECK-SAME: (ptr [[P1:%.*]]) {
+; CHECK-NEXT:    [[A:%.*]] = load atomic i32, ptr [[P1]] seq_cst, align 4
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A]], 0
+; CHECK-NEXT:    br i1 [[CMP]], label [[EARLY:%.*]], label [[NEXT:%.*]]
+; CHECK:       early:
+; CHECK-NEXT:    call void @clobber()
+; CHECK-NEXT:    br label [[NEXT]]
+; CHECK:       next:
+; CHECK-NEXT:    [[B:%.*]] = load volatile i32, ptr [[P1]], align 4
+; CHECK-NEXT:    ret i32 [[B]]
+;
   %a = load atomic i32, ptr %P1 seq_cst, align 4
   %cmp = icmp eq i32 %a, 0
   br i1 %cmp, label %early, label %next
@@ -482,11 +664,19 @@ next:
 
 ; ordered atomic to unordered atomic
 define i32 @non_local_pre6(ptr %P1) {
-; CHECK-LABEL: @non_local_pre6(
-; CHECK: load atomic i32, ptr %P1 seq_cst
-; CHECK: load atomic i32, ptr %P1 unordered
-; CHECK: %b = phi i32 [ %b.pre, %early ], [ %a, %0 ]
-; CHECK: ret i32 %b
+; CHECK-LABEL: define i32 @non_local_pre6
+; CHECK-SAME: (ptr [[P1:%.*]]) {
+; CHECK-NEXT:    [[A:%.*]] = load atomic i32, ptr [[P1]] seq_cst, align 4
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A]], 0
+; CHECK-NEXT:    br i1 [[CMP]], label [[EARLY:%.*]], label [[NEXT:%.*]]
+; CHECK:       early:
+; CHECK-NEXT:    call void @clobber()
+; CHECK-NEXT:    [[B_PRE:%.*]] = load atomic i32, ptr [[P1]] unordered, align 4
+; CHECK-NEXT:    br label [[NEXT]]
+; CHECK:       next:
+; CHECK-NEXT:    [[B:%.*]] = phi i32 [ [[B_PRE]], [[EARLY]] ], [ [[A]], [[TMP0:%.*]] ]
+; CHECK-NEXT:    ret i32 [[B]]
+;
   %a = load atomic i32, ptr %P1 seq_cst, align 4
   %cmp = icmp eq i32 %a, 0
   br i1 %cmp, label %early, label %next


        

