[llvm] r370256 - Use the handy --check-prefixes mechanism to de-verbosify a couple of atomics tests [NFC]

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 28 13:27:39 PDT 2019


Author: reames
Date: Wed Aug 28 13:27:39 2019
New Revision: 370256

URL: http://llvm.org/viewvc/llvm-project?rev=370256&view=rev
Log:
Use the handy --check-prefixes mechanism to de-verbosify a couple of atomics tests [NFC]
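
Previously each function carried duplicated CHECK-O0 and CHECK-O3 blocks even where -O0 and -O3 produce identical code. Passing FileCheck a comma-separated list via --check-prefixes lets both RUN lines share a common CHECK prefix, so identical assertions are written once. A minimal sketch of the mechanism, using the load_i8 checks from this patch (the trailing CHECK-O0/CHECK-O3 lines are illustrative placeholders for any run-specific assertions, not lines from this commit):

    ; RUN: llc -O0 < %s | FileCheck --check-prefixes=CHECK,CHECK-O0 %s
    ; RUN: llc -O3 < %s | FileCheck --check-prefixes=CHECK,CHECK-O3 %s

    ; Assertions shared by both runs use the common prefix:
    ; CHECK-LABEL: load_i8:
    ; CHECK-NEXT:    movb (%rdi), %al
    ; CHECK-NEXT:    retq
    ; Assertions that differ between runs keep the run-specific prefix:
    ; CHECK-O0: ...
    ; CHECK-O3: ...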


Modified:
    llvm/trunk/test/CodeGen/X86/atomic-monotonic.ll
    llvm/trunk/test/CodeGen/X86/atomic-unordered.ll
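
As the NOTE line in both files says, these assertions are autogenerated, so after updating the RUN lines the checks are regenerated rather than edited by hand. A usage sketch (assuming a freshly built llc is on PATH and the command runs from the llvm/trunk source directory):

    $ utils/update_llc_test_checks.py test/CodeGen/X86/atomic-monotonic.ll \
        test/CodeGen/X86/atomic-unordered.ll

Where the -O0 and -O3 outputs agree for a function, the script emits a single block under the shared CHECK prefix, which is the de-duplication visible in the diffs below.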

Modified: llvm/trunk/test/CodeGen/X86/atomic-monotonic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic-monotonic.ll?rev=370256&r1=370255&r2=370256&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic-monotonic.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atomic-monotonic.ll Wed Aug 28 13:27:39 2019
@@ -1,17 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -O0 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mattr=sse2 | FileCheck --check-prefix=CHECK-O0 %s
-; RUN: llc -O3 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mattr=sse2 | FileCheck --check-prefix=CHECK-O3 %s
+; RUN: llc -O0 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mattr=sse2 | FileCheck --check-prefixes=CHECK,CHECK-O0 %s
+; RUN: llc -O3 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mattr=sse2 | FileCheck --check-prefixes=CHECK,CHECK-O3 %s
 
 define i8 @load_i8(i8* %ptr) {
-; CHECK-O0-LABEL: load_i8:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movb (%rdi), %al
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: load_i8:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movb (%rdi), %al
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: load_i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movb (%rdi), %al
+; CHECK-NEXT:    retq
   %v = load atomic i8, i8* %ptr monotonic, align 1
   ret i8 %v
 }
@@ -62,57 +57,37 @@ define void @store_i16(i16* %ptr, i16 %v
 }
 
 define i32 @load_i32(i32* %ptr) {
-; CHECK-O0-LABEL: load_i32:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movl (%rdi), %eax
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: load_i32:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movl (%rdi), %eax
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: load_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl (%rdi), %eax
+; CHECK-NEXT:    retq
   %v = load atomic i32, i32* %ptr monotonic, align 4
   ret i32 %v
 }
 
 define void @store_i32(i32* %ptr, i32 %v) {
-; CHECK-O0-LABEL: store_i32:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movl %esi, (%rdi)
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: store_i32:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movl %esi, (%rdi)
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: store_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %esi, (%rdi)
+; CHECK-NEXT:    retq
   store atomic i32 %v, i32* %ptr monotonic, align 4
   ret void
 }
 
 define i64 @load_i64(i64* %ptr) {
-; CHECK-O0-LABEL: load_i64:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq (%rdi), %rax
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: load_i64:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: load_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    retq
   %v = load atomic i64, i64* %ptr monotonic, align 8
   ret i64 %v
 }
 
 define void @store_i64(i64* %ptr, i64 %v) {
-; CHECK-O0-LABEL: store_i64:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq %rsi, (%rdi)
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: store_i64:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq %rsi, (%rdi)
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: store_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq %rsi, (%rdi)
+; CHECK-NEXT:    retq
   store atomic i64 %v, i64* %ptr monotonic, align 8
   ret void
 }

Modified: llvm/trunk/test/CodeGen/X86/atomic-unordered.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic-unordered.ll?rev=370256&r1=370255&r2=370256&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic-unordered.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atomic-unordered.ll Wed Aug 28 13:27:39 2019
@@ -1,17 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -O0 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake | FileCheck --check-prefix=CHECK-O0 %s
-; RUN: llc -O3 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake | FileCheck --check-prefix=CHECK-O3 %s
+; RUN: llc -O0 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake | FileCheck --check-prefixes=CHECK,CHECK-O0 %s
+; RUN: llc -O3 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake | FileCheck --check-prefixes=CHECK,CHECK-O3 %s
 
 define i8 @load_i8(i8* %ptr) {
-; CHECK-O0-LABEL: load_i8:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movb (%rdi), %al
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: load_i8:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movb (%rdi), %al
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: load_i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movb (%rdi), %al
+; CHECK-NEXT:    retq
   %v = load atomic i8, i8* %ptr unordered, align 1
   ret i8 %v
 }
@@ -62,57 +57,37 @@ define void @store_i16(i16* %ptr, i16 %v
 }
 
 define i32 @load_i32(i32* %ptr) {
-; CHECK-O0-LABEL: load_i32:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movl (%rdi), %eax
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: load_i32:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movl (%rdi), %eax
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: load_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl (%rdi), %eax
+; CHECK-NEXT:    retq
   %v = load atomic i32, i32* %ptr unordered, align 4
   ret i32 %v
 }
 
 define void @store_i32(i32* %ptr, i32 %v) {
-; CHECK-O0-LABEL: store_i32:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movl %esi, (%rdi)
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: store_i32:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movl %esi, (%rdi)
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: store_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %esi, (%rdi)
+; CHECK-NEXT:    retq
   store atomic i32 %v, i32* %ptr unordered, align 4
   ret void
 }
 
 define i64 @load_i64(i64* %ptr) {
-; CHECK-O0-LABEL: load_i64:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq (%rdi), %rax
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: load_i64:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: load_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    retq
   %v = load atomic i64, i64* %ptr unordered, align 8
   ret i64 %v
 }
 
 define void @store_i64(i64* %ptr, i64 %v) {
-; CHECK-O0-LABEL: store_i64:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq %rsi, (%rdi)
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: store_i64:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq %rsi, (%rdi)
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: store_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq %rsi, (%rdi)
+; CHECK-NEXT:    retq
   store atomic i64 %v, i64* %ptr unordered, align 8
   ret void
 }
@@ -196,17 +171,11 @@ define void @narrow_writeback_xor(i64* %
 
 ; Legal if wider type is also atomic (TODO)
 define void @widen_store(i32* %p0, i32 %v1, i32 %v2) {
-; CHECK-O0-LABEL: widen_store:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movl %esi, (%rdi)
-; CHECK-O0-NEXT:    movl %edx, 4(%rdi)
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: widen_store:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movl %esi, (%rdi)
-; CHECK-O3-NEXT:    movl %edx, 4(%rdi)
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: widen_store:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %esi, (%rdi)
+; CHECK-NEXT:    movl %edx, 4(%rdi)
+; CHECK-NEXT:    retq
   %p1 = getelementptr i32, i32* %p0, i64 1
   store atomic i32 %v1, i32* %p0 unordered, align 8
   store atomic i32 %v2, i32* %p1 unordered, align 4
@@ -217,17 +186,11 @@ define void @widen_store(i32* %p0, i32 %
 ; the wider type might cross a cache line and violate the
 ; atomicity requirement.
 define void @widen_store_unaligned(i32* %p0, i32 %v1, i32 %v2) {
-; CHECK-O0-LABEL: widen_store_unaligned:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movl %esi, (%rdi)
-; CHECK-O0-NEXT:    movl %edx, 4(%rdi)
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: widen_store_unaligned:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movl %esi, (%rdi)
-; CHECK-O3-NEXT:    movl %edx, 4(%rdi)
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: widen_store_unaligned:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %esi, (%rdi)
+; CHECK-NEXT:    movl %edx, 4(%rdi)
+; CHECK-NEXT:    retq
   %p1 = getelementptr i32, i32* %p0, i64 1
   store atomic i32 %v1, i32* %p0 unordered, align 4
   store atomic i32 %v2, i32* %p1 unordered, align 4
@@ -236,17 +199,11 @@ define void @widen_store_unaligned(i32*
 
 ; Legal if wider type is also atomic (TODO)
 define void @widen_broadcast(i32* %p0, i32 %v) {
-; CHECK-O0-LABEL: widen_broadcast:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movl %esi, (%rdi)
-; CHECK-O0-NEXT:    movl %esi, 4(%rdi)
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: widen_broadcast:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movl %esi, (%rdi)
-; CHECK-O3-NEXT:    movl %esi, 4(%rdi)
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: widen_broadcast:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %esi, (%rdi)
+; CHECK-NEXT:    movl %esi, 4(%rdi)
+; CHECK-NEXT:    retq
   %p1 = getelementptr i32, i32* %p0, i64 1
   store atomic i32 %v, i32* %p0 unordered, align 8
   store atomic i32 %v, i32* %p1 unordered, align 4
@@ -255,17 +212,11 @@ define void @widen_broadcast(i32* %p0, i
 
 ; Not legal to widen due to alignment restriction
 define void @widen_broadcast_unaligned(i32* %p0, i32 %v) {
-; CHECK-O0-LABEL: widen_broadcast_unaligned:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movl %esi, (%rdi)
-; CHECK-O0-NEXT:    movl %esi, 4(%rdi)
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: widen_broadcast_unaligned:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movl %esi, (%rdi)
-; CHECK-O3-NEXT:    movl %esi, 4(%rdi)
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: widen_broadcast_unaligned:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %esi, (%rdi)
+; CHECK-NEXT:    movl %esi, 4(%rdi)
+; CHECK-NEXT:    retq
   %p1 = getelementptr i32, i32* %p0, i64 1
   store atomic i32 %v, i32* %p0 unordered, align 4
   store atomic i32 %v, i32* %p1 unordered, align 4
@@ -457,21 +408,13 @@ define void @store_i256(i256* %ptr, i256
 
 ; Legal if wider type is also atomic (TODO)
 define void @vec_store(i32* %p0, <2 x i32> %vec) {
-; CHECK-O0-LABEL: vec_store:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    vmovd %xmm0, %eax
-; CHECK-O0-NEXT:    vpextrd $1, %xmm0, %ecx
-; CHECK-O0-NEXT:    movl %eax, (%rdi)
-; CHECK-O0-NEXT:    movl %ecx, 4(%rdi)
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: vec_store:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    vmovd %xmm0, %eax
-; CHECK-O3-NEXT:    vpextrd $1, %xmm0, %ecx
-; CHECK-O3-NEXT:    movl %eax, (%rdi)
-; CHECK-O3-NEXT:    movl %ecx, 4(%rdi)
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: vec_store:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovd %xmm0, %eax
+; CHECK-NEXT:    vpextrd $1, %xmm0, %ecx
+; CHECK-NEXT:    movl %eax, (%rdi)
+; CHECK-NEXT:    movl %ecx, 4(%rdi)
+; CHECK-NEXT:    retq
   %v1 = extractelement <2 x i32> %vec, i32 0
   %v2 = extractelement <2 x i32> %vec, i32 1
   %p1 = getelementptr i32, i32* %p0, i64 1
@@ -482,21 +425,13 @@ define void @vec_store(i32* %p0, <2 x i3
 
 ; Not legal to widen due to alignment restriction
 define void @vec_store_unaligned(i32* %p0, <2 x i32> %vec) {
-; CHECK-O0-LABEL: vec_store_unaligned:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    vmovd %xmm0, %eax
-; CHECK-O0-NEXT:    vpextrd $1, %xmm0, %ecx
-; CHECK-O0-NEXT:    movl %eax, (%rdi)
-; CHECK-O0-NEXT:    movl %ecx, 4(%rdi)
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: vec_store_unaligned:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    vmovd %xmm0, %eax
-; CHECK-O3-NEXT:    vpextrd $1, %xmm0, %ecx
-; CHECK-O3-NEXT:    movl %eax, (%rdi)
-; CHECK-O3-NEXT:    movl %ecx, 4(%rdi)
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: vec_store_unaligned:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovd %xmm0, %eax
+; CHECK-NEXT:    vpextrd $1, %xmm0, %ecx
+; CHECK-NEXT:    movl %eax, (%rdi)
+; CHECK-NEXT:    movl %ecx, 4(%rdi)
+; CHECK-NEXT:    retq
   %v1 = extractelement <2 x i32> %vec, i32 0
   %v2 = extractelement <2 x i32> %vec, i32 1
   %p1 = getelementptr i32, i32* %p0, i64 1
@@ -510,19 +445,12 @@ define void @vec_store_unaligned(i32* %p
 ; Legal if wider type is also atomic (TODO)
 ; Also, can avoid register move from xmm to eax (TODO)
 define void @widen_broadcast2(i32* %p0, <2 x i32> %vec) {
-; CHECK-O0-LABEL: widen_broadcast2:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    vmovd %xmm0, %eax
-; CHECK-O0-NEXT:    movl %eax, (%rdi)
-; CHECK-O0-NEXT:    movl %eax, 4(%rdi)
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: widen_broadcast2:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    vmovd %xmm0, %eax
-; CHECK-O3-NEXT:    movl %eax, (%rdi)
-; CHECK-O3-NEXT:    movl %eax, 4(%rdi)
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: widen_broadcast2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovd %xmm0, %eax
+; CHECK-NEXT:    movl %eax, (%rdi)
+; CHECK-NEXT:    movl %eax, 4(%rdi)
+; CHECK-NEXT:    retq
   %v1 = extractelement <2 x i32> %vec, i32 0
   %p1 = getelementptr i32, i32* %p0, i64 1
   store atomic i32 %v1, i32* %p0 unordered, align 8
@@ -532,19 +460,12 @@ define void @widen_broadcast2(i32* %p0,
 
 ; Not legal to widen due to alignment restriction
 define void @widen_broadcast2_unaligned(i32* %p0, <2 x i32> %vec) {
-; CHECK-O0-LABEL: widen_broadcast2_unaligned:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    vmovd %xmm0, %eax
-; CHECK-O0-NEXT:    movl %eax, (%rdi)
-; CHECK-O0-NEXT:    movl %eax, 4(%rdi)
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: widen_broadcast2_unaligned:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    vmovd %xmm0, %eax
-; CHECK-O3-NEXT:    movl %eax, (%rdi)
-; CHECK-O3-NEXT:    movl %eax, 4(%rdi)
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: widen_broadcast2_unaligned:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovd %xmm0, %eax
+; CHECK-NEXT:    movl %eax, (%rdi)
+; CHECK-NEXT:    movl %eax, 4(%rdi)
+; CHECK-NEXT:    retq
   %v1 = extractelement <2 x i32> %vec, i32 0
   %p1 = getelementptr i32, i32* %p0, i64 1
   store atomic i32 %v1, i32* %p0 unordered, align 4
@@ -554,17 +475,11 @@ define void @widen_broadcast2_unaligned(
 
 ; Legal if wider type is also atomic (TODO)
 define void @widen_zero_init(i32* %p0, i32 %v1, i32 %v2) {
-; CHECK-O0-LABEL: widen_zero_init:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movl $0, (%rdi)
-; CHECK-O0-NEXT:    movl $0, 4(%rdi)
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: widen_zero_init:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movl $0, (%rdi)
-; CHECK-O3-NEXT:    movl $0, 4(%rdi)
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: widen_zero_init:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl $0, (%rdi)
+; CHECK-NEXT:    movl $0, 4(%rdi)
+; CHECK-NEXT:    retq
   %p1 = getelementptr i32, i32* %p0, i64 1
   store atomic i32 0, i32* %p0 unordered, align 8
   store atomic i32 0, i32* %p1 unordered, align 4
@@ -573,17 +488,11 @@ define void @widen_zero_init(i32* %p0, i
 
 ; Not legal to widen due to alignment restriction
 define void @widen_zero_init_unaligned(i32* %p0, i32 %v1, i32 %v2) {
-; CHECK-O0-LABEL: widen_zero_init_unaligned:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movl $0, (%rdi)
-; CHECK-O0-NEXT:    movl $0, 4(%rdi)
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: widen_zero_init_unaligned:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movl $0, (%rdi)
-; CHECK-O3-NEXT:    movl $0, 4(%rdi)
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: widen_zero_init_unaligned:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl $0, (%rdi)
+; CHECK-NEXT:    movl $0, 4(%rdi)
+; CHECK-NEXT:    retq
   %p1 = getelementptr i32, i32* %p0, i64 1
   store atomic i32 0, i32* %p0 unordered, align 4
   store atomic i32 0, i32* %p1 unordered, align 4
@@ -595,17 +504,11 @@ define void @widen_zero_init_unaligned(i
 
 ; Legal, as expected
 define i64 @load_fold_add1(i64* %p) {
-; CHECK-O0-LABEL: load_fold_add1:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq (%rdi), %rax
-; CHECK-O0-NEXT:    addq $15, %rax
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: load_fold_add1:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    addq $15, %rax
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: load_fold_add1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    addq $15, %rax
+; CHECK-NEXT:    retq
   %v = load atomic i64, i64* %p unordered, align 8
   %ret = add i64 %v, 15
   ret i64 %ret
@@ -665,34 +568,22 @@ define i64 @load_fold_sub1(i64* %p) {
 }
 
 define i64 @load_fold_sub2(i64* %p, i64 %v2) {
-; CHECK-O0-LABEL: load_fold_sub2:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq (%rdi), %rax
-; CHECK-O0-NEXT:    subq %rsi, %rax
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: load_fold_sub2:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    subq %rsi, %rax
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: load_fold_sub2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    subq %rsi, %rax
+; CHECK-NEXT:    retq
   %v = load atomic i64, i64* %p unordered, align 8
   %ret = sub i64 %v, %v2
   ret i64 %ret
 }
 
 define i64 @load_fold_sub3(i64* %p1, i64* %p2) {
-; CHECK-O0-LABEL: load_fold_sub3:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq (%rdi), %rax
-; CHECK-O0-NEXT:    subq (%rsi), %rax
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: load_fold_sub3:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    subq (%rsi), %rax
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: load_fold_sub3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    subq (%rsi), %rax
+; CHECK-NEXT:    retq
   %v = load atomic i64, i64* %p1 unordered, align 8
   %v2 = load atomic i64, i64* %p2 unordered, align 8
   %ret = sub i64 %v, %v2
@@ -1129,17 +1020,11 @@ define i64 @load_fold_urem3(i64* %p1, i6
 
 ; Legal, as expected
 define i64 @load_fold_shl1(i64* %p) {
-; CHECK-O0-LABEL: load_fold_shl1:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq (%rdi), %rax
-; CHECK-O0-NEXT:    shlq $15, %rax
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: load_fold_shl1:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    shlq $15, %rax
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: load_fold_shl1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    shlq $15, %rax
+; CHECK-NEXT:    retq
   %v = load atomic i64, i64* %p unordered, align 8
   %ret = shl i64 %v, 15
   ret i64 %ret
@@ -1185,17 +1070,11 @@ define i64 @load_fold_shl3(i64* %p1, i64
 
 ; Legal, as expected
 define i64 @load_fold_lshr1(i64* %p) {
-; CHECK-O0-LABEL: load_fold_lshr1:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq (%rdi), %rax
-; CHECK-O0-NEXT:    shrq $15, %rax
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: load_fold_lshr1:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    shrq $15, %rax
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: load_fold_lshr1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    shrq $15, %rax
+; CHECK-NEXT:    retq
   %v = load atomic i64, i64* %p unordered, align 8
   %ret = lshr i64 %v, 15
   ret i64 %ret
@@ -1241,17 +1120,11 @@ define i64 @load_fold_lshr3(i64* %p1, i6
 
 ; Legal, as expected
 define i64 @load_fold_ashr1(i64* %p) {
-; CHECK-O0-LABEL: load_fold_ashr1:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq (%rdi), %rax
-; CHECK-O0-NEXT:    sarq $15, %rax
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: load_fold_ashr1:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    sarq $15, %rax
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: load_fold_ashr1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    sarq $15, %rax
+; CHECK-NEXT:    retq
   %v = load atomic i64, i64* %p unordered, align 8
   %ret = ashr i64 %v, 15
   ret i64 %ret
@@ -1350,17 +1223,11 @@ define i64 @load_fold_and3(i64* %p1, i64
 
 ; Legal, as expected
 define i64 @load_fold_or1(i64* %p) {
-; CHECK-O0-LABEL: load_fold_or1:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq (%rdi), %rax
-; CHECK-O0-NEXT:    orq $15, %rax
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: load_fold_or1:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    orq $15, %rax
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: load_fold_or1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    orq $15, %rax
+; CHECK-NEXT:    retq
   %v = load atomic i64, i64* %p unordered, align 8
   %ret = or i64 %v, 15
   ret i64 %ret
@@ -1403,17 +1270,11 @@ define i64 @load_fold_or3(i64* %p1, i64*
 
 ; Legal, as expected
 define i64 @load_fold_xor1(i64* %p) {
-; CHECK-O0-LABEL: load_fold_xor1:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq (%rdi), %rax
-; CHECK-O0-NEXT:    xorq $15, %rax
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: load_fold_xor1:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    xorq $15, %rax
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: load_fold_xor1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    xorq $15, %rax
+; CHECK-NEXT:    retq
   %v = load atomic i64, i64* %p unordered, align 8
   %ret = xor i64 %v, 15
   ret i64 %ret
@@ -1601,21 +1462,13 @@ define void @rmw_fold_sub2(i64* %p, i64
 
 ; Legal, as expected
 define void @rmw_fold_mul1(i64* %p, i64 %v) {
-; CHECK-O0-LABEL: rmw_fold_mul1:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq (%rdi), %rax
-; CHECK-O0-NEXT:    leaq (%rax,%rax,4), %rax
-; CHECK-O0-NEXT:    leaq (%rax,%rax,2), %rax
-; CHECK-O0-NEXT:    movq %rax, (%rdi)
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: rmw_fold_mul1:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    leaq (%rax,%rax,4), %rax
-; CHECK-O3-NEXT:    leaq (%rax,%rax,2), %rax
-; CHECK-O3-NEXT:    movq %rax, (%rdi)
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: rmw_fold_mul1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    leaq (%rax,%rax,4), %rax
+; CHECK-NEXT:    leaq (%rax,%rax,2), %rax
+; CHECK-NEXT:    movq %rax, (%rdi)
+; CHECK-NEXT:    retq
   %prev = load atomic i64, i64* %p unordered, align 8
   %val = mul i64 %prev, 15
   store atomic i64 %val, i64* %p unordered, align 8
@@ -1715,23 +1568,14 @@ define void @rmw_fold_sdiv2(i64* %p, i64
 
 ; Legal, as expected
 define void @rmw_fold_udiv1(i64* %p, i64 %v) {
-; CHECK-O0-LABEL: rmw_fold_udiv1:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq (%rdi), %rax
-; CHECK-O0-NEXT:    movabsq $-8608480567731124087, %rcx # imm = 0x8888888888888889
-; CHECK-O0-NEXT:    mulq %rcx
-; CHECK-O0-NEXT:    shrq $3, %rdx
-; CHECK-O0-NEXT:    movq %rdx, (%rdi)
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: rmw_fold_udiv1:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    movabsq $-8608480567731124087, %rcx # imm = 0x8888888888888889
-; CHECK-O3-NEXT:    mulq %rcx
-; CHECK-O3-NEXT:    shrq $3, %rdx
-; CHECK-O3-NEXT:    movq %rdx, (%rdi)
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: rmw_fold_udiv1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    movabsq $-8608480567731124087, %rcx # imm = 0x8888888888888889
+; CHECK-NEXT:    mulq %rcx
+; CHECK-NEXT:    shrq $3, %rdx
+; CHECK-NEXT:    movq %rdx, (%rdi)
+; CHECK-NEXT:    retq
   %prev = load atomic i64, i64* %p unordered, align 8
   %val = udiv i64 %prev, 15
   store atomic i64 %val, i64* %p unordered, align 8
@@ -1923,19 +1767,12 @@ define void @rmw_fold_urem2(i64* %p, i64
 
 ; Legal to fold (TODO)
 define void @rmw_fold_shl1(i64* %p, i64 %v) {
-; CHECK-O0-LABEL: rmw_fold_shl1:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq (%rdi), %rax
-; CHECK-O0-NEXT:    shlq $15, %rax
-; CHECK-O0-NEXT:    movq %rax, (%rdi)
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: rmw_fold_shl1:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    shlq $15, %rax
-; CHECK-O3-NEXT:    movq %rax, (%rdi)
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: rmw_fold_shl1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    shlq $15, %rax
+; CHECK-NEXT:    movq %rax, (%rdi)
+; CHECK-NEXT:    retq
   %prev = load atomic i64, i64* %p unordered, align 8
   %val = shl i64 %prev, 15
   store atomic i64 %val, i64* %p unordered, align 8
@@ -1967,19 +1804,12 @@ define void @rmw_fold_shl2(i64* %p, i64
 
 ; Legal to fold (TODO)
 define void @rmw_fold_lshr1(i64* %p, i64 %v) {
-; CHECK-O0-LABEL: rmw_fold_lshr1:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq (%rdi), %rax
-; CHECK-O0-NEXT:    shrq $15, %rax
-; CHECK-O0-NEXT:    movq %rax, (%rdi)
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: rmw_fold_lshr1:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    shrq $15, %rax
-; CHECK-O3-NEXT:    movq %rax, (%rdi)
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: rmw_fold_lshr1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    shrq $15, %rax
+; CHECK-NEXT:    movq %rax, (%rdi)
+; CHECK-NEXT:    retq
   %prev = load atomic i64, i64* %p unordered, align 8
   %val = lshr i64 %prev, 15
   store atomic i64 %val, i64* %p unordered, align 8
@@ -2011,19 +1841,12 @@ define void @rmw_fold_lshr2(i64* %p, i64
 
 ; Legal to fold (TODO)
 define void @rmw_fold_ashr1(i64* %p, i64 %v) {
-; CHECK-O0-LABEL: rmw_fold_ashr1:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq (%rdi), %rax
-; CHECK-O0-NEXT:    sarq $15, %rax
-; CHECK-O0-NEXT:    movq %rax, (%rdi)
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: rmw_fold_ashr1:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    sarq $15, %rax
-; CHECK-O3-NEXT:    movq %rax, (%rdi)
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: rmw_fold_ashr1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    sarq $15, %rax
+; CHECK-NEXT:    movq %rax, (%rdi)
+; CHECK-NEXT:    retq
   %prev = load atomic i64, i64* %p unordered, align 8
   %val = ashr i64 %prev, 15
   store atomic i64 %val, i64* %p unordered, align 8
@@ -2174,17 +1997,11 @@ define void @rmw_fold_xor2(i64* %p, i64
 
 ; Legal to reduce the load width (TODO)
 define i32 @fold_trunc(i64* %p) {
-; CHECK-O0-LABEL: fold_trunc:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq (%rdi), %rax
-; CHECK-O0-NEXT:    # kill: def $eax killed $eax killed $rax
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: fold_trunc:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    # kill: def $eax killed $eax killed $rax
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: fold_trunc:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    # kill: def $eax killed $eax killed $rax
+; CHECK-NEXT:    retq
   %v = load atomic i64, i64* %p unordered, align 8
   %ret = trunc i64 %v to i32
   ret i32 %ret
@@ -2290,32 +2107,21 @@ define i32 @split_load(i64* %p) {
 
 ; TODO: should return constant
 define i64 @constant_folding(i64* %p) {
-; CHECK-O0-LABEL: constant_folding:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq (%rdi), %rax
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: constant_folding:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: constant_folding:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    retq
   %v = load atomic i64, i64* %p unordered, align 8
   ret i64 %v
 }
 
 ; Legal to forward and fold (TODO)
 define i64 @load_forwarding(i64* %p) {
-; CHECK-O0-LABEL: load_forwarding:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq (%rdi), %rax
-; CHECK-O0-NEXT:    orq (%rdi), %rax
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: load_forwarding:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    orq (%rdi), %rax
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: load_forwarding:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    orq (%rdi), %rax
+; CHECK-NEXT:    retq
   %v = load atomic i64, i64* %p unordered, align 8
   %v2 = load atomic i64, i64* %p unordered, align 8
   %ret = or i64 %v, %v2
@@ -2324,17 +2130,11 @@ define i64 @load_forwarding(i64* %p) {
 
 ; Legal to forward (TODO)
 define i64 @store_forward(i64* %p, i64 %v) {
-; CHECK-O0-LABEL: store_forward:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq %rsi, (%rdi)
-; CHECK-O0-NEXT:    movq (%rdi), %rax
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: store_forward:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq %rsi, (%rdi)
-; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: store_forward:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq %rsi, (%rdi)
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    retq
   store atomic i64 %v, i64* %p unordered, align 8
   %ret = load atomic i64, i64* %p unordered, align 8
   ret i64 %ret
@@ -2342,17 +2142,11 @@ define i64 @store_forward(i64* %p, i64 %
 
 ; Legal to kill (TODO)
 define void @dead_writeback(i64* %p) {
-; CHECK-O0-LABEL: dead_writeback:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq (%rdi), %rax
-; CHECK-O0-NEXT:    movq %rax, (%rdi)
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: dead_writeback:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    movq %rax, (%rdi)
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: dead_writeback:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    movq %rax, (%rdi)
+; CHECK-NEXT:    retq
   %v = load atomic i64, i64* %p unordered, align 8
   store atomic i64 %v, i64* %p unordered, align 8
   ret void
@@ -2360,17 +2154,11 @@ define void @dead_writeback(i64* %p) {
 
 ; Legal to kill (TODO)
 define void @dead_store(i64* %p, i64 %v) {
-; CHECK-O0-LABEL: dead_store:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq $0, (%rdi)
-; CHECK-O0-NEXT:    movq %rsi, (%rdi)
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: dead_store:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq $0, (%rdi)
-; CHECK-O3-NEXT:    movq %rsi, (%rdi)
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: dead_store:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq $0, (%rdi)
+; CHECK-NEXT:    movq %rsi, (%rdi)
+; CHECK-NEXT:    retq
   store atomic i64 0, i64* %p unordered, align 8
   store atomic i64 %v, i64* %p unordered, align 8
   ret void
@@ -2384,19 +2172,12 @@ define void @dead_store(i64* %p, i64 %v)
 ;; isn't violated.
 
 define i64 @nofold_fence(i64* %p) {
-; CHECK-O0-LABEL: nofold_fence:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq (%rdi), %rax
-; CHECK-O0-NEXT:    mfence
-; CHECK-O0-NEXT:    addq $15, %rax
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: nofold_fence:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    mfence
-; CHECK-O3-NEXT:    addq $15, %rax
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: nofold_fence:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    mfence
+; CHECK-NEXT:    addq $15, %rax
+; CHECK-NEXT:    retq
   %v = load atomic i64, i64* %p unordered, align 8
   fence seq_cst
   %ret = add i64 %v, 15
@@ -2404,19 +2185,12 @@ define i64 @nofold_fence(i64* %p) {
 }
 
 define i64 @nofold_fence_acquire(i64* %p) {
-; CHECK-O0-LABEL: nofold_fence_acquire:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq (%rdi), %rax
-; CHECK-O0-NEXT:    #MEMBARRIER
-; CHECK-O0-NEXT:    addq $15, %rax
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: nofold_fence_acquire:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    #MEMBARRIER
-; CHECK-O3-NEXT:    addq $15, %rax
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: nofold_fence_acquire:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    #MEMBARRIER
+; CHECK-NEXT:    addq $15, %rax
+; CHECK-NEXT:    retq
   %v = load atomic i64, i64* %p unordered, align 8
   fence acquire
   %ret = add i64 %v, 15
@@ -2425,19 +2199,12 @@ define i64 @nofold_fence_acquire(i64* %p
 
 
 define i64 @nofold_stfence(i64* %p) {
-; CHECK-O0-LABEL: nofold_stfence:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq (%rdi), %rax
-; CHECK-O0-NEXT:    #MEMBARRIER
-; CHECK-O0-NEXT:    addq $15, %rax
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: nofold_stfence:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    #MEMBARRIER
-; CHECK-O3-NEXT:    addq $15, %rax
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: nofold_stfence:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    #MEMBARRIER
+; CHECK-NEXT:    addq $15, %rax
+; CHECK-NEXT:    retq
   %v = load atomic i64, i64* %p unordered, align 8
   fence syncscope("singlethread") seq_cst
   %ret = add i64 %v, 15
@@ -2466,19 +2233,12 @@ define i64 @fold_constant(i64 %arg) {
 }
 
 define i64 @fold_constant_clobber(i64* %p, i64 %arg) {
-; CHECK-O0-LABEL: fold_constant_clobber:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq {{.*}}(%rip), %rax
-; CHECK-O0-NEXT:    movq $5, (%rdi)
-; CHECK-O0-NEXT:    addq %rsi, %rax
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: fold_constant_clobber:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq {{.*}}(%rip), %rax
-; CHECK-O3-NEXT:    movq $5, (%rdi)
-; CHECK-O3-NEXT:    addq %rsi, %rax
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: fold_constant_clobber:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq {{.*}}(%rip), %rax
+; CHECK-NEXT:    movq $5, (%rdi)
+; CHECK-NEXT:    addq %rsi, %rax
+; CHECK-NEXT:    retq
   %v = load atomic i64, i64* @Constant unordered, align 8
   store i64 5, i64* %p
   %ret = add i64 %v, %arg
@@ -2486,19 +2246,12 @@ define i64 @fold_constant_clobber(i64* %
 }
 
 define i64 @fold_constant_fence(i64 %arg) {
-; CHECK-O0-LABEL: fold_constant_fence:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq {{.*}}(%rip), %rax
-; CHECK-O0-NEXT:    mfence
-; CHECK-O0-NEXT:    addq %rdi, %rax
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: fold_constant_fence:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq {{.*}}(%rip), %rax
-; CHECK-O3-NEXT:    mfence
-; CHECK-O3-NEXT:    addq %rdi, %rax
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: fold_constant_fence:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq {{.*}}(%rip), %rax
+; CHECK-NEXT:    mfence
+; CHECK-NEXT:    addq %rdi, %rax
+; CHECK-NEXT:    retq
   %v = load atomic i64, i64* @Constant unordered, align 8
   fence seq_cst
   %ret = add i64 %v, %arg
@@ -2506,19 +2259,12 @@ define i64 @fold_constant_fence(i64 %arg
 }
 
 define i64 @fold_invariant_clobber(i64* dereferenceable(8) %p, i64 %arg) {
-; CHECK-O0-LABEL: fold_invariant_clobber:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq (%rdi), %rax
-; CHECK-O0-NEXT:    movq $5, (%rdi)
-; CHECK-O0-NEXT:    addq %rsi, %rax
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: fold_invariant_clobber:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    movq $5, (%rdi)
-; CHECK-O3-NEXT:    addq %rsi, %rax
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: fold_invariant_clobber:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    movq $5, (%rdi)
+; CHECK-NEXT:    addq %rsi, %rax
+; CHECK-NEXT:    retq
   %v = load atomic i64, i64* %p unordered, align 8, !invariant.load !{}
   store i64 5, i64* %p
   %ret = add i64 %v, %arg
@@ -2527,19 +2273,12 @@ define i64 @fold_invariant_clobber(i64*
 
 
 define i64 @fold_invariant_fence(i64* dereferenceable(8) %p, i64 %arg) {
-; CHECK-O0-LABEL: fold_invariant_fence:
-; CHECK-O0:       # %bb.0:
-; CHECK-O0-NEXT:    movq (%rdi), %rax
-; CHECK-O0-NEXT:    mfence
-; CHECK-O0-NEXT:    addq %rsi, %rax
-; CHECK-O0-NEXT:    retq
-;
-; CHECK-O3-LABEL: fold_invariant_fence:
-; CHECK-O3:       # %bb.0:
-; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    mfence
-; CHECK-O3-NEXT:    addq %rsi, %rax
-; CHECK-O3-NEXT:    retq
+; CHECK-LABEL: fold_invariant_fence:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    mfence
+; CHECK-NEXT:    addq %rsi, %rax
+; CHECK-NEXT:    retq
   %v = load atomic i64, i64* %p unordered, align 8, !invariant.load !{}
   fence seq_cst
   %ret = add i64 %v, %arg



