[llvm] r238052 - [AArch64] Robustize atomic cmpxchg test a little more. NFC.
Ahmed Bougacha
ahmed.bougacha at gmail.com
Fri May 22 14:35:14 PDT 2015
Author: ab
Date: Fri May 22 16:35:14 2015
New Revision: 238052
URL: http://llvm.org/viewvc/llvm-project?rev=238052&view=rev
Log:
[AArch64] Robustize atomic cmpxchg test a little more. NFC.
We changed the test to use non-constant values in r238049.
We can also use CHECK-NEXT to be a little stricter.
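(For context: a plain CHECK directive only requires its pattern to appear somewhere after the previous match, so FileCheck will skip over any unexpected instructions in between; CHECK-NEXT additionally requires the match to land on the very next line of output. A rough illustration, with a made-up register name:

  ; CHECK:      ldaxr w8, [x0]
  ; CHECK:      cmp w8, w1        <- still matches if other instructions slip in between

versus

  ; CHECK:      ldaxr w8, [x0]
  ; CHECK-NEXT: cmp w8, w1        <- the cmp must be the very next line of output

This is also why the RUN line below gains -asm-verbose=false: it suppresses the comment lines the verbose assembly printer emits, which would otherwise break the CHECK-NEXT chains.)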
Modified:
llvm/trunk/test/CodeGen/AArch64/arm64-atomic.ll
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-atomic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-atomic.ll?rev=238052&r1=238051&r2=238052&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-atomic.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-atomic.ll Fri May 22 16:35:14 2015
@@ -1,51 +1,51 @@
-; RUN: llc < %s -march=arm64 -verify-machineinstrs -mcpu=cyclone | FileCheck %s
+; RUN: llc < %s -march=arm64 -asm-verbose=false -verify-machineinstrs -mcpu=cyclone | FileCheck %s
-define i32 @val_compare_and_swap(i32* %p, i32 %cmp, i32 %new) {
+define i32 @val_compare_and_swap(i32* %p, i32 %cmp, i32 %new) #0 {
; CHECK-LABEL: val_compare_and_swap:
-; CHECK: ubfx x[[NEWVAL_REG:[0-9]+]], x2, #0, #32
-; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
-; CHECK: ldaxr [[RESULT:w[0-9]+]], [x0]
-; CHECK: cmp [[RESULT]], w1
-; CHECK: b.ne [[LABEL2:.?LBB[0-9]+_[0-9]+]]
-; CHECK: stxr [[SCRATCH_REG:w[0-9]+]], w[[NEWVAL_REG]], [x0]
-; CHECK: cbnz [[SCRATCH_REG]], [[LABEL]]
-; CHECK: [[LABEL2]]:
+; CHECK-NEXT: ubfx x[[NEWVAL_REG:[0-9]+]], x2, #0, #32
+; CHECK-NEXT: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
+; CHECK-NEXT: ldaxr [[RESULT:w[0-9]+]], [x0]
+; CHECK-NEXT: cmp [[RESULT]], w1
+; CHECK-NEXT: b.ne [[LABEL2:.?LBB[0-9]+_[0-9]+]]
+; CHECK-NEXT: stxr [[SCRATCH_REG:w[0-9]+]], w[[NEWVAL_REG]], [x0]
+; CHECK-NEXT: cbnz [[SCRATCH_REG]], [[LABEL]]
+; CHECK-NEXT: [[LABEL2]]:
%pair = cmpxchg i32* %p, i32 %cmp, i32 %new acquire acquire
%val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
-define i32 @val_compare_and_swap_rel(i32* %p, i32 %cmp, i32 %new) {
+define i32 @val_compare_and_swap_rel(i32* %p, i32 %cmp, i32 %new) #0 {
; CHECK-LABEL: val_compare_and_swap_rel:
-; CHECK: ubfx x[[NEWVAL_REG:[0-9]+]], x2, #0, #32
-; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
-; CHECK: ldaxr [[RESULT:w[0-9]+]], [x0]
-; CHECK: cmp [[RESULT]], w1
-; CHECK: b.ne [[LABEL2:.?LBB[0-9]+_[0-9]+]]
-; CHECK: stlxr [[SCRATCH_REG:w[0-9]+]], w[[NEWVAL_REG]], [x0]
-; CHECK: cbnz [[SCRATCH_REG]], [[LABEL]]
-; CHECK: [[LABEL2]]:
+; CHECK-NEXT: ubfx x[[NEWVAL_REG:[0-9]+]], x2, #0, #32
+; CHECK-NEXT: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
+; CHECK-NEXT: ldaxr [[RESULT:w[0-9]+]], [x0]
+; CHECK-NEXT: cmp [[RESULT]], w1
+; CHECK-NEXT: b.ne [[LABEL2:.?LBB[0-9]+_[0-9]+]]
+; CHECK-NEXT: stlxr [[SCRATCH_REG:w[0-9]+]], w[[NEWVAL_REG]], [x0]
+; CHECK-NEXT: cbnz [[SCRATCH_REG]], [[LABEL]]
+; CHECK-NEXT: [[LABEL2]]:
%pair = cmpxchg i32* %p, i32 %cmp, i32 %new acq_rel monotonic
%val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
}
-define i64 @val_compare_and_swap_64(i64* %p, i64 %cmp, i64 %new) {
+define i64 @val_compare_and_swap_64(i64* %p, i64 %cmp, i64 %new) #0 {
; CHECK-LABEL: val_compare_and_swap_64:
-; CHECK: mov x[[ADDR:[0-9]+]], x0
-; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
-; CHECK: ldxr [[RESULT:x[0-9]+]], [x[[ADDR]]]
-; CHECK: cmp [[RESULT]], x1
-; CHECK: b.ne [[LABEL2:.?LBB[0-9]+_[0-9]+]]
-; CHECK: stxr [[SCRATCH_REG:w[0-9]+]], x2, [x[[ADDR]]]
-; CHECK: cbnz [[SCRATCH_REG]], [[LABEL]]
-; CHECK: [[LABEL2]]:
+; CHECK-NEXT: mov x[[ADDR:[0-9]+]], x0
+; CHECK-NEXT: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
+; CHECK-NEXT: ldxr [[RESULT:x[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[RESULT]], x1
+; CHECK-NEXT: b.ne [[LABEL2:.?LBB[0-9]+_[0-9]+]]
+; CHECK-NEXT: stxr [[SCRATCH_REG:w[0-9]+]], x2, [x[[ADDR]]]
+; CHECK-NEXT: cbnz [[SCRATCH_REG]], [[LABEL]]
+; CHECK-NEXT: [[LABEL2]]:
%pair = cmpxchg i64* %p, i64 %cmp, i64 %new monotonic monotonic
%val = extractvalue { i64, i1 } %pair, 0
ret i64 %val
}
-define i32 @fetch_and_nand(i32* %p) {
+define i32 @fetch_and_nand(i32* %p) #0 {
; CHECK-LABEL: fetch_and_nand:
; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
; CHECK: ldxr w[[DEST_REG:[0-9]+]], [x0]
@@ -59,7 +59,7 @@ define i32 @fetch_and_nand(i32* %p) {
ret i32 %val
}
-define i64 @fetch_and_nand_64(i64* %p) {
+define i64 @fetch_and_nand_64(i64* %p) #0 {
; CHECK-LABEL: fetch_and_nand_64:
; CHECK: mov x[[ADDR:[0-9]+]], x0
; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
@@ -73,7 +73,7 @@ define i64 @fetch_and_nand_64(i64* %p) {
ret i64 %val
}
-define i32 @fetch_and_or(i32* %p) {
+define i32 @fetch_and_or(i32* %p) #0 {
; CHECK-LABEL: fetch_and_or:
; CHECK: movz [[OLDVAL_REG:w[0-9]+]], #0x5
; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
@@ -87,7 +87,7 @@ define i32 @fetch_and_or(i32* %p) {
ret i32 %val
}
-define i64 @fetch_and_or_64(i64* %p) {
+define i64 @fetch_and_or_64(i64* %p) #0 {
; CHECK: fetch_and_or_64:
; CHECK: mov x[[ADDR:[0-9]+]], x0
; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
@@ -99,35 +99,35 @@ define i64 @fetch_and_or_64(i64* %p) {
ret i64 %val
}
-define void @acquire_fence() {
+define void @acquire_fence() #0 {
fence acquire
ret void
; CHECK-LABEL: acquire_fence:
; CHECK: dmb ishld
}
-define void @release_fence() {
+define void @release_fence() #0 {
fence release
ret void
; CHECK-LABEL: release_fence:
; CHECK: dmb ish{{$}}
}
-define void @seq_cst_fence() {
+define void @seq_cst_fence() #0 {
fence seq_cst
ret void
; CHECK-LABEL: seq_cst_fence:
; CHECK: dmb ish{{$}}
}
-define i32 @atomic_load(i32* %p) {
+define i32 @atomic_load(i32* %p) #0 {
%r = load atomic i32, i32* %p seq_cst, align 4
ret i32 %r
; CHECK-LABEL: atomic_load:
; CHECK: ldar
}
-define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) {
+define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) #0 {
; CHECK-LABEL: atomic_load_relaxed_8:
%ptr_unsigned = getelementptr i8, i8* %p, i32 4095
%val_unsigned = load atomic i8, i8* %ptr_unsigned monotonic, align 1
@@ -152,7 +152,7 @@ define i8 @atomic_load_relaxed_8(i8* %p,
ret i8 %tot3
}
-define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) {
+define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) #0 {
; CHECK-LABEL: atomic_load_relaxed_16:
%ptr_unsigned = getelementptr i16, i16* %p, i32 4095
%val_unsigned = load atomic i16, i16* %ptr_unsigned monotonic, align 2
@@ -177,7 +177,7 @@ define i16 @atomic_load_relaxed_16(i16*
ret i16 %tot3
}
-define i32 @atomic_load_relaxed_32(i32* %p, i32 %off32) {
+define i32 @atomic_load_relaxed_32(i32* %p, i32 %off32) #0 {
; CHECK-LABEL: atomic_load_relaxed_32:
%ptr_unsigned = getelementptr i32, i32* %p, i32 4095
%val_unsigned = load atomic i32, i32* %ptr_unsigned monotonic, align 4
@@ -202,7 +202,7 @@ define i32 @atomic_load_relaxed_32(i32*
ret i32 %tot3
}
-define i64 @atomic_load_relaxed_64(i64* %p, i32 %off32) {
+define i64 @atomic_load_relaxed_64(i64* %p, i32 %off32) #0 {
; CHECK-LABEL: atomic_load_relaxed_64:
%ptr_unsigned = getelementptr i64, i64* %p, i32 4095
%val_unsigned = load atomic i64, i64* %ptr_unsigned monotonic, align 8
@@ -228,14 +228,14 @@ define i64 @atomic_load_relaxed_64(i64*
}
-define void @atomc_store(i32* %p) {
+define void @atomc_store(i32* %p) #0 {
store atomic i32 4, i32* %p seq_cst, align 4
ret void
; CHECK-LABEL: atomc_store:
; CHECK: stlr
}
-define void @atomic_store_relaxed_8(i8* %p, i32 %off32, i8 %val) {
+define void @atomic_store_relaxed_8(i8* %p, i32 %off32, i8 %val) #0 {
; CHECK-LABEL: atomic_store_relaxed_8:
%ptr_unsigned = getelementptr i8, i8* %p, i32 4095
store atomic i8 %val, i8* %ptr_unsigned monotonic, align 1
@@ -257,7 +257,7 @@ define void @atomic_store_relaxed_8(i8*
ret void
}
-define void @atomic_store_relaxed_16(i16* %p, i32 %off32, i16 %val) {
+define void @atomic_store_relaxed_16(i16* %p, i32 %off32, i16 %val) #0 {
; CHECK-LABEL: atomic_store_relaxed_16:
%ptr_unsigned = getelementptr i16, i16* %p, i32 4095
store atomic i16 %val, i16* %ptr_unsigned monotonic, align 2
@@ -279,7 +279,7 @@ define void @atomic_store_relaxed_16(i16
ret void
}
-define void @atomic_store_relaxed_32(i32* %p, i32 %off32, i32 %val) {
+define void @atomic_store_relaxed_32(i32* %p, i32 %off32, i32 %val) #0 {
; CHECK-LABEL: atomic_store_relaxed_32:
%ptr_unsigned = getelementptr i32, i32* %p, i32 4095
store atomic i32 %val, i32* %ptr_unsigned monotonic, align 4
@@ -301,7 +301,7 @@ define void @atomic_store_relaxed_32(i32
ret void
}
-define void @atomic_store_relaxed_64(i64* %p, i32 %off32, i64 %val) {
+define void @atomic_store_relaxed_64(i64* %p, i32 %off32, i64 %val) #0 {
; CHECK-LABEL: atomic_store_relaxed_64:
%ptr_unsigned = getelementptr i64, i64* %p, i32 4095
store atomic i64 %val, i64* %ptr_unsigned monotonic, align 8
@@ -347,3 +347,5 @@ return:
%retval.0 = phi i32 [ %add.i2, %if.else ], [ %add.i, %entry ]
ret i32 %retval.0
}
+
+attributes #0 = { nounwind }