[llvm] b2773d1 - [LoongArch] Precommit a test for atomic cmpxchg optimization

Weining Lu via llvm-commits llvm-commits at lists.llvm.org
Tue Oct 17 07:32:58 PDT 2023


Author: Weining Lu
Date: 2023-10-17T22:29:51+08:00
New Revision: b2773d170cb4bdb4b19ba801b5eb55395024b3ae

URL: https://github.com/llvm/llvm-project/commit/b2773d170cb4bdb4b19ba801b5eb55395024b3ae
DIFF: https://github.com/llvm/llvm-project/commit/b2773d170cb4bdb4b19ba801b5eb55395024b3ae.diff

LOG: [LoongArch] Precommit a test for atomic cmpxchg optimization
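
For context: the functions added below exercise cmpxchg operations whose
success ordering (acquire) is stronger than their failure ordering
(monotonic). In the current LA64 output the failure path of these new
variants still ends in the same "dbar 20" barrier emitted for the
acquire/acquire variants, while the monotonic/monotonic variants use
"dbar 1792"; the optimization this test precommits for is presumably
expected to weaken the failure-path barrier when the failure ordering
does not require acquire semantics. A minimal IR sketch of the pattern
under test (the function name is illustrative, not part of the commit):

  ; A cmpxchg with acquire success ordering and monotonic failure
  ; ordering. It returns a { i8, i1 } pair: the value loaded from
  ; memory and a flag indicating whether the exchange succeeded.
  define i8 @sketch_cmpxchg_i8(ptr %p, i8 %expected, i8 %new) nounwind {
    %pair = cmpxchg ptr %p, i8 %expected, i8 %new acquire monotonic
    %old = extractvalue { i8, i1 } %pair, 0
    ret i8 %old
  }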

Added: 
    

Modified: 
    llvm/test/CodeGen/LoongArch/ir-instruction/atomic-cmpxchg.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/atomic-cmpxchg.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/atomic-cmpxchg.ll
index 2f764fd831ee2e0..76f9ebed0d93ba4 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/atomic-cmpxchg.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/atomic-cmpxchg.ll
@@ -104,6 +104,109 @@ define void @cmpxchg_i64_acquire_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
   ret void
 }
 
+define void @cmpxchg_i8_acquire_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
+; LA64-LABEL: cmpxchg_i8_acquire_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    slli.d $a3, $a0, 3
+; LA64-NEXT:    sll.w $a1, $a1, $a3
+; LA64-NEXT:    andi $a2, $a2, 255
+; LA64-NEXT:    sll.w $a2, $a2, $a3
+; LA64-NEXT:    ori $a4, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a4, $a3
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    addi.w $a2, $a2, 0
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB4_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a5, $a4, $a3
+; LA64-NEXT:    bne $a5, $a1, .LBB4_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB4_1 Depth=1
+; LA64-NEXT:    andn $a5, $a4, $a3
+; LA64-NEXT:    or $a5, $a5, $a2
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB4_1
+; LA64-NEXT:    b .LBB4_4
+; LA64-NEXT:  .LBB4_3:
+; LA64-NEXT:    dbar 20
+; LA64-NEXT:  .LBB4_4:
+; LA64-NEXT:    ret
+  %res = cmpxchg ptr %ptr, i8 %cmp, i8 %val acquire monotonic
+  ret void
+}
+
+define void @cmpxchg_i16_acquire_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounwind {
+; LA64-LABEL: cmpxchg_i16_acquire_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    slli.d $a3, $a0, 3
+; LA64-NEXT:    sll.w $a1, $a1, $a3
+; LA64-NEXT:    bstrpick.d $a2, $a2, 15, 0
+; LA64-NEXT:    sll.w $a2, $a2, $a3
+; LA64-NEXT:    lu12i.w $a4, 15
+; LA64-NEXT:    ori $a4, $a4, 4095
+; LA64-NEXT:    sll.w $a3, $a4, $a3
+; LA64-NEXT:    addi.w $a3, $a3, 0
+; LA64-NEXT:    addi.w $a2, $a2, 0
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:  .LBB5_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a5, $a4, $a3
+; LA64-NEXT:    bne $a5, $a1, .LBB5_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB5_1 Depth=1
+; LA64-NEXT:    andn $a5, $a4, $a3
+; LA64-NEXT:    or $a5, $a5, $a2
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB5_1
+; LA64-NEXT:    b .LBB5_4
+; LA64-NEXT:  .LBB5_3:
+; LA64-NEXT:    dbar 20
+; LA64-NEXT:  .LBB5_4:
+; LA64-NEXT:    ret
+  %res = cmpxchg ptr %ptr, i16 %cmp, i16 %val acquire monotonic
+  ret void
+}
+
+define void @cmpxchg_i32_acquire_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounwind {
+; LA64-LABEL: cmpxchg_i32_acquire_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:  .LBB6_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a3, $a0, 0
+; LA64-NEXT:    bne $a3, $a1, .LBB6_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB6_1 Depth=1
+; LA64-NEXT:    move $a4, $a2
+; LA64-NEXT:    sc.w $a4, $a0, 0
+; LA64-NEXT:    beqz $a4, .LBB6_1
+; LA64-NEXT:    b .LBB6_4
+; LA64-NEXT:  .LBB6_3:
+; LA64-NEXT:    dbar 20
+; LA64-NEXT:  .LBB6_4:
+; LA64-NEXT:    ret
+  %res = cmpxchg ptr %ptr, i32 %cmp, i32 %val acquire monotonic
+  ret void
+}
+
+define void @cmpxchg_i64_acquire_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwind {
+; LA64-LABEL: cmpxchg_i64_acquire_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:  .LBB7_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.d $a3, $a0, 0
+; LA64-NEXT:    bne $a3, $a1, .LBB7_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB7_1 Depth=1
+; LA64-NEXT:    move $a4, $a2
+; LA64-NEXT:    sc.d $a4, $a0, 0
+; LA64-NEXT:    beqz $a4, .LBB7_1
+; LA64-NEXT:    b .LBB7_4
+; LA64-NEXT:  .LBB7_3:
+; LA64-NEXT:    dbar 20
+; LA64-NEXT:  .LBB7_4:
+; LA64-NEXT:    ret
+  %res = cmpxchg ptr %ptr, i64 %cmp, i64 %val acquire monotonic
+  ret void
+}
+
 define i8 @cmpxchg_i8_acquire_acquire_reti8(ptr %ptr, i8 %cmp, i8 %val) nounwind {
 ; LA64-LABEL: cmpxchg_i8_acquire_acquire_reti8:
 ; LA64:       # %bb.0:
@@ -118,19 +221,19 @@ define i8 @cmpxchg_i8_acquire_acquire_reti8(ptr %ptr, i8 %cmp, i8 %val) nounwind
 ; LA64-NEXT:    sll.w $a1, $a1, $a3
 ; LA64-NEXT:    addi.w $a1, $a1, 0
 ; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
-; LA64-NEXT:  .LBB4_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB8_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a5, $a0, 0
 ; LA64-NEXT:    and $a6, $a5, $a4
-; LA64-NEXT:    bne $a6, $a1, .LBB4_3
-; LA64-NEXT:  # %bb.2: # in Loop: Header=BB4_1 Depth=1
+; LA64-NEXT:    bne $a6, $a1, .LBB8_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB8_1 Depth=1
 ; LA64-NEXT:    andn $a6, $a5, $a4
 ; LA64-NEXT:    or $a6, $a6, $a2
 ; LA64-NEXT:    sc.w $a6, $a0, 0
-; LA64-NEXT:    beqz $a6, .LBB4_1
-; LA64-NEXT:    b .LBB4_4
-; LA64-NEXT:  .LBB4_3:
+; LA64-NEXT:    beqz $a6, .LBB8_1
+; LA64-NEXT:    b .LBB8_4
+; LA64-NEXT:  .LBB8_3:
 ; LA64-NEXT:    dbar 20
-; LA64-NEXT:  .LBB4_4:
+; LA64-NEXT:  .LBB8_4:
 ; LA64-NEXT:    srl.w $a0, $a5, $a3
 ; LA64-NEXT:    ret
   %tmp = cmpxchg ptr %ptr, i8 %cmp, i8 %val acquire acquire
@@ -153,19 +256,19 @@ define i16 @cmpxchg_i16_acquire_acquire_reti16(ptr %ptr, i16 %cmp, i16 %val) nou
 ; LA64-NEXT:    sll.w $a1, $a1, $a4
 ; LA64-NEXT:    addi.w $a1, $a1, 0
 ; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
-; LA64-NEXT:  .LBB5_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB9_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a5, $a0, 0
 ; LA64-NEXT:    and $a6, $a5, $a3
-; LA64-NEXT:    bne $a6, $a1, .LBB5_3
-; LA64-NEXT:  # %bb.2: # in Loop: Header=BB5_1 Depth=1
+; LA64-NEXT:    bne $a6, $a1, .LBB9_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB9_1 Depth=1
 ; LA64-NEXT:    andn $a6, $a5, $a3
 ; LA64-NEXT:    or $a6, $a6, $a2
 ; LA64-NEXT:    sc.w $a6, $a0, 0
-; LA64-NEXT:    beqz $a6, .LBB5_1
-; LA64-NEXT:    b .LBB5_4
-; LA64-NEXT:  .LBB5_3:
+; LA64-NEXT:    beqz $a6, .LBB9_1
+; LA64-NEXT:    b .LBB9_4
+; LA64-NEXT:  .LBB9_3:
 ; LA64-NEXT:    dbar 20
-; LA64-NEXT:  .LBB5_4:
+; LA64-NEXT:  .LBB9_4:
 ; LA64-NEXT:    srl.w $a0, $a5, $a4
 ; LA64-NEXT:    ret
   %tmp = cmpxchg ptr %ptr, i16 %cmp, i16 %val acquire acquire
@@ -176,17 +279,17 @@ define i16 @cmpxchg_i16_acquire_acquire_reti16(ptr %ptr, i16 %cmp, i16 %val) nou
 define i32 @cmpxchg_i32_acquire_acquire_reti32(ptr %ptr, i32 %cmp, i32 %val) nounwind {
 ; LA64-LABEL: cmpxchg_i32_acquire_acquire_reti32:
 ; LA64:       # %bb.0:
-; LA64-NEXT:  .LBB6_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB10_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a3, $a0, 0
-; LA64-NEXT:    bne $a3, $a1, .LBB6_3
-; LA64-NEXT:  # %bb.2: # in Loop: Header=BB6_1 Depth=1
+; LA64-NEXT:    bne $a3, $a1, .LBB10_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB10_1 Depth=1
 ; LA64-NEXT:    move $a4, $a2
 ; LA64-NEXT:    sc.w $a4, $a0, 0
-; LA64-NEXT:    beqz $a4, .LBB6_1
-; LA64-NEXT:    b .LBB6_4
-; LA64-NEXT:  .LBB6_3:
+; LA64-NEXT:    beqz $a4, .LBB10_1
+; LA64-NEXT:    b .LBB10_4
+; LA64-NEXT:  .LBB10_3:
 ; LA64-NEXT:    dbar 20
-; LA64-NEXT:  .LBB6_4:
+; LA64-NEXT:  .LBB10_4:
 ; LA64-NEXT:    move $a0, $a3
 ; LA64-NEXT:    ret
   %tmp = cmpxchg ptr %ptr, i32 %cmp, i32 %val acquire acquire
@@ -197,17 +300,17 @@ define i32 @cmpxchg_i32_acquire_acquire_reti32(ptr %ptr, i32 %cmp, i32 %val) nou
 define i64 @cmpxchg_i64_acquire_acquire_reti64(ptr %ptr, i64 %cmp, i64 %val) nounwind {
 ; LA64-LABEL: cmpxchg_i64_acquire_acquire_reti64:
 ; LA64:       # %bb.0:
-; LA64-NEXT:  .LBB7_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB11_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.d $a3, $a0, 0
-; LA64-NEXT:    bne $a3, $a1, .LBB7_3
-; LA64-NEXT:  # %bb.2: # in Loop: Header=BB7_1 Depth=1
+; LA64-NEXT:    bne $a3, $a1, .LBB11_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB11_1 Depth=1
 ; LA64-NEXT:    move $a4, $a2
 ; LA64-NEXT:    sc.d $a4, $a0, 0
-; LA64-NEXT:    beqz $a4, .LBB7_1
-; LA64-NEXT:    b .LBB7_4
-; LA64-NEXT:  .LBB7_3:
+; LA64-NEXT:    beqz $a4, .LBB11_1
+; LA64-NEXT:    b .LBB11_4
+; LA64-NEXT:  .LBB11_3:
 ; LA64-NEXT:    dbar 20
-; LA64-NEXT:  .LBB7_4:
+; LA64-NEXT:  .LBB11_4:
 ; LA64-NEXT:    move $a0, $a3
 ; LA64-NEXT:    ret
   %tmp = cmpxchg ptr %ptr, i64 %cmp, i64 %val acquire acquire
@@ -229,19 +332,19 @@ define i1 @cmpxchg_i8_acquire_acquire_reti1(ptr %ptr, i8 %cmp, i8 %val) nounwind
 ; LA64-NEXT:    addi.w $a1, $a1, 0
 ; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
 ; LA64-NEXT:    addi.w $a3, $a4, 0
-; LA64-NEXT:  .LBB8_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB12_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a5, $a0, 0
 ; LA64-NEXT:    and $a6, $a5, $a3
-; LA64-NEXT:    bne $a6, $a1, .LBB8_3
-; LA64-NEXT:  # %bb.2: # in Loop: Header=BB8_1 Depth=1
+; LA64-NEXT:    bne $a6, $a1, .LBB12_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB12_1 Depth=1
 ; LA64-NEXT:    andn $a6, $a5, $a3
 ; LA64-NEXT:    or $a6, $a6, $a2
 ; LA64-NEXT:    sc.w $a6, $a0, 0
-; LA64-NEXT:    beqz $a6, .LBB8_1
-; LA64-NEXT:    b .LBB8_4
-; LA64-NEXT:  .LBB8_3:
+; LA64-NEXT:    beqz $a6, .LBB12_1
+; LA64-NEXT:    b .LBB12_4
+; LA64-NEXT:  .LBB12_3:
 ; LA64-NEXT:    dbar 20
-; LA64-NEXT:  .LBB8_4:
+; LA64-NEXT:  .LBB12_4:
 ; LA64-NEXT:    and $a0, $a5, $a4
 ; LA64-NEXT:    addi.w $a0, $a0, 0
 ; LA64-NEXT:    xor $a0, $a1, $a0
@@ -267,19 +370,19 @@ define i1 @cmpxchg_i16_acquire_acquire_reti1(ptr %ptr, i16 %cmp, i16 %val) nounw
 ; LA64-NEXT:    addi.w $a1, $a1, 0
 ; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
 ; LA64-NEXT:    addi.w $a4, $a3, 0
-; LA64-NEXT:  .LBB9_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB13_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a5, $a0, 0
 ; LA64-NEXT:    and $a6, $a5, $a4
-; LA64-NEXT:    bne $a6, $a1, .LBB9_3
-; LA64-NEXT:  # %bb.2: # in Loop: Header=BB9_1 Depth=1
+; LA64-NEXT:    bne $a6, $a1, .LBB13_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB13_1 Depth=1
 ; LA64-NEXT:    andn $a6, $a5, $a4
 ; LA64-NEXT:    or $a6, $a6, $a2
 ; LA64-NEXT:    sc.w $a6, $a0, 0
-; LA64-NEXT:    beqz $a6, .LBB9_1
-; LA64-NEXT:    b .LBB9_4
-; LA64-NEXT:  .LBB9_3:
+; LA64-NEXT:    beqz $a6, .LBB13_1
+; LA64-NEXT:    b .LBB13_4
+; LA64-NEXT:  .LBB13_3:
 ; LA64-NEXT:    dbar 20
-; LA64-NEXT:  .LBB9_4:
+; LA64-NEXT:  .LBB13_4:
 ; LA64-NEXT:    and $a0, $a5, $a3
 ; LA64-NEXT:    addi.w $a0, $a0, 0
 ; LA64-NEXT:    xor $a0, $a1, $a0
@@ -293,17 +396,17 @@ define i1 @cmpxchg_i16_acquire_acquire_reti1(ptr %ptr, i16 %cmp, i16 %val) nounw
 define i1 @cmpxchg_i32_acquire_acquire_reti1(ptr %ptr, i32 %cmp, i32 %val) nounwind {
 ; LA64-LABEL: cmpxchg_i32_acquire_acquire_reti1:
 ; LA64:       # %bb.0:
-; LA64-NEXT:  .LBB10_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB14_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a3, $a0, 0
-; LA64-NEXT:    bne $a3, $a1, .LBB10_3
-; LA64-NEXT:  # %bb.2: # in Loop: Header=BB10_1 Depth=1
+; LA64-NEXT:    bne $a3, $a1, .LBB14_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB14_1 Depth=1
 ; LA64-NEXT:    move $a4, $a2
 ; LA64-NEXT:    sc.w $a4, $a0, 0
-; LA64-NEXT:    beqz $a4, .LBB10_1
-; LA64-NEXT:    b .LBB10_4
-; LA64-NEXT:  .LBB10_3:
+; LA64-NEXT:    beqz $a4, .LBB14_1
+; LA64-NEXT:    b .LBB14_4
+; LA64-NEXT:  .LBB14_3:
 ; LA64-NEXT:    dbar 20
-; LA64-NEXT:  .LBB10_4:
+; LA64-NEXT:  .LBB14_4:
 ; LA64-NEXT:    addi.w $a0, $a1, 0
 ; LA64-NEXT:    xor $a0, $a3, $a0
 ; LA64-NEXT:    sltui $a0, $a0, 1
@@ -316,17 +419,17 @@ define i1 @cmpxchg_i32_acquire_acquire_reti1(ptr %ptr, i32 %cmp, i32 %val) nounw
 define i1 @cmpxchg_i64_acquire_acquire_reti1(ptr %ptr, i64 %cmp, i64 %val) nounwind {
 ; LA64-LABEL: cmpxchg_i64_acquire_acquire_reti1:
 ; LA64:       # %bb.0:
-; LA64-NEXT:  .LBB11_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB15_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.d $a3, $a0, 0
-; LA64-NEXT:    bne $a3, $a1, .LBB11_3
-; LA64-NEXT:  # %bb.2: # in Loop: Header=BB11_1 Depth=1
+; LA64-NEXT:    bne $a3, $a1, .LBB15_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB15_1 Depth=1
 ; LA64-NEXT:    move $a4, $a2
 ; LA64-NEXT:    sc.d $a4, $a0, 0
-; LA64-NEXT:    beqz $a4, .LBB11_1
-; LA64-NEXT:    b .LBB11_4
-; LA64-NEXT:  .LBB11_3:
+; LA64-NEXT:    beqz $a4, .LBB15_1
+; LA64-NEXT:    b .LBB15_4
+; LA64-NEXT:  .LBB15_3:
 ; LA64-NEXT:    dbar 20
-; LA64-NEXT:  .LBB11_4:
+; LA64-NEXT:  .LBB15_4:
 ; LA64-NEXT:    xor $a0, $a3, $a1
 ; LA64-NEXT:    sltui $a0, $a0, 1
 ; LA64-NEXT:    ret
@@ -349,19 +452,19 @@ define void @cmpxchg_i8_monotonic_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind
 ; LA64-NEXT:    addi.w $a2, $a2, 0
 ; LA64-NEXT:    addi.w $a1, $a1, 0
 ; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
-; LA64-NEXT:  .LBB12_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB16_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a5, $a4, $a3
-; LA64-NEXT:    bne $a5, $a1, .LBB12_3
-; LA64-NEXT:  # %bb.2: # in Loop: Header=BB12_1 Depth=1
+; LA64-NEXT:    bne $a5, $a1, .LBB16_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB16_1 Depth=1
 ; LA64-NEXT:    andn $a5, $a4, $a3
 ; LA64-NEXT:    or $a5, $a5, $a2
 ; LA64-NEXT:    sc.w $a5, $a0, 0
-; LA64-NEXT:    beqz $a5, .LBB12_1
-; LA64-NEXT:    b .LBB12_4
-; LA64-NEXT:  .LBB12_3:
+; LA64-NEXT:    beqz $a5, .LBB16_1
+; LA64-NEXT:    b .LBB16_4
+; LA64-NEXT:  .LBB16_3:
 ; LA64-NEXT:    dbar 1792
-; LA64-NEXT:  .LBB12_4:
+; LA64-NEXT:  .LBB16_4:
 ; LA64-NEXT:    ret
   %res = cmpxchg ptr %ptr, i8 %cmp, i8 %val monotonic monotonic
   ret void
@@ -382,19 +485,19 @@ define void @cmpxchg_i16_monotonic_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounw
 ; LA64-NEXT:    addi.w $a2, $a2, 0
 ; LA64-NEXT:    addi.w $a1, $a1, 0
 ; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
-; LA64-NEXT:  .LBB13_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB17_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a5, $a4, $a3
-; LA64-NEXT:    bne $a5, $a1, .LBB13_3
-; LA64-NEXT:  # %bb.2: # in Loop: Header=BB13_1 Depth=1
+; LA64-NEXT:    bne $a5, $a1, .LBB17_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB17_1 Depth=1
 ; LA64-NEXT:    andn $a5, $a4, $a3
 ; LA64-NEXT:    or $a5, $a5, $a2
 ; LA64-NEXT:    sc.w $a5, $a0, 0
-; LA64-NEXT:    beqz $a5, .LBB13_1
-; LA64-NEXT:    b .LBB13_4
-; LA64-NEXT:  .LBB13_3:
+; LA64-NEXT:    beqz $a5, .LBB17_1
+; LA64-NEXT:    b .LBB17_4
+; LA64-NEXT:  .LBB17_3:
 ; LA64-NEXT:    dbar 1792
-; LA64-NEXT:  .LBB13_4:
+; LA64-NEXT:  .LBB17_4:
 ; LA64-NEXT:    ret
   %res = cmpxchg ptr %ptr, i16 %cmp, i16 %val monotonic monotonic
   ret void
@@ -403,17 +506,17 @@ define void @cmpxchg_i16_monotonic_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounw
 define void @cmpxchg_i32_monotonic_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounwind {
 ; LA64-LABEL: cmpxchg_i32_monotonic_monotonic:
 ; LA64:       # %bb.0:
-; LA64-NEXT:  .LBB14_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB18_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a3, $a0, 0
-; LA64-NEXT:    bne $a3, $a1, .LBB14_3
-; LA64-NEXT:  # %bb.2: # in Loop: Header=BB14_1 Depth=1
+; LA64-NEXT:    bne $a3, $a1, .LBB18_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB18_1 Depth=1
 ; LA64-NEXT:    move $a4, $a2
 ; LA64-NEXT:    sc.w $a4, $a0, 0
-; LA64-NEXT:    beqz $a4, .LBB14_1
-; LA64-NEXT:    b .LBB14_4
-; LA64-NEXT:  .LBB14_3:
+; LA64-NEXT:    beqz $a4, .LBB18_1
+; LA64-NEXT:    b .LBB18_4
+; LA64-NEXT:  .LBB18_3:
 ; LA64-NEXT:    dbar 1792
-; LA64-NEXT:  .LBB14_4:
+; LA64-NEXT:  .LBB18_4:
 ; LA64-NEXT:    ret
   %res = cmpxchg ptr %ptr, i32 %cmp, i32 %val monotonic monotonic
   ret void
@@ -422,17 +525,17 @@ define void @cmpxchg_i32_monotonic_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounw
 define void @cmpxchg_i64_monotonic_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwind {
 ; LA64-LABEL: cmpxchg_i64_monotonic_monotonic:
 ; LA64:       # %bb.0:
-; LA64-NEXT:  .LBB15_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB19_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.d $a3, $a0, 0
-; LA64-NEXT:    bne $a3, $a1, .LBB15_3
-; LA64-NEXT:  # %bb.2: # in Loop: Header=BB15_1 Depth=1
+; LA64-NEXT:    bne $a3, $a1, .LBB19_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB19_1 Depth=1
 ; LA64-NEXT:    move $a4, $a2
 ; LA64-NEXT:    sc.d $a4, $a0, 0
-; LA64-NEXT:    beqz $a4, .LBB15_1
-; LA64-NEXT:    b .LBB15_4
-; LA64-NEXT:  .LBB15_3:
+; LA64-NEXT:    beqz $a4, .LBB19_1
+; LA64-NEXT:    b .LBB19_4
+; LA64-NEXT:  .LBB19_3:
 ; LA64-NEXT:    dbar 1792
-; LA64-NEXT:  .LBB15_4:
+; LA64-NEXT:  .LBB19_4:
 ; LA64-NEXT:    ret
   %res = cmpxchg ptr %ptr, i64 %cmp, i64 %val monotonic monotonic
   ret void
@@ -452,19 +555,19 @@ define i8 @cmpxchg_i8_monotonic_monotonic_reti8(ptr %ptr, i8 %cmp, i8 %val) noun
 ; LA64-NEXT:    sll.w $a1, $a1, $a3
 ; LA64-NEXT:    addi.w $a1, $a1, 0
 ; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
-; LA64-NEXT:  .LBB16_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB20_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a5, $a0, 0
 ; LA64-NEXT:    and $a6, $a5, $a4
-; LA64-NEXT:    bne $a6, $a1, .LBB16_3
-; LA64-NEXT:  # %bb.2: # in Loop: Header=BB16_1 Depth=1
+; LA64-NEXT:    bne $a6, $a1, .LBB20_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB20_1 Depth=1
 ; LA64-NEXT:    andn $a6, $a5, $a4
 ; LA64-NEXT:    or $a6, $a6, $a2
 ; LA64-NEXT:    sc.w $a6, $a0, 0
-; LA64-NEXT:    beqz $a6, .LBB16_1
-; LA64-NEXT:    b .LBB16_4
-; LA64-NEXT:  .LBB16_3:
+; LA64-NEXT:    beqz $a6, .LBB20_1
+; LA64-NEXT:    b .LBB20_4
+; LA64-NEXT:  .LBB20_3:
 ; LA64-NEXT:    dbar 1792
-; LA64-NEXT:  .LBB16_4:
+; LA64-NEXT:  .LBB20_4:
 ; LA64-NEXT:    srl.w $a0, $a5, $a3
 ; LA64-NEXT:    ret
   %tmp = cmpxchg ptr %ptr, i8 %cmp, i8 %val monotonic monotonic
@@ -487,19 +590,19 @@ define i16 @cmpxchg_i16_monotonic_monotonic_reti16(ptr %ptr, i16 %cmp, i16 %val)
 ; LA64-NEXT:    sll.w $a1, $a1, $a4
 ; LA64-NEXT:    addi.w $a1, $a1, 0
 ; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
-; LA64-NEXT:  .LBB17_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB21_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a5, $a0, 0
 ; LA64-NEXT:    and $a6, $a5, $a3
-; LA64-NEXT:    bne $a6, $a1, .LBB17_3
-; LA64-NEXT:  # %bb.2: # in Loop: Header=BB17_1 Depth=1
+; LA64-NEXT:    bne $a6, $a1, .LBB21_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB21_1 Depth=1
 ; LA64-NEXT:    andn $a6, $a5, $a3
 ; LA64-NEXT:    or $a6, $a6, $a2
 ; LA64-NEXT:    sc.w $a6, $a0, 0
-; LA64-NEXT:    beqz $a6, .LBB17_1
-; LA64-NEXT:    b .LBB17_4
-; LA64-NEXT:  .LBB17_3:
+; LA64-NEXT:    beqz $a6, .LBB21_1
+; LA64-NEXT:    b .LBB21_4
+; LA64-NEXT:  .LBB21_3:
 ; LA64-NEXT:    dbar 1792
-; LA64-NEXT:  .LBB17_4:
+; LA64-NEXT:  .LBB21_4:
 ; LA64-NEXT:    srl.w $a0, $a5, $a4
 ; LA64-NEXT:    ret
   %tmp = cmpxchg ptr %ptr, i16 %cmp, i16 %val monotonic monotonic
@@ -510,17 +613,17 @@ define i16 @cmpxchg_i16_monotonic_monotonic_reti16(ptr %ptr, i16 %cmp, i16 %val)
 define i32 @cmpxchg_i32_monotonic_monotonic_reti32(ptr %ptr, i32 %cmp, i32 %val) nounwind {
 ; LA64-LABEL: cmpxchg_i32_monotonic_monotonic_reti32:
 ; LA64:       # %bb.0:
-; LA64-NEXT:  .LBB18_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB22_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a3, $a0, 0
-; LA64-NEXT:    bne $a3, $a1, .LBB18_3
-; LA64-NEXT:  # %bb.2: # in Loop: Header=BB18_1 Depth=1
+; LA64-NEXT:    bne $a3, $a1, .LBB22_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB22_1 Depth=1
 ; LA64-NEXT:    move $a4, $a2
 ; LA64-NEXT:    sc.w $a4, $a0, 0
-; LA64-NEXT:    beqz $a4, .LBB18_1
-; LA64-NEXT:    b .LBB18_4
-; LA64-NEXT:  .LBB18_3:
+; LA64-NEXT:    beqz $a4, .LBB22_1
+; LA64-NEXT:    b .LBB22_4
+; LA64-NEXT:  .LBB22_3:
 ; LA64-NEXT:    dbar 1792
-; LA64-NEXT:  .LBB18_4:
+; LA64-NEXT:  .LBB22_4:
 ; LA64-NEXT:    move $a0, $a3
 ; LA64-NEXT:    ret
   %tmp = cmpxchg ptr %ptr, i32 %cmp, i32 %val monotonic monotonic
@@ -531,17 +634,17 @@ define i32 @cmpxchg_i32_monotonic_monotonic_reti32(ptr %ptr, i32 %cmp, i32 %val)
 define i64 @cmpxchg_i64_monotonic_monotonic_reti64(ptr %ptr, i64 %cmp, i64 %val) nounwind {
 ; LA64-LABEL: cmpxchg_i64_monotonic_monotonic_reti64:
 ; LA64:       # %bb.0:
-; LA64-NEXT:  .LBB19_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB23_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.d $a3, $a0, 0
-; LA64-NEXT:    bne $a3, $a1, .LBB19_3
-; LA64-NEXT:  # %bb.2: # in Loop: Header=BB19_1 Depth=1
+; LA64-NEXT:    bne $a3, $a1, .LBB23_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB23_1 Depth=1
 ; LA64-NEXT:    move $a4, $a2
 ; LA64-NEXT:    sc.d $a4, $a0, 0
-; LA64-NEXT:    beqz $a4, .LBB19_1
-; LA64-NEXT:    b .LBB19_4
-; LA64-NEXT:  .LBB19_3:
+; LA64-NEXT:    beqz $a4, .LBB23_1
+; LA64-NEXT:    b .LBB23_4
+; LA64-NEXT:  .LBB23_3:
 ; LA64-NEXT:    dbar 1792
-; LA64-NEXT:  .LBB19_4:
+; LA64-NEXT:  .LBB23_4:
 ; LA64-NEXT:    move $a0, $a3
 ; LA64-NEXT:    ret
   %tmp = cmpxchg ptr %ptr, i64 %cmp, i64 %val monotonic monotonic
@@ -563,19 +666,19 @@ define i1 @cmpxchg_i8_monotonic_monotonic_reti1(ptr %ptr, i8 %cmp, i8 %val) noun
 ; LA64-NEXT:    addi.w $a1, $a1, 0
 ; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
 ; LA64-NEXT:    addi.w $a3, $a4, 0
-; LA64-NEXT:  .LBB20_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB24_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a5, $a0, 0
 ; LA64-NEXT:    and $a6, $a5, $a3
-; LA64-NEXT:    bne $a6, $a1, .LBB20_3
-; LA64-NEXT:  # %bb.2: # in Loop: Header=BB20_1 Depth=1
+; LA64-NEXT:    bne $a6, $a1, .LBB24_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB24_1 Depth=1
 ; LA64-NEXT:    andn $a6, $a5, $a3
 ; LA64-NEXT:    or $a6, $a6, $a2
 ; LA64-NEXT:    sc.w $a6, $a0, 0
-; LA64-NEXT:    beqz $a6, .LBB20_1
-; LA64-NEXT:    b .LBB20_4
-; LA64-NEXT:  .LBB20_3:
+; LA64-NEXT:    beqz $a6, .LBB24_1
+; LA64-NEXT:    b .LBB24_4
+; LA64-NEXT:  .LBB24_3:
 ; LA64-NEXT:    dbar 1792
-; LA64-NEXT:  .LBB20_4:
+; LA64-NEXT:  .LBB24_4:
 ; LA64-NEXT:    and $a0, $a5, $a4
 ; LA64-NEXT:    addi.w $a0, $a0, 0
 ; LA64-NEXT:    xor $a0, $a1, $a0
@@ -601,19 +704,19 @@ define i1 @cmpxchg_i16_monotonic_monotonic_reti1(ptr %ptr, i16 %cmp, i16 %val) n
 ; LA64-NEXT:    addi.w $a1, $a1, 0
 ; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
 ; LA64-NEXT:    addi.w $a4, $a3, 0
-; LA64-NEXT:  .LBB21_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB25_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a5, $a0, 0
 ; LA64-NEXT:    and $a6, $a5, $a4
-; LA64-NEXT:    bne $a6, $a1, .LBB21_3
-; LA64-NEXT:  # %bb.2: # in Loop: Header=BB21_1 Depth=1
+; LA64-NEXT:    bne $a6, $a1, .LBB25_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB25_1 Depth=1
 ; LA64-NEXT:    andn $a6, $a5, $a4
 ; LA64-NEXT:    or $a6, $a6, $a2
 ; LA64-NEXT:    sc.w $a6, $a0, 0
-; LA64-NEXT:    beqz $a6, .LBB21_1
-; LA64-NEXT:    b .LBB21_4
-; LA64-NEXT:  .LBB21_3:
+; LA64-NEXT:    beqz $a6, .LBB25_1
+; LA64-NEXT:    b .LBB25_4
+; LA64-NEXT:  .LBB25_3:
 ; LA64-NEXT:    dbar 1792
-; LA64-NEXT:  .LBB21_4:
+; LA64-NEXT:  .LBB25_4:
 ; LA64-NEXT:    and $a0, $a5, $a3
 ; LA64-NEXT:    addi.w $a0, $a0, 0
 ; LA64-NEXT:    xor $a0, $a1, $a0
@@ -627,17 +730,17 @@ define i1 @cmpxchg_i16_monotonic_monotonic_reti1(ptr %ptr, i16 %cmp, i16 %val) n
 define i1 @cmpxchg_i32_monotonic_monotonic_reti1(ptr %ptr, i32 %cmp, i32 %val) nounwind {
 ; LA64-LABEL: cmpxchg_i32_monotonic_monotonic_reti1:
 ; LA64:       # %bb.0:
-; LA64-NEXT:  .LBB22_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB26_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a3, $a0, 0
-; LA64-NEXT:    bne $a3, $a1, .LBB22_3
-; LA64-NEXT:  # %bb.2: # in Loop: Header=BB22_1 Depth=1
+; LA64-NEXT:    bne $a3, $a1, .LBB26_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB26_1 Depth=1
 ; LA64-NEXT:    move $a4, $a2
 ; LA64-NEXT:    sc.w $a4, $a0, 0
-; LA64-NEXT:    beqz $a4, .LBB22_1
-; LA64-NEXT:    b .LBB22_4
-; LA64-NEXT:  .LBB22_3:
+; LA64-NEXT:    beqz $a4, .LBB26_1
+; LA64-NEXT:    b .LBB26_4
+; LA64-NEXT:  .LBB26_3:
 ; LA64-NEXT:    dbar 1792
-; LA64-NEXT:  .LBB22_4:
+; LA64-NEXT:  .LBB26_4:
 ; LA64-NEXT:    addi.w $a0, $a1, 0
 ; LA64-NEXT:    xor $a0, $a3, $a0
 ; LA64-NEXT:    sltui $a0, $a0, 1
@@ -650,17 +753,17 @@ define i1 @cmpxchg_i32_monotonic_monotonic_reti1(ptr %ptr, i32 %cmp, i32 %val) n
 define i1 @cmpxchg_i64_monotonic_monotonic_reti1(ptr %ptr, i64 %cmp, i64 %val) nounwind {
 ; LA64-LABEL: cmpxchg_i64_monotonic_monotonic_reti1:
 ; LA64:       # %bb.0:
-; LA64-NEXT:  .LBB23_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:  .LBB27_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.d $a3, $a0, 0
-; LA64-NEXT:    bne $a3, $a1, .LBB23_3
-; LA64-NEXT:  # %bb.2: # in Loop: Header=BB23_1 Depth=1
+; LA64-NEXT:    bne $a3, $a1, .LBB27_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB27_1 Depth=1
 ; LA64-NEXT:    move $a4, $a2
 ; LA64-NEXT:    sc.d $a4, $a0, 0
-; LA64-NEXT:    beqz $a4, .LBB23_1
-; LA64-NEXT:    b .LBB23_4
-; LA64-NEXT:  .LBB23_3:
+; LA64-NEXT:    beqz $a4, .LBB27_1
+; LA64-NEXT:    b .LBB27_4
+; LA64-NEXT:  .LBB27_3:
 ; LA64-NEXT:    dbar 1792
-; LA64-NEXT:  .LBB23_4:
+; LA64-NEXT:  .LBB27_4:
 ; LA64-NEXT:    xor $a0, $a3, $a1
 ; LA64-NEXT:    sltui $a0, $a0, 1
 ; LA64-NEXT:    ret
